author    Lee Garrett <lgarrett@rocketjump.eu>  2022-06-13 23:13:57 +0200
committer Lee Garrett <lgarrett@rocketjump.eu>  2022-06-13 23:13:57 +0200
commit    df2a2cd18c338647061f3448248f8b97b6971f49 (patch)
tree      f223b6b9084be551de18fdb4fe0d596c68a9cebc /test/support
parent    71ed02a1e802462d5d9b5f7e0fad42307a175278 (diff)
download  debian-ansible-core-df2a2cd18c338647061f3448248f8b97b6971f49.zip
New upstream version 2.13.0
Diffstat (limited to 'test/support')
-rw-r--r--  test/support/integration/plugins/inventory/foreman.py | 3
-rw-r--r--  test/support/integration/plugins/lookup/rabbitmq.py | 190
-rw-r--r--  test/support/integration/plugins/module_utils/crypto.py | 2125
-rw-r--r--  test/support/integration/plugins/module_utils/database.py | 142
-rw-r--r--  test/support/integration/plugins/module_utils/ecs/__init__.py | 0
-rw-r--r--  test/support/integration/plugins/module_utils/ecs/api.py | 364
-rw-r--r--  test/support/integration/plugins/module_utils/mysql.py | 106
-rw-r--r--  test/support/integration/plugins/module_utils/postgres.py | 330
-rw-r--r--  test/support/integration/plugins/module_utils/rabbitmq.py | 220
-rw-r--r--  test/support/integration/plugins/modules/aws_s3.py | 925
-rw-r--r--  test/support/integration/plugins/modules/cloud_init_data_facts.py | 134
-rw-r--r--  test/support/integration/plugins/modules/deploy_helper.py | 521
-rw-r--r--  test/support/integration/plugins/modules/ec2_ami_info.py | 3
-rw-r--r--  test/support/integration/plugins/modules/locale_gen.py | 237
-rw-r--r--  test/support/integration/plugins/modules/lvg.py | 295
-rw-r--r--  test/support/integration/plugins/modules/mongodb_parameter.py | 223
-rw-r--r--  test/support/integration/plugins/modules/mongodb_user.py | 474
-rw-r--r--  test/support/integration/plugins/modules/pids.py | 89
-rw-r--r--  test/support/integration/plugins/modules/postgresql_db.py | 657
-rw-r--r--  test/support/integration/plugins/modules/postgresql_privs.py | 1097
-rw-r--r--  test/support/integration/plugins/modules/postgresql_query.py | 364
-rw-r--r--  test/support/integration/plugins/modules/postgresql_set.py | 434
-rw-r--r--  test/support/integration/plugins/modules/postgresql_table.py | 601
-rw-r--r--  test/support/integration/plugins/modules/postgresql_user.py | 927
-rw-r--r--  test/support/integration/plugins/modules/rabbitmq_plugin.py | 180
-rw-r--r--  test/support/integration/plugins/modules/rabbitmq_queue.py | 257
-rw-r--r--  test/support/integration/plugins/modules/s3_bucket.py | 740
-rw-r--r--  test/support/integration/plugins/modules/selogin.py | 260
-rw-r--r--  test/support/integration/plugins/modules/x509_crl.py | 783
-rw-r--r--  test/support/integration/plugins/modules/x509_crl_info.py | 281
-rw-r--r--  test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/filter/network.py | 2
-rw-r--r--  test/support/network-integration/collections/ansible_collections/cisco/ios/plugins/cliconf/ios.py | 3
-rw-r--r--  test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/cliconf/vyos.py | 3
-rw-r--r-- [l---------]  test/support/windows-integration/collections/ansible_collections/ansible/windows/plugins/action/win_copy.py | 523
-rw-r--r-- [l---------]  test/support/windows-integration/collections/ansible_collections/ansible/windows/plugins/modules/async_status.ps1 | 59
-rw-r--r-- [l---------]  test/support/windows-integration/collections/ansible_collections/ansible/windows/plugins/modules/win_acl.ps1 | 226
-rw-r--r-- [l---------]  test/support/windows-integration/collections/ansible_collections/ansible/windows/plugins/modules/win_acl.py | 133
-rw-r--r-- [l---------]  test/support/windows-integration/collections/ansible_collections/ansible/windows/plugins/modules/win_copy.ps1 | 404
-rw-r--r-- [l---------]  test/support/windows-integration/collections/ansible_collections/ansible/windows/plugins/modules/win_copy.py | 208
-rw-r--r-- [l---------]  test/support/windows-integration/collections/ansible_collections/ansible/windows/plugins/modules/win_file.ps1 | 153
-rw-r--r-- [l---------]  test/support/windows-integration/collections/ansible_collections/ansible/windows/plugins/modules/win_file.py | 71
-rw-r--r-- [l---------]  test/support/windows-integration/collections/ansible_collections/ansible/windows/plugins/modules/win_ping.ps1 | 22
-rw-r--r-- [l---------]  test/support/windows-integration/collections/ansible_collections/ansible/windows/plugins/modules/win_ping.py | 56
-rw-r--r-- [l---------]  test/support/windows-integration/collections/ansible_collections/ansible/windows/plugins/modules/win_shell.ps1 | 139
-rw-r--r-- [l---------]  test/support/windows-integration/collections/ansible_collections/ansible/windows/plugins/modules/win_shell.py | 168
-rw-r--r-- [l---------]  test/support/windows-integration/collections/ansible_collections/ansible/windows/plugins/modules/win_stat.ps1 | 187
-rw-r--r-- [l---------]  test/support/windows-integration/collections/ansible_collections/ansible/windows/plugins/modules/win_stat.py | 237
-rw-r--r--  test/support/windows-integration/plugins/modules/win_data_deduplication.ps1 | 129
-rw-r--r--  test/support/windows-integration/plugins/modules/win_data_deduplication.py | 87
-rw-r--r--  test/support/windows-integration/plugins/modules/win_dsc.ps1 | 398
-rw-r--r--  test/support/windows-integration/plugins/modules/win_dsc.py | 183
-rw-r--r--  test/support/windows-integration/plugins/modules/win_feature.ps1 | 111
-rw-r--r--  test/support/windows-integration/plugins/modules/win_feature.py | 149
-rw-r--r--  test/support/windows-integration/plugins/modules/win_find.ps1 | 416
-rw-r--r--  test/support/windows-integration/plugins/modules/win_find.py | 345
-rw-r--r--  test/support/windows-integration/plugins/modules/win_format.ps1 | 200
-rw-r--r--  test/support/windows-integration/plugins/modules/win_format.py | 103
-rw-r--r--  test/support/windows-integration/plugins/modules/win_path.ps1 | 145
-rw-r--r--  test/support/windows-integration/plugins/modules/win_path.py | 79
-rw-r--r--  test/support/windows-integration/plugins/modules/win_tempfile.py | 67
-rw-r--r--  test/support/windows-integration/plugins/modules/win_template.py | 66
61 files changed, 2579 insertions, 15455 deletions
diff --git a/test/support/integration/plugins/inventory/foreman.py b/test/support/integration/plugins/inventory/foreman.py
index 39e0de33..d026ebdb 100644
--- a/test/support/integration/plugins/inventory/foreman.py
+++ b/test/support/integration/plugins/inventory/foreman.py
@@ -81,11 +81,12 @@ password: secure
validate_certs: False
'''
+from collections.abc import MutableMapping
+
from ansible.module_utils.compat.version import LooseVersion
from ansible.errors import AnsibleError
from ansible.module_utils._text import to_bytes, to_native, to_text
-from ansible.module_utils.common._collections_compat import MutableMapping
from ansible.plugins.inventory import BaseInventoryPlugin, Cacheable, to_safe_group_name, Constructable
# 3rd party imports
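The hunk above replaces the removed ansible.module_utils.common._collections_compat shim with a direct import from collections.abc: the container ABCs have lived in collections.abc since Python 3.3, and the deprecated aliases in the top-level collections module were dropped in Python 3.10. As a hedged sketch (an assumption about the pattern, not the shim's actual contents), a version-portable import of MutableMapping looks like this:

    # Minimal sketch: import MutableMapping portably across Python versions.
    try:
        from collections.abc import MutableMapping  # Python 3.3 and later
    except ImportError:
        # Pre-3.3 fallback; these aliases were removed in Python 3.10.
        from collections import MutableMapping

    def is_mapping(obj):
        """Return True if obj behaves like a mutable mapping (e.g. dict)."""
        return isinstance(obj, MutableMapping)

Once a file targets only modern Python 3, as ansible-core 2.13 does, the plain collections.abc import used in the hunk is the idiomatic form.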
diff --git a/test/support/integration/plugins/lookup/rabbitmq.py b/test/support/integration/plugins/lookup/rabbitmq.py
deleted file mode 100644
index 7c2745f4..00000000
--- a/test/support/integration/plugins/lookup/rabbitmq.py
+++ /dev/null
@@ -1,190 +0,0 @@
-# (c) 2018, John Imison <john+github@imison.net>
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-DOCUMENTATION = """
- lookup: rabbitmq
- author: John Imison <@Im0>
- version_added: "2.8"
- short_description: Retrieve messages from an AMQP/AMQPS RabbitMQ queue.
- description:
- This lookup uses a basic get to retrieve all, or a limited number C(count) of, messages from a RabbitMQ queue.
- options:
- url:
- description:
- A URI connection string to connect to the AMQP/AMQPS RabbitMQ server.
- - For more information refer to the URI spec U(https://www.rabbitmq.com/uri-spec.html).
- required: True
- queue:
- description:
- - The queue to get messages from.
- required: True
- count:
- description:
- - How many messages to collect from the queue.
- - If not set, defaults to retrieving all the messages from the queue.
- requirements:
- - The python pika package U(https://pypi.org/project/pika/).
- notes:
- - This lookup implements BlockingChannel.basic_get to get messages from a RabbitMQ server.
- - After retrieving a message from the server, receipt of the message is acknowledged and the message on the server is deleted.
- - Pika is a pure-Python implementation of the AMQP 0-9-1 protocol that tries to stay fairly independent of the underlying network support library.
- - More information about pika can be found at U(https://pika.readthedocs.io/en/stable/).
- This plugin is tested against RabbitMQ. Other AMQP 0.9.1 protocol-based servers may work but are not tested/guaranteed.
- Assigning the return messages to a variable under C(vars) may lead to unexpected results, as the lookup is evaluated every time the
- variable is referenced.
- Currently this plugin only handles text-based messages from a queue. Unexpected results may occur when retrieving binary data.
-"""
-
-
-EXAMPLES = """
-- name: Get all messages off a queue
- debug:
- msg: "{{ lookup('rabbitmq', url='amqp://guest:guest@192.168.0.10:5672/%2F', queue='hello') }}"
-
-
-# If you are intending on using the returned messages as a variable in more than
-# one task (eg. debug, template), it is recommended to set_fact.
-
-- name: Get 2 messages off a queue and set a fact for re-use
- set_fact:
- messages: "{{ lookup('rabbitmq', url='amqp://guest:guest@192.168.0.10:5672/%2F', queue='hello', count=2) }}"
-
-- name: Dump out contents of the messages
- debug:
- var: messages
-
-"""
-
-RETURN = """
- _list:
- description:
- A list of dictionaries with keys and values from the queue.
- type: list
- contains:
- content_type:
- description: The content_type on the message in the queue.
- type: str
- delivery_mode:
- description: The delivery_mode on the message in the queue.
- type: str
- delivery_tag:
- description: The delivery_tag on the message in the queue.
- type: str
- exchange:
- description: The exchange the message came from.
- type: str
- message_count:
- description: The message_count for the message on the queue.
- type: str
- msg:
- description: The content of the message.
- type: str
- redelivered:
- description: The redelivered flag. True if the message has been delivered before.
- type: bool
- routing_key:
- description: The routing_key on the message in the queue.
- type: str
- headers:
- description: The headers for the message returned from the queue.
- type: dict
- json:
- description: If application/json is specified in content_type, json will be loaded into variables.
- type: dict
-
-"""
-
-import json
-
-from ansible.errors import AnsibleError, AnsibleParserError
-from ansible.plugins.lookup import LookupBase
-from ansible.module_utils._text import to_native, to_text
-from ansible.utils.display import Display
-
-try:
- import pika
- from pika import spec
- HAS_PIKA = True
-except ImportError:
- HAS_PIKA = False
-
-display = Display()
-
-
-class LookupModule(LookupBase):
-
- def run(self, terms, variables=None, url=None, queue=None, count=None):
- if not HAS_PIKA:
- raise AnsibleError('pika python package is required for rabbitmq lookup.')
- if not url:
- raise AnsibleError('URL is required for rabbitmq lookup.')
- if not queue:
- raise AnsibleError('Queue is required for rabbitmq lookup.')
-
- display.vvv(u"terms:%s : variables:%s url:%s queue:%s count:%s" % (terms, variables, url, queue, count))
-
- try:
- parameters = pika.URLParameters(url)
- except Exception as e:
- raise AnsibleError("URL malformed: %s" % to_native(e))
-
- try:
- connection = pika.BlockingConnection(parameters)
- except Exception as e:
- raise AnsibleError("Connection issue: %s" % to_native(e))
-
- try:
- conn_channel = connection.channel()
- except pika.exceptions.AMQPChannelError as e:
- try:
- connection.close()
- except pika.exceptions.AMQPConnectionError as ie:
- raise AnsibleError("Channel and connection closing issues: %s / %s" % to_native(e), to_native(ie))
- raise AnsibleError("Channel issue: %s" % to_native(e))
-
- ret = []
- idx = 0
-
- while True:
- method_frame, properties, body = conn_channel.basic_get(queue=queue)
- if method_frame:
- display.vvv(u"%s, %s, %s " % (method_frame, properties, to_text(body)))
-
- # TODO: In the future consider checking content_type and handle text/binary data differently.
- msg_details = dict({
- 'msg': to_text(body),
- 'message_count': method_frame.message_count,
- 'routing_key': method_frame.routing_key,
- 'delivery_tag': method_frame.delivery_tag,
- 'redelivered': method_frame.redelivered,
- 'exchange': method_frame.exchange,
- 'delivery_mode': properties.delivery_mode,
- 'content_type': properties.content_type,
- 'headers': properties.headers
- })
- if properties.content_type == 'application/json':
- try:
- msg_details['json'] = json.loads(msg_details['msg'])
- except ValueError as e:
- raise AnsibleError("Unable to decode JSON for message %s: %s" % (method_frame.delivery_tag, to_native(e)))
-
- ret.append(msg_details)
- conn_channel.basic_ack(method_frame.delivery_tag)
- idx += 1
- if method_frame.message_count == 0 or idx == count:
- break
- # If we didn't get a method_frame, exit.
- else:
- break
-
- if connection.is_closed:
- return [ret]
- else:
- try:
- connection.close()
- except pika.exceptions.AMQPConnectionError:
- pass
- return [ret]
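As background for the lookup deleted above: its notes describe retrieving messages with BlockingChannel.basic_get and acknowledging each one so the server deletes it. A standalone sketch of that consume-and-acknowledge loop with pika follows; the drain_queue helper, URL, and queue name are illustrative assumptions, not part of the plugin, and the plugin's per-message metadata handling is omitted:

    # Hedged sketch of the basic_get pattern the deleted lookup used:
    # fetch messages one at a time, ack each so the broker removes it,
    # and stop when the queue is empty or an optional count is reached.
    import pika  # https://pypi.org/project/pika/

    def drain_queue(url, queue, count=None):
        connection = pika.BlockingConnection(pika.URLParameters(url))
        channel = connection.channel()
        messages = []
        try:
            while True:
                method_frame, properties, body = channel.basic_get(queue=queue)
                if method_frame is None:  # queue is empty
                    break
                messages.append(body.decode('utf-8', errors='replace'))
                channel.basic_ack(method_frame.delivery_tag)  # server may now delete it
                if count is not None and len(messages) >= count:
                    break
        finally:
            connection.close()
        return messages

    # Hypothetical usage, mirroring the EXAMPLES section above:
    # drain_queue('amqp://guest:guest@192.168.0.10:5672/%2F', 'hello', count=2)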
diff --git a/test/support/integration/plugins/module_utils/crypto.py b/test/support/integration/plugins/module_utils/crypto.py
deleted file mode 100644
index f3f43f07..00000000
--- a/test/support/integration/plugins/module_utils/crypto.py
+++ /dev/null
@@ -1,2125 +0,0 @@
-# -*- coding: utf-8 -*-
-#
-# (c) 2016, Yanis Guenane <yanis+ansible@guenane.org>
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-#
-# ----------------------------------------------------------------------
-# A clearly marked portion of this file is licensed under the BSD license
-# Copyright (c) 2015, 2016 Paul Kehrer (@reaperhulk)
-# Copyright (c) 2017 Fraser Tweedale (@frasertweedale)
-# For more details, search for the function _obj2txt().
-# ---------------------------------------------------------------------
-# A clearly marked portion of this file is extracted from a project that
-# is licensed under the Apache License 2.0
-# Copyright (c) the OpenSSL contributors
-# For more details, search for the function _OID_MAP.
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-import sys
-from ansible.module_utils.compat.version import LooseVersion
-
-try:
- import OpenSSL
- from OpenSSL import crypto
-except ImportError:
- # An error will be raised in the calling class to let the end
- # user know that OpenSSL couldn't be found.
- pass
-
-try:
- import cryptography
- from cryptography import x509
- from cryptography.hazmat.backends import default_backend as cryptography_backend
- from cryptography.hazmat.primitives.serialization import load_pem_private_key
- from cryptography.hazmat.primitives import hashes
- from cryptography.hazmat.primitives import serialization
- import ipaddress
-
- # Older versions of cryptography (< 2.1) do not have __hash__ functions for
- # general name objects (DNSName, IPAddress, ...), while providing overloaded
- # equality and string representation operations. This makes it impossible to
- # use them in hash-based data structures such as set or dict. Since we are
- # actually doing that in openssl_certificate, and potentially in other code,
- # we need to monkey-patch __hash__ for these classes to make sure our code
- # works fine.
- if LooseVersion(cryptography.__version__) < LooseVersion('2.1'):
- # A very simple hash function which relies on the representation
- # of an object to be implemented. This is the case since at least
- # cryptography 1.0, see
- # https://github.com/pyca/cryptography/commit/7a9abce4bff36c05d26d8d2680303a6f64a0e84f
- def simple_hash(self):
- return hash(repr(self))
-
- # The hash functions for the following types were added for cryptography 2.1:
- # https://github.com/pyca/cryptography/commit/fbfc36da2a4769045f2373b004ddf0aff906cf38
- x509.DNSName.__hash__ = simple_hash
- x509.DirectoryName.__hash__ = simple_hash
- x509.GeneralName.__hash__ = simple_hash
- x509.IPAddress.__hash__ = simple_hash
- x509.OtherName.__hash__ = simple_hash
- x509.RegisteredID.__hash__ = simple_hash
-
- if LooseVersion(cryptography.__version__) < LooseVersion('1.2'):
- # The hash functions for the following types were added for cryptography 1.2:
- # https://github.com/pyca/cryptography/commit/b642deed88a8696e5f01ce6855ccf89985fc35d0
- # https://github.com/pyca/cryptography/commit/d1b5681f6db2bde7a14625538bd7907b08dfb486
- x509.RFC822Name.__hash__ = simple_hash
- x509.UniformResourceIdentifier.__hash__ = simple_hash
-
- # Test whether we have support for X25519, X448, Ed25519 and/or Ed448
- try:
- import cryptography.hazmat.primitives.asymmetric.x25519
- CRYPTOGRAPHY_HAS_X25519 = True
- try:
- cryptography.hazmat.primitives.asymmetric.x25519.X25519PrivateKey.private_bytes
- CRYPTOGRAPHY_HAS_X25519_FULL = True
- except AttributeError:
- CRYPTOGRAPHY_HAS_X25519_FULL = False
- except ImportError:
- CRYPTOGRAPHY_HAS_X25519 = False
- CRYPTOGRAPHY_HAS_X25519_FULL = False
- try:
- import cryptography.hazmat.primitives.asymmetric.x448
- CRYPTOGRAPHY_HAS_X448 = True
- except ImportError:
- CRYPTOGRAPHY_HAS_X448 = False
- try:
- import cryptography.hazmat.primitives.asymmetric.ed25519
- CRYPTOGRAPHY_HAS_ED25519 = True
- except ImportError:
- CRYPTOGRAPHY_HAS_ED25519 = False
- try:
- import cryptography.hazmat.primitives.asymmetric.ed448
- CRYPTOGRAPHY_HAS_ED448 = True
- except ImportError:
- CRYPTOGRAPHY_HAS_ED448 = False
-
- HAS_CRYPTOGRAPHY = True
-except ImportError:
- # Error handled in the calling module.
- CRYPTOGRAPHY_HAS_X25519 = False
- CRYPTOGRAPHY_HAS_X25519_FULL = False
- CRYPTOGRAPHY_HAS_X448 = False
- CRYPTOGRAPHY_HAS_ED25519 = False
- CRYPTOGRAPHY_HAS_ED448 = False
- HAS_CRYPTOGRAPHY = False
-
-
-import abc
-import base64
-import binascii
-import datetime
-import errno
-import hashlib
-import os
-import re
-import tempfile
-
-from ansible.module_utils import six
-from ansible.module_utils._text import to_native, to_bytes, to_text
-
-
-class OpenSSLObjectError(Exception):
- pass
-
-
-class OpenSSLBadPassphraseError(OpenSSLObjectError):
- pass
-
-
-def get_fingerprint_of_bytes(source):
- """Generate the fingerprint of the given bytes."""
-
- fingerprint = {}
-
- try:
- algorithms = hashlib.algorithms
- except AttributeError:
- try:
- algorithms = hashlib.algorithms_guaranteed
- except AttributeError:
- return None
-
- for algo in algorithms:
- f = getattr(hashlib, algo)
- try:
- h = f(source)
- except ValueError:
- # This can happen for hash algorithms not supported in FIPS mode
- # (https://github.com/ansible/ansible/issues/67213)
- continue
- try:
- # Certain hash functions have a hexdigest() which expects a length parameter
- pubkey_digest = h.hexdigest()
- except TypeError:
- pubkey_digest = h.hexdigest(32)
- fingerprint[algo] = ':'.join(pubkey_digest[i:i + 2] for i in range(0, len(pubkey_digest), 2))
-
- return fingerprint
-
-
-def get_fingerprint(path, passphrase=None, content=None, backend='pyopenssl'):
- """Generate the fingerprint of the public key. """
-
- privatekey = load_privatekey(path, passphrase=passphrase, content=content, check_passphrase=False, backend=backend)
-
- if backend == 'pyopenssl':
- try:
- publickey = crypto.dump_publickey(crypto.FILETYPE_ASN1, privatekey)
- except AttributeError:
- # If PyOpenSSL < 16.0 crypto.dump_publickey() will fail.
- try:
- bio = crypto._new_mem_buf()
- rc = crypto._lib.i2d_PUBKEY_bio(bio, privatekey._pkey)
- if rc != 1:
- crypto._raise_current_error()
- publickey = crypto._bio_to_string(bio)
- except AttributeError:
- # By doing this we prevent the code from raising an error
- # yet we return no value in the fingerprint hash.
- return None
- elif backend == 'cryptography':
- publickey = privatekey.public_key().public_bytes(
- serialization.Encoding.DER,
- serialization.PublicFormat.SubjectPublicKeyInfo
- )
-
- return get_fingerprint_of_bytes(publickey)
-
-
-def load_file_if_exists(path, module=None, ignore_errors=False):
- try:
- with open(path, 'rb') as f:
- return f.read()
- except EnvironmentError as exc:
- if exc.errno == errno.ENOENT:
- return None
- if ignore_errors:
- return None
- if module is None:
- raise
- module.fail_json('Error while loading {0} - {1}'.format(path, str(exc)))
- except Exception as exc:
- if ignore_errors:
- return None
- if module is None:
- raise
- module.fail_json('Error while loading {0} - {1}'.format(path, str(exc)))
-
-
-def load_privatekey(path, passphrase=None, check_passphrase=True, content=None, backend='pyopenssl'):
- """Load the specified OpenSSL private key.
-
- The content can also be specified via content; in that case,
- this function will not load the key from disk.
- """
-
- try:
- if content is None:
- with open(path, 'rb') as b_priv_key_fh:
- priv_key_detail = b_priv_key_fh.read()
- else:
- priv_key_detail = content
-
- if backend == 'pyopenssl':
-
- # First try: try to load with real passphrase (resp. empty string)
- # Will work if this is the correct passphrase, or the key is not
- # password-protected.
- try:
- result = crypto.load_privatekey(crypto.FILETYPE_PEM,
- priv_key_detail,
- to_bytes(passphrase or ''))
- except crypto.Error as e:
- if len(e.args) > 0 and len(e.args[0]) > 0:
- if e.args[0][0][2] in ('bad decrypt', 'bad password read'):
- # This happens in case we have the wrong passphrase.
- if passphrase is not None:
- raise OpenSSLBadPassphraseError('Wrong passphrase provided for private key!')
- else:
- raise OpenSSLBadPassphraseError('No passphrase provided, but private key is password-protected!')
- raise OpenSSLObjectError('Error while deserializing key: {0}'.format(e))
- if check_passphrase:
- # Next we want to make sure that the key is actually protected by
- # a passphrase (in case we did try the empty string before, make
- # sure that the key is not protected by the empty string)
- try:
- crypto.load_privatekey(crypto.FILETYPE_PEM,
- priv_key_detail,
- to_bytes('y' if passphrase == 'x' else 'x'))
- if passphrase is not None:
- # Since we can load the key without an exception, the
- # key isn't password-protected
- raise OpenSSLBadPassphraseError('Passphrase provided, but private key is not password-protected!')
- except crypto.Error as e:
- if passphrase is None and len(e.args) > 0 and len(e.args[0]) > 0:
- if e.args[0][0][2] in ('bad decrypt', 'bad password read'):
- # The key is obviously protected by the empty string.
- # Don't do this at home (if it's possible at all)...
- raise OpenSSLBadPassphraseError('No passphrase provided, but private key is password-protected!')
- elif backend == 'cryptography':
- try:
- result = load_pem_private_key(priv_key_detail,
- None if passphrase is None else to_bytes(passphrase),
- cryptography_backend())
- except TypeError as dummy:
- raise OpenSSLBadPassphraseError('Wrong or empty passphrase provided for private key')
- except ValueError as dummy:
- raise OpenSSLBadPassphraseError('Wrong passphrase provided for private key')
-
- return result
- except (IOError, OSError) as exc:
- raise OpenSSLObjectError(exc)
-
-
-def load_certificate(path, content=None, backend='pyopenssl'):
- """Load the specified certificate."""
-
- try:
- if content is None:
- with open(path, 'rb') as cert_fh:
- cert_content = cert_fh.read()
- else:
- cert_content = content
- if backend == 'pyopenssl':
- return crypto.load_certificate(crypto.FILETYPE_PEM, cert_content)
- elif backend == 'cryptography':
- return x509.load_pem_x509_certificate(cert_content, cryptography_backend())
- except (IOError, OSError) as exc:
- raise OpenSSLObjectError(exc)
-
-
-def load_certificate_request(path, content=None, backend='pyopenssl'):
- """Load the specified certificate signing request."""
- try:
- if content is None:
- with open(path, 'rb') as csr_fh:
- csr_content = csr_fh.read()
- else:
- csr_content = content
- except (IOError, OSError) as exc:
- raise OpenSSLObjectError(exc)
- if backend == 'pyopenssl':
- return crypto.load_certificate_request(crypto.FILETYPE_PEM, csr_content)
- elif backend == 'cryptography':
- return x509.load_pem_x509_csr(csr_content, cryptography_backend())
-
-
-def parse_name_field(input_dict):
- """Take a dict with key: value or key: list_of_values mappings and return a list of tuples"""
-
- result = []
- for key in input_dict:
- if isinstance(input_dict[key], list):
- for entry in input_dict[key]:
- result.append((key, entry))
- else:
- result.append((key, input_dict[key]))
- return result
-
-
-def convert_relative_to_datetime(relative_time_string):
- """Get a datetime.datetime or None from a string in the time format described in sshd_config(5)"""
-
- parsed_result = re.match(
- r"^(?P<prefix>[+-])((?P<weeks>\d+)[wW])?((?P<days>\d+)[dD])?((?P<hours>\d+)[hH])?((?P<minutes>\d+)[mM])?((?P<seconds>\d+)[sS]?)?$",
- relative_time_string)
-
- if parsed_result is None or len(relative_time_string) == 1:
- # not matched or only a single "+" or "-"
- return None
-
- offset = datetime.timedelta(0)
- if parsed_result.group("weeks") is not None:
- offset += datetime.timedelta(weeks=int(parsed_result.group("weeks")))
- if parsed_result.group("days") is not None:
- offset += datetime.timedelta(days=int(parsed_result.group("days")))
- if parsed_result.group("hours") is not None:
- offset += datetime.timedelta(hours=int(parsed_result.group("hours")))
- if parsed_result.group("minutes") is not None:
- offset += datetime.timedelta(
- minutes=int(parsed_result.group("minutes")))
- if parsed_result.group("seconds") is not None:
- offset += datetime.timedelta(
- seconds=int(parsed_result.group("seconds")))
-
- if parsed_result.group("prefix") == "+":
- return datetime.datetime.utcnow() + offset
- else:
- return datetime.datetime.utcnow() - offset
-
-
-def get_relative_time_option(input_string, input_name, backend='cryptography'):
- """Return an absolute timespec if a relative timespec or an ASN1 formatted
- string is provided.
-
- The return value will be a datetime object for the cryptography backend,
- and an ASN1 formatted string for the pyopenssl backend."""
- result = to_native(input_string)
- if result is None:
- raise OpenSSLObjectError(
- 'The timespec "%s" for %s is not valid' %
- (input_string, input_name))
- # Relative time
- if result.startswith("+") or result.startswith("-"):
- result_datetime = convert_relative_to_datetime(result)
- if backend == 'pyopenssl':
- return result_datetime.strftime("%Y%m%d%H%M%SZ")
- elif backend == 'cryptography':
- return result_datetime
- # Absolute time
- if backend == 'pyopenssl':
- return input_string
- elif backend == 'cryptography':
- for date_fmt in ['%Y%m%d%H%M%SZ', '%Y%m%d%H%MZ', '%Y%m%d%H%M%S%z', '%Y%m%d%H%M%z']:
- try:
- return datetime.datetime.strptime(result, date_fmt)
- except ValueError:
- pass
-
- raise OpenSSLObjectError(
- 'The time spec "%s" for %s is invalid' %
- (input_string, input_name)
- )
-
-
-def select_message_digest(digest_string):
- digest = None
- if digest_string == 'sha256':
- digest = hashes.SHA256()
- elif digest_string == 'sha384':
- digest = hashes.SHA384()
- elif digest_string == 'sha512':
- digest = hashes.SHA512()
- elif digest_string == 'sha1':
- digest = hashes.SHA1()
- elif digest_string == 'md5':
- digest = hashes.MD5()
- return digest
-
-
-def write_file(module, content, default_mode=None, path=None):
- '''
- Writes content into destination file as securely as possible.
- Uses file arguments from module.
- '''
- # Find out parameters for file
- file_args = module.load_file_common_arguments(module.params, path=path)
- if file_args['mode'] is None:
- file_args['mode'] = default_mode
- # Create tempfile name
- tmp_fd, tmp_name = tempfile.mkstemp(prefix=b'.ansible_tmp')
- try:
- os.close(tmp_fd)
- except Exception as dummy:
- pass
- module.add_cleanup_file(tmp_name) # if we fail, let Ansible try to remove the file
- try:
- try:
- # Create tempfile
- file = os.open(tmp_name, os.O_WRONLY | os.O_CREAT | os.O_TRUNC, 0o600)
- os.write(file, content)
- os.close(file)
- except Exception as e:
- try:
- os.remove(tmp_name)
- except Exception as dummy:
- pass
- module.fail_json(msg='Error while writing result into temporary file: {0}'.format(e))
- # Update destination to wanted permissions
- if os.path.exists(file_args['path']):
- module.set_fs_attributes_if_different(file_args, False)
- # Move tempfile to final destination
- module.atomic_move(tmp_name, file_args['path'])
- # Try to update permissions again
- module.set_fs_attributes_if_different(file_args, False)
- except Exception as e:
- try:
- os.remove(tmp_name)
- except Exception as dummy:
- pass
- module.fail_json(msg='Error while writing result: {0}'.format(e))
-
-
-@six.add_metaclass(abc.ABCMeta)
-class OpenSSLObject(object):
-
- def __init__(self, path, state, force, check_mode):
- self.path = path
- self.state = state
- self.force = force
- self.name = os.path.basename(path)
- self.changed = False
- self.check_mode = check_mode
-
- def check(self, module, perms_required=True):
- """Ensure the resource is in its desired state."""
-
- def _check_state():
- return os.path.exists(self.path)
-
- def _check_perms(module):
- file_args = module.load_file_common_arguments(module.params)
- return not module.set_fs_attributes_if_different(file_args, False)
-
- if not perms_required:
- return _check_state()
-
- return _check_state() and _check_perms(module)
-
- @abc.abstractmethod
- def dump(self):
- """Serialize the object into a dictionary."""
-
- pass
-
- @abc.abstractmethod
- def generate(self):
- """Generate the resource."""
-
- pass
-
- def remove(self, module):
- """Remove the resource from the filesystem."""
-
- try:
- os.remove(self.path)
- self.changed = True
- except OSError as exc:
- if exc.errno != errno.ENOENT:
- raise OpenSSLObjectError(exc)
- else:
- pass
-
-
-# #####################################################################################
-# #####################################################################################
-# This has been extracted from the OpenSSL project's objects.txt:
-# https://github.com/openssl/openssl/blob/9537fe5757bb07761fa275d779bbd40bcf5530e4/crypto/objects/objects.txt
-# Extracted with https://gist.github.com/felixfontein/376748017ad65ead093d56a45a5bf376
-#
-# In case the following data structure has any copyrightable content, note that it is licensed as follows:
-# Copyright (c) the OpenSSL contributors
-# Licensed under the Apache License 2.0
-# https://github.com/openssl/openssl/blob/master/LICENSE
-_OID_MAP = {
- '0': ('itu-t', 'ITU-T', 'ccitt'),
- '0.3.4401.5': ('ntt-ds', ),
- '0.3.4401.5.3.1.9': ('camellia', ),
- '0.3.4401.5.3.1.9.1': ('camellia-128-ecb', 'CAMELLIA-128-ECB'),
- '0.3.4401.5.3.1.9.3': ('camellia-128-ofb', 'CAMELLIA-128-OFB'),
- '0.3.4401.5.3.1.9.4': ('camellia-128-cfb', 'CAMELLIA-128-CFB'),
- '0.3.4401.5.3.1.9.6': ('camellia-128-gcm', 'CAMELLIA-128-GCM'),
- '0.3.4401.5.3.1.9.7': ('camellia-128-ccm', 'CAMELLIA-128-CCM'),
- '0.3.4401.5.3.1.9.9': ('camellia-128-ctr', 'CAMELLIA-128-CTR'),
- '0.3.4401.5.3.1.9.10': ('camellia-128-cmac', 'CAMELLIA-128-CMAC'),
- '0.3.4401.5.3.1.9.21': ('camellia-192-ecb', 'CAMELLIA-192-ECB'),
- '0.3.4401.5.3.1.9.23': ('camellia-192-ofb', 'CAMELLIA-192-OFB'),
- '0.3.4401.5.3.1.9.24': ('camellia-192-cfb', 'CAMELLIA-192-CFB'),
- '0.3.4401.5.3.1.9.26': ('camellia-192-gcm', 'CAMELLIA-192-GCM'),
- '0.3.4401.5.3.1.9.27': ('camellia-192-ccm', 'CAMELLIA-192-CCM'),
- '0.3.4401.5.3.1.9.29': ('camellia-192-ctr', 'CAMELLIA-192-CTR'),
- '0.3.4401.5.3.1.9.30': ('camellia-192-cmac', 'CAMELLIA-192-CMAC'),
- '0.3.4401.5.3.1.9.41': ('camellia-256-ecb', 'CAMELLIA-256-ECB'),
- '0.3.4401.5.3.1.9.43': ('camellia-256-ofb', 'CAMELLIA-256-OFB'),
- '0.3.4401.5.3.1.9.44': ('camellia-256-cfb', 'CAMELLIA-256-CFB'),
- '0.3.4401.5.3.1.9.46': ('camellia-256-gcm', 'CAMELLIA-256-GCM'),
- '0.3.4401.5.3.1.9.47': ('camellia-256-ccm', 'CAMELLIA-256-CCM'),
- '0.3.4401.5.3.1.9.49': ('camellia-256-ctr', 'CAMELLIA-256-CTR'),
- '0.3.4401.5.3.1.9.50': ('camellia-256-cmac', 'CAMELLIA-256-CMAC'),
- '0.9': ('data', ),
- '0.9.2342': ('pss', ),
- '0.9.2342.19200300': ('ucl', ),
- '0.9.2342.19200300.100': ('pilot', ),
- '0.9.2342.19200300.100.1': ('pilotAttributeType', ),
- '0.9.2342.19200300.100.1.1': ('userId', 'UID'),
- '0.9.2342.19200300.100.1.2': ('textEncodedORAddress', ),
- '0.9.2342.19200300.100.1.3': ('rfc822Mailbox', 'mail'),
- '0.9.2342.19200300.100.1.4': ('info', ),
- '0.9.2342.19200300.100.1.5': ('favouriteDrink', ),
- '0.9.2342.19200300.100.1.6': ('roomNumber', ),
- '0.9.2342.19200300.100.1.7': ('photo', ),
- '0.9.2342.19200300.100.1.8': ('userClass', ),
- '0.9.2342.19200300.100.1.9': ('host', ),
- '0.9.2342.19200300.100.1.10': ('manager', ),
- '0.9.2342.19200300.100.1.11': ('documentIdentifier', ),
- '0.9.2342.19200300.100.1.12': ('documentTitle', ),
- '0.9.2342.19200300.100.1.13': ('documentVersion', ),
- '0.9.2342.19200300.100.1.14': ('documentAuthor', ),
- '0.9.2342.19200300.100.1.15': ('documentLocation', ),
- '0.9.2342.19200300.100.1.20': ('homeTelephoneNumber', ),
- '0.9.2342.19200300.100.1.21': ('secretary', ),
- '0.9.2342.19200300.100.1.22': ('otherMailbox', ),
- '0.9.2342.19200300.100.1.23': ('lastModifiedTime', ),
- '0.9.2342.19200300.100.1.24': ('lastModifiedBy', ),
- '0.9.2342.19200300.100.1.25': ('domainComponent', 'DC'),
- '0.9.2342.19200300.100.1.26': ('aRecord', ),
- '0.9.2342.19200300.100.1.27': ('pilotAttributeType27', ),
- '0.9.2342.19200300.100.1.28': ('mXRecord', ),
- '0.9.2342.19200300.100.1.29': ('nSRecord', ),
- '0.9.2342.19200300.100.1.30': ('sOARecord', ),
- '0.9.2342.19200300.100.1.31': ('cNAMERecord', ),
- '0.9.2342.19200300.100.1.37': ('associatedDomain', ),
- '0.9.2342.19200300.100.1.38': ('associatedName', ),
- '0.9.2342.19200300.100.1.39': ('homePostalAddress', ),
- '0.9.2342.19200300.100.1.40': ('personalTitle', ),
- '0.9.2342.19200300.100.1.41': ('mobileTelephoneNumber', ),
- '0.9.2342.19200300.100.1.42': ('pagerTelephoneNumber', ),
- '0.9.2342.19200300.100.1.43': ('friendlyCountryName', ),
- '0.9.2342.19200300.100.1.44': ('uniqueIdentifier', 'uid'),
- '0.9.2342.19200300.100.1.45': ('organizationalStatus', ),
- '0.9.2342.19200300.100.1.46': ('janetMailbox', ),
- '0.9.2342.19200300.100.1.47': ('mailPreferenceOption', ),
- '0.9.2342.19200300.100.1.48': ('buildingName', ),
- '0.9.2342.19200300.100.1.49': ('dSAQuality', ),
- '0.9.2342.19200300.100.1.50': ('singleLevelQuality', ),
- '0.9.2342.19200300.100.1.51': ('subtreeMinimumQuality', ),
- '0.9.2342.19200300.100.1.52': ('subtreeMaximumQuality', ),
- '0.9.2342.19200300.100.1.53': ('personalSignature', ),
- '0.9.2342.19200300.100.1.54': ('dITRedirect', ),
- '0.9.2342.19200300.100.1.55': ('audio', ),
- '0.9.2342.19200300.100.1.56': ('documentPublisher', ),
- '0.9.2342.19200300.100.3': ('pilotAttributeSyntax', ),
- '0.9.2342.19200300.100.3.4': ('iA5StringSyntax', ),
- '0.9.2342.19200300.100.3.5': ('caseIgnoreIA5StringSyntax', ),
- '0.9.2342.19200300.100.4': ('pilotObjectClass', ),
- '0.9.2342.19200300.100.4.3': ('pilotObject', ),
- '0.9.2342.19200300.100.4.4': ('pilotPerson', ),
- '0.9.2342.19200300.100.4.5': ('account', ),
- '0.9.2342.19200300.100.4.6': ('document', ),
- '0.9.2342.19200300.100.4.7': ('room', ),
- '0.9.2342.19200300.100.4.9': ('documentSeries', ),
- '0.9.2342.19200300.100.4.13': ('Domain', 'domain'),
- '0.9.2342.19200300.100.4.14': ('rFC822localPart', ),
- '0.9.2342.19200300.100.4.15': ('dNSDomain', ),
- '0.9.2342.19200300.100.4.17': ('domainRelatedObject', ),
- '0.9.2342.19200300.100.4.18': ('friendlyCountry', ),
- '0.9.2342.19200300.100.4.19': ('simpleSecurityObject', ),
- '0.9.2342.19200300.100.4.20': ('pilotOrganization', ),
- '0.9.2342.19200300.100.4.21': ('pilotDSA', ),
- '0.9.2342.19200300.100.4.22': ('qualityLabelledData', ),
- '0.9.2342.19200300.100.10': ('pilotGroups', ),
- '1': ('iso', 'ISO'),
- '1.0.9797.3.4': ('gmac', 'GMAC'),
- '1.0.10118.3.0.55': ('whirlpool', ),
- '1.2': ('ISO Member Body', 'member-body'),
- '1.2.156': ('ISO CN Member Body', 'ISO-CN'),
- '1.2.156.10197': ('oscca', ),
- '1.2.156.10197.1': ('sm-scheme', ),
- '1.2.156.10197.1.104.1': ('sm4-ecb', 'SM4-ECB'),
- '1.2.156.10197.1.104.2': ('sm4-cbc', 'SM4-CBC'),
- '1.2.156.10197.1.104.3': ('sm4-ofb', 'SM4-OFB'),
- '1.2.156.10197.1.104.4': ('sm4-cfb', 'SM4-CFB'),
- '1.2.156.10197.1.104.5': ('sm4-cfb1', 'SM4-CFB1'),
- '1.2.156.10197.1.104.6': ('sm4-cfb8', 'SM4-CFB8'),
- '1.2.156.10197.1.104.7': ('sm4-ctr', 'SM4-CTR'),
- '1.2.156.10197.1.301': ('sm2', 'SM2'),
- '1.2.156.10197.1.401': ('sm3', 'SM3'),
- '1.2.156.10197.1.501': ('SM2-with-SM3', 'SM2-SM3'),
- '1.2.156.10197.1.504': ('sm3WithRSAEncryption', 'RSA-SM3'),
- '1.2.392.200011.61.1.1.1.2': ('camellia-128-cbc', 'CAMELLIA-128-CBC'),
- '1.2.392.200011.61.1.1.1.3': ('camellia-192-cbc', 'CAMELLIA-192-CBC'),
- '1.2.392.200011.61.1.1.1.4': ('camellia-256-cbc', 'CAMELLIA-256-CBC'),
- '1.2.392.200011.61.1.1.3.2': ('id-camellia128-wrap', ),
- '1.2.392.200011.61.1.1.3.3': ('id-camellia192-wrap', ),
- '1.2.392.200011.61.1.1.3.4': ('id-camellia256-wrap', ),
- '1.2.410.200004': ('kisa', 'KISA'),
- '1.2.410.200004.1.3': ('seed-ecb', 'SEED-ECB'),
- '1.2.410.200004.1.4': ('seed-cbc', 'SEED-CBC'),
- '1.2.410.200004.1.5': ('seed-cfb', 'SEED-CFB'),
- '1.2.410.200004.1.6': ('seed-ofb', 'SEED-OFB'),
- '1.2.410.200046.1.1': ('aria', ),
- '1.2.410.200046.1.1.1': ('aria-128-ecb', 'ARIA-128-ECB'),
- '1.2.410.200046.1.1.2': ('aria-128-cbc', 'ARIA-128-CBC'),
- '1.2.410.200046.1.1.3': ('aria-128-cfb', 'ARIA-128-CFB'),
- '1.2.410.200046.1.1.4': ('aria-128-ofb', 'ARIA-128-OFB'),
- '1.2.410.200046.1.1.5': ('aria-128-ctr', 'ARIA-128-CTR'),
- '1.2.410.200046.1.1.6': ('aria-192-ecb', 'ARIA-192-ECB'),
- '1.2.410.200046.1.1.7': ('aria-192-cbc', 'ARIA-192-CBC'),
- '1.2.410.200046.1.1.8': ('aria-192-cfb', 'ARIA-192-CFB'),
- '1.2.410.200046.1.1.9': ('aria-192-ofb', 'ARIA-192-OFB'),
- '1.2.410.200046.1.1.10': ('aria-192-ctr', 'ARIA-192-CTR'),
- '1.2.410.200046.1.1.11': ('aria-256-ecb', 'ARIA-256-ECB'),
- '1.2.410.200046.1.1.12': ('aria-256-cbc', 'ARIA-256-CBC'),
- '1.2.410.200046.1.1.13': ('aria-256-cfb', 'ARIA-256-CFB'),
- '1.2.410.200046.1.1.14': ('aria-256-ofb', 'ARIA-256-OFB'),
- '1.2.410.200046.1.1.15': ('aria-256-ctr', 'ARIA-256-CTR'),
- '1.2.410.200046.1.1.34': ('aria-128-gcm', 'ARIA-128-GCM'),
- '1.2.410.200046.1.1.35': ('aria-192-gcm', 'ARIA-192-GCM'),
- '1.2.410.200046.1.1.36': ('aria-256-gcm', 'ARIA-256-GCM'),
- '1.2.410.200046.1.1.37': ('aria-128-ccm', 'ARIA-128-CCM'),
- '1.2.410.200046.1.1.38': ('aria-192-ccm', 'ARIA-192-CCM'),
- '1.2.410.200046.1.1.39': ('aria-256-ccm', 'ARIA-256-CCM'),
- '1.2.643.2.2': ('cryptopro', ),
- '1.2.643.2.2.3': ('GOST R 34.11-94 with GOST R 34.10-2001', 'id-GostR3411-94-with-GostR3410-2001'),
- '1.2.643.2.2.4': ('GOST R 34.11-94 with GOST R 34.10-94', 'id-GostR3411-94-with-GostR3410-94'),
- '1.2.643.2.2.9': ('GOST R 34.11-94', 'md_gost94'),
- '1.2.643.2.2.10': ('HMAC GOST 34.11-94', 'id-HMACGostR3411-94'),
- '1.2.643.2.2.14.0': ('id-Gost28147-89-None-KeyMeshing', ),
- '1.2.643.2.2.14.1': ('id-Gost28147-89-CryptoPro-KeyMeshing', ),
- '1.2.643.2.2.19': ('GOST R 34.10-2001', 'gost2001'),
- '1.2.643.2.2.20': ('GOST R 34.10-94', 'gost94'),
- '1.2.643.2.2.20.1': ('id-GostR3410-94-a', ),
- '1.2.643.2.2.20.2': ('id-GostR3410-94-aBis', ),
- '1.2.643.2.2.20.3': ('id-GostR3410-94-b', ),
- '1.2.643.2.2.20.4': ('id-GostR3410-94-bBis', ),
- '1.2.643.2.2.21': ('GOST 28147-89', 'gost89'),
- '1.2.643.2.2.22': ('GOST 28147-89 MAC', 'gost-mac'),
- '1.2.643.2.2.23': ('GOST R 34.11-94 PRF', 'prf-gostr3411-94'),
- '1.2.643.2.2.30.0': ('id-GostR3411-94-TestParamSet', ),
- '1.2.643.2.2.30.1': ('id-GostR3411-94-CryptoProParamSet', ),
- '1.2.643.2.2.31.0': ('id-Gost28147-89-TestParamSet', ),
- '1.2.643.2.2.31.1': ('id-Gost28147-89-CryptoPro-A-ParamSet', ),
- '1.2.643.2.2.31.2': ('id-Gost28147-89-CryptoPro-B-ParamSet', ),
- '1.2.643.2.2.31.3': ('id-Gost28147-89-CryptoPro-C-ParamSet', ),
- '1.2.643.2.2.31.4': ('id-Gost28147-89-CryptoPro-D-ParamSet', ),
- '1.2.643.2.2.31.5': ('id-Gost28147-89-CryptoPro-Oscar-1-1-ParamSet', ),
- '1.2.643.2.2.31.6': ('id-Gost28147-89-CryptoPro-Oscar-1-0-ParamSet', ),
- '1.2.643.2.2.31.7': ('id-Gost28147-89-CryptoPro-RIC-1-ParamSet', ),
- '1.2.643.2.2.32.0': ('id-GostR3410-94-TestParamSet', ),
- '1.2.643.2.2.32.2': ('id-GostR3410-94-CryptoPro-A-ParamSet', ),
- '1.2.643.2.2.32.3': ('id-GostR3410-94-CryptoPro-B-ParamSet', ),
- '1.2.643.2.2.32.4': ('id-GostR3410-94-CryptoPro-C-ParamSet', ),
- '1.2.643.2.2.32.5': ('id-GostR3410-94-CryptoPro-D-ParamSet', ),
- '1.2.643.2.2.33.1': ('id-GostR3410-94-CryptoPro-XchA-ParamSet', ),
- '1.2.643.2.2.33.2': ('id-GostR3410-94-CryptoPro-XchB-ParamSet', ),
- '1.2.643.2.2.33.3': ('id-GostR3410-94-CryptoPro-XchC-ParamSet', ),
- '1.2.643.2.2.35.0': ('id-GostR3410-2001-TestParamSet', ),
- '1.2.643.2.2.35.1': ('id-GostR3410-2001-CryptoPro-A-ParamSet', ),
- '1.2.643.2.2.35.2': ('id-GostR3410-2001-CryptoPro-B-ParamSet', ),
- '1.2.643.2.2.35.3': ('id-GostR3410-2001-CryptoPro-C-ParamSet', ),
- '1.2.643.2.2.36.0': ('id-GostR3410-2001-CryptoPro-XchA-ParamSet', ),
- '1.2.643.2.2.36.1': ('id-GostR3410-2001-CryptoPro-XchB-ParamSet', ),
- '1.2.643.2.2.98': ('GOST R 34.10-2001 DH', 'id-GostR3410-2001DH'),
- '1.2.643.2.2.99': ('GOST R 34.10-94 DH', 'id-GostR3410-94DH'),
- '1.2.643.2.9': ('cryptocom', ),
- '1.2.643.2.9.1.3.3': ('GOST R 34.11-94 with GOST R 34.10-94 Cryptocom', 'id-GostR3411-94-with-GostR3410-94-cc'),
- '1.2.643.2.9.1.3.4': ('GOST R 34.11-94 with GOST R 34.10-2001 Cryptocom', 'id-GostR3411-94-with-GostR3410-2001-cc'),
- '1.2.643.2.9.1.5.3': ('GOST 34.10-94 Cryptocom', 'gost94cc'),
- '1.2.643.2.9.1.5.4': ('GOST 34.10-2001 Cryptocom', 'gost2001cc'),
- '1.2.643.2.9.1.6.1': ('GOST 28147-89 Cryptocom ParamSet', 'id-Gost28147-89-cc'),
- '1.2.643.2.9.1.8.1': ('GOST R 3410-2001 Parameter Set Cryptocom', 'id-GostR3410-2001-ParamSet-cc'),
- '1.2.643.3.131.1.1': ('INN', 'INN'),
- '1.2.643.7.1': ('id-tc26', ),
- '1.2.643.7.1.1': ('id-tc26-algorithms', ),
- '1.2.643.7.1.1.1': ('id-tc26-sign', ),
- '1.2.643.7.1.1.1.1': ('GOST R 34.10-2012 with 256 bit modulus', 'gost2012_256'),
- '1.2.643.7.1.1.1.2': ('GOST R 34.10-2012 with 512 bit modulus', 'gost2012_512'),
- '1.2.643.7.1.1.2': ('id-tc26-digest', ),
- '1.2.643.7.1.1.2.2': ('GOST R 34.11-2012 with 256 bit hash', 'md_gost12_256'),
- '1.2.643.7.1.1.2.3': ('GOST R 34.11-2012 with 512 bit hash', 'md_gost12_512'),
- '1.2.643.7.1.1.3': ('id-tc26-signwithdigest', ),
- '1.2.643.7.1.1.3.2': ('GOST R 34.10-2012 with GOST R 34.11-2012 (256 bit)', 'id-tc26-signwithdigest-gost3410-2012-256'),
- '1.2.643.7.1.1.3.3': ('GOST R 34.10-2012 with GOST R 34.11-2012 (512 bit)', 'id-tc26-signwithdigest-gost3410-2012-512'),
- '1.2.643.7.1.1.4': ('id-tc26-mac', ),
- '1.2.643.7.1.1.4.1': ('HMAC GOST 34.11-2012 256 bit', 'id-tc26-hmac-gost-3411-2012-256'),
- '1.2.643.7.1.1.4.2': ('HMAC GOST 34.11-2012 512 bit', 'id-tc26-hmac-gost-3411-2012-512'),
- '1.2.643.7.1.1.5': ('id-tc26-cipher', ),
- '1.2.643.7.1.1.5.1': ('id-tc26-cipher-gostr3412-2015-magma', ),
- '1.2.643.7.1.1.5.1.1': ('id-tc26-cipher-gostr3412-2015-magma-ctracpkm', ),
- '1.2.643.7.1.1.5.1.2': ('id-tc26-cipher-gostr3412-2015-magma-ctracpkm-omac', ),
- '1.2.643.7.1.1.5.2': ('id-tc26-cipher-gostr3412-2015-kuznyechik', ),
- '1.2.643.7.1.1.5.2.1': ('id-tc26-cipher-gostr3412-2015-kuznyechik-ctracpkm', ),
- '1.2.643.7.1.1.5.2.2': ('id-tc26-cipher-gostr3412-2015-kuznyechik-ctracpkm-omac', ),
- '1.2.643.7.1.1.6': ('id-tc26-agreement', ),
- '1.2.643.7.1.1.6.1': ('id-tc26-agreement-gost-3410-2012-256', ),
- '1.2.643.7.1.1.6.2': ('id-tc26-agreement-gost-3410-2012-512', ),
- '1.2.643.7.1.1.7': ('id-tc26-wrap', ),
- '1.2.643.7.1.1.7.1': ('id-tc26-wrap-gostr3412-2015-magma', ),
- '1.2.643.7.1.1.7.1.1': ('id-tc26-wrap-gostr3412-2015-magma-kexp15', 'id-tc26-wrap-gostr3412-2015-kuznyechik-kexp15'),
- '1.2.643.7.1.1.7.2': ('id-tc26-wrap-gostr3412-2015-kuznyechik', ),
- '1.2.643.7.1.2': ('id-tc26-constants', ),
- '1.2.643.7.1.2.1': ('id-tc26-sign-constants', ),
- '1.2.643.7.1.2.1.1': ('id-tc26-gost-3410-2012-256-constants', ),
- '1.2.643.7.1.2.1.1.1': ('GOST R 34.10-2012 (256 bit) ParamSet A', 'id-tc26-gost-3410-2012-256-paramSetA'),
- '1.2.643.7.1.2.1.1.2': ('GOST R 34.10-2012 (256 bit) ParamSet B', 'id-tc26-gost-3410-2012-256-paramSetB'),
- '1.2.643.7.1.2.1.1.3': ('GOST R 34.10-2012 (256 bit) ParamSet C', 'id-tc26-gost-3410-2012-256-paramSetC'),
- '1.2.643.7.1.2.1.1.4': ('GOST R 34.10-2012 (256 bit) ParamSet D', 'id-tc26-gost-3410-2012-256-paramSetD'),
- '1.2.643.7.1.2.1.2': ('id-tc26-gost-3410-2012-512-constants', ),
- '1.2.643.7.1.2.1.2.0': ('GOST R 34.10-2012 (512 bit) testing parameter set', 'id-tc26-gost-3410-2012-512-paramSetTest'),
- '1.2.643.7.1.2.1.2.1': ('GOST R 34.10-2012 (512 bit) ParamSet A', 'id-tc26-gost-3410-2012-512-paramSetA'),
- '1.2.643.7.1.2.1.2.2': ('GOST R 34.10-2012 (512 bit) ParamSet B', 'id-tc26-gost-3410-2012-512-paramSetB'),
- '1.2.643.7.1.2.1.2.3': ('GOST R 34.10-2012 (512 bit) ParamSet C', 'id-tc26-gost-3410-2012-512-paramSetC'),
- '1.2.643.7.1.2.2': ('id-tc26-digest-constants', ),
- '1.2.643.7.1.2.5': ('id-tc26-cipher-constants', ),
- '1.2.643.7.1.2.5.1': ('id-tc26-gost-28147-constants', ),
- '1.2.643.7.1.2.5.1.1': ('GOST 28147-89 TC26 parameter set', 'id-tc26-gost-28147-param-Z'),
- '1.2.643.100.1': ('OGRN', 'OGRN'),
- '1.2.643.100.3': ('SNILS', 'SNILS'),
- '1.2.643.100.111': ('Signing Tool of Subject', 'subjectSignTool'),
- '1.2.643.100.112': ('Signing Tool of Issuer', 'issuerSignTool'),
- '1.2.804': ('ISO-UA', ),
- '1.2.804.2.1.1.1': ('ua-pki', ),
- '1.2.804.2.1.1.1.1.1.1': ('DSTU Gost 28147-2009', 'dstu28147'),
- '1.2.804.2.1.1.1.1.1.1.2': ('DSTU Gost 28147-2009 OFB mode', 'dstu28147-ofb'),
- '1.2.804.2.1.1.1.1.1.1.3': ('DSTU Gost 28147-2009 CFB mode', 'dstu28147-cfb'),
- '1.2.804.2.1.1.1.1.1.1.5': ('DSTU Gost 28147-2009 key wrap', 'dstu28147-wrap'),
- '1.2.804.2.1.1.1.1.1.2': ('HMAC DSTU Gost 34311-95', 'hmacWithDstu34311'),
- '1.2.804.2.1.1.1.1.2.1': ('DSTU Gost 34311-95', 'dstu34311'),
- '1.2.804.2.1.1.1.1.3.1.1': ('DSTU 4145-2002 little endian', 'dstu4145le'),
- '1.2.804.2.1.1.1.1.3.1.1.1.1': ('DSTU 4145-2002 big endian', 'dstu4145be'),
- '1.2.804.2.1.1.1.1.3.1.1.2.0': ('DSTU curve 0', 'uacurve0'),
- '1.2.804.2.1.1.1.1.3.1.1.2.1': ('DSTU curve 1', 'uacurve1'),
- '1.2.804.2.1.1.1.1.3.1.1.2.2': ('DSTU curve 2', 'uacurve2'),
- '1.2.804.2.1.1.1.1.3.1.1.2.3': ('DSTU curve 3', 'uacurve3'),
- '1.2.804.2.1.1.1.1.3.1.1.2.4': ('DSTU curve 4', 'uacurve4'),
- '1.2.804.2.1.1.1.1.3.1.1.2.5': ('DSTU curve 5', 'uacurve5'),
- '1.2.804.2.1.1.1.1.3.1.1.2.6': ('DSTU curve 6', 'uacurve6'),
- '1.2.804.2.1.1.1.1.3.1.1.2.7': ('DSTU curve 7', 'uacurve7'),
- '1.2.804.2.1.1.1.1.3.1.1.2.8': ('DSTU curve 8', 'uacurve8'),
- '1.2.804.2.1.1.1.1.3.1.1.2.9': ('DSTU curve 9', 'uacurve9'),
- '1.2.840': ('ISO US Member Body', 'ISO-US'),
- '1.2.840.10040': ('X9.57', 'X9-57'),
- '1.2.840.10040.2': ('holdInstruction', ),
- '1.2.840.10040.2.1': ('Hold Instruction None', 'holdInstructionNone'),
- '1.2.840.10040.2.2': ('Hold Instruction Call Issuer', 'holdInstructionCallIssuer'),
- '1.2.840.10040.2.3': ('Hold Instruction Reject', 'holdInstructionReject'),
- '1.2.840.10040.4': ('X9.57 CM ?', 'X9cm'),
- '1.2.840.10040.4.1': ('dsaEncryption', 'DSA'),
- '1.2.840.10040.4.3': ('dsaWithSHA1', 'DSA-SHA1'),
- '1.2.840.10045': ('ANSI X9.62', 'ansi-X9-62'),
- '1.2.840.10045.1': ('id-fieldType', ),
- '1.2.840.10045.1.1': ('prime-field', ),
- '1.2.840.10045.1.2': ('characteristic-two-field', ),
- '1.2.840.10045.1.2.3': ('id-characteristic-two-basis', ),
- '1.2.840.10045.1.2.3.1': ('onBasis', ),
- '1.2.840.10045.1.2.3.2': ('tpBasis', ),
- '1.2.840.10045.1.2.3.3': ('ppBasis', ),
- '1.2.840.10045.2': ('id-publicKeyType', ),
- '1.2.840.10045.2.1': ('id-ecPublicKey', ),
- '1.2.840.10045.3': ('ellipticCurve', ),
- '1.2.840.10045.3.0': ('c-TwoCurve', ),
- '1.2.840.10045.3.0.1': ('c2pnb163v1', ),
- '1.2.840.10045.3.0.2': ('c2pnb163v2', ),
- '1.2.840.10045.3.0.3': ('c2pnb163v3', ),
- '1.2.840.10045.3.0.4': ('c2pnb176v1', ),
- '1.2.840.10045.3.0.5': ('c2tnb191v1', ),
- '1.2.840.10045.3.0.6': ('c2tnb191v2', ),
- '1.2.840.10045.3.0.7': ('c2tnb191v3', ),
- '1.2.840.10045.3.0.8': ('c2onb191v4', ),
- '1.2.840.10045.3.0.9': ('c2onb191v5', ),
- '1.2.840.10045.3.0.10': ('c2pnb208w1', ),
- '1.2.840.10045.3.0.11': ('c2tnb239v1', ),
- '1.2.840.10045.3.0.12': ('c2tnb239v2', ),
- '1.2.840.10045.3.0.13': ('c2tnb239v3', ),
- '1.2.840.10045.3.0.14': ('c2onb239v4', ),
- '1.2.840.10045.3.0.15': ('c2onb239v5', ),
- '1.2.840.10045.3.0.16': ('c2pnb272w1', ),
- '1.2.840.10045.3.0.17': ('c2pnb304w1', ),
- '1.2.840.10045.3.0.18': ('c2tnb359v1', ),
- '1.2.840.10045.3.0.19': ('c2pnb368w1', ),
- '1.2.840.10045.3.0.20': ('c2tnb431r1', ),
- '1.2.840.10045.3.1': ('primeCurve', ),
- '1.2.840.10045.3.1.1': ('prime192v1', ),
- '1.2.840.10045.3.1.2': ('prime192v2', ),
- '1.2.840.10045.3.1.3': ('prime192v3', ),
- '1.2.840.10045.3.1.4': ('prime239v1', ),
- '1.2.840.10045.3.1.5': ('prime239v2', ),
- '1.2.840.10045.3.1.6': ('prime239v3', ),
- '1.2.840.10045.3.1.7': ('prime256v1', ),
- '1.2.840.10045.4': ('id-ecSigType', ),
- '1.2.840.10045.4.1': ('ecdsa-with-SHA1', ),
- '1.2.840.10045.4.2': ('ecdsa-with-Recommended', ),
- '1.2.840.10045.4.3': ('ecdsa-with-Specified', ),
- '1.2.840.10045.4.3.1': ('ecdsa-with-SHA224', ),
- '1.2.840.10045.4.3.2': ('ecdsa-with-SHA256', ),
- '1.2.840.10045.4.3.3': ('ecdsa-with-SHA384', ),
- '1.2.840.10045.4.3.4': ('ecdsa-with-SHA512', ),
- '1.2.840.10046.2.1': ('X9.42 DH', 'dhpublicnumber'),
- '1.2.840.113533.7.66.10': ('cast5-cbc', 'CAST5-CBC'),
- '1.2.840.113533.7.66.12': ('pbeWithMD5AndCast5CBC', ),
- '1.2.840.113533.7.66.13': ('password based MAC', 'id-PasswordBasedMAC'),
- '1.2.840.113533.7.66.30': ('Diffie-Hellman based MAC', 'id-DHBasedMac'),
- '1.2.840.113549': ('RSA Data Security, Inc.', 'rsadsi'),
- '1.2.840.113549.1': ('RSA Data Security, Inc. PKCS', 'pkcs'),
- '1.2.840.113549.1.1': ('pkcs1', ),
- '1.2.840.113549.1.1.1': ('rsaEncryption', ),
- '1.2.840.113549.1.1.2': ('md2WithRSAEncryption', 'RSA-MD2'),
- '1.2.840.113549.1.1.3': ('md4WithRSAEncryption', 'RSA-MD4'),
- '1.2.840.113549.1.1.4': ('md5WithRSAEncryption', 'RSA-MD5'),
- '1.2.840.113549.1.1.5': ('sha1WithRSAEncryption', 'RSA-SHA1'),
- '1.2.840.113549.1.1.6': ('rsaOAEPEncryptionSET', ),
- '1.2.840.113549.1.1.7': ('rsaesOaep', 'RSAES-OAEP'),
- '1.2.840.113549.1.1.8': ('mgf1', 'MGF1'),
- '1.2.840.113549.1.1.9': ('pSpecified', 'PSPECIFIED'),
- '1.2.840.113549.1.1.10': ('rsassaPss', 'RSASSA-PSS'),
- '1.2.840.113549.1.1.11': ('sha256WithRSAEncryption', 'RSA-SHA256'),
- '1.2.840.113549.1.1.12': ('sha384WithRSAEncryption', 'RSA-SHA384'),
- '1.2.840.113549.1.1.13': ('sha512WithRSAEncryption', 'RSA-SHA512'),
- '1.2.840.113549.1.1.14': ('sha224WithRSAEncryption', 'RSA-SHA224'),
- '1.2.840.113549.1.1.15': ('sha512-224WithRSAEncryption', 'RSA-SHA512/224'),
- '1.2.840.113549.1.1.16': ('sha512-256WithRSAEncryption', 'RSA-SHA512/256'),
- '1.2.840.113549.1.3': ('pkcs3', ),
- '1.2.840.113549.1.3.1': ('dhKeyAgreement', ),
- '1.2.840.113549.1.5': ('pkcs5', ),
- '1.2.840.113549.1.5.1': ('pbeWithMD2AndDES-CBC', 'PBE-MD2-DES'),
- '1.2.840.113549.1.5.3': ('pbeWithMD5AndDES-CBC', 'PBE-MD5-DES'),
- '1.2.840.113549.1.5.4': ('pbeWithMD2AndRC2-CBC', 'PBE-MD2-RC2-64'),
- '1.2.840.113549.1.5.6': ('pbeWithMD5AndRC2-CBC', 'PBE-MD5-RC2-64'),
- '1.2.840.113549.1.5.10': ('pbeWithSHA1AndDES-CBC', 'PBE-SHA1-DES'),
- '1.2.840.113549.1.5.11': ('pbeWithSHA1AndRC2-CBC', 'PBE-SHA1-RC2-64'),
- '1.2.840.113549.1.5.12': ('PBKDF2', ),
- '1.2.840.113549.1.5.13': ('PBES2', ),
- '1.2.840.113549.1.5.14': ('PBMAC1', ),
- '1.2.840.113549.1.7': ('pkcs7', ),
- '1.2.840.113549.1.7.1': ('pkcs7-data', ),
- '1.2.840.113549.1.7.2': ('pkcs7-signedData', ),
- '1.2.840.113549.1.7.3': ('pkcs7-envelopedData', ),
- '1.2.840.113549.1.7.4': ('pkcs7-signedAndEnvelopedData', ),
- '1.2.840.113549.1.7.5': ('pkcs7-digestData', ),
- '1.2.840.113549.1.7.6': ('pkcs7-encryptedData', ),
- '1.2.840.113549.1.9': ('pkcs9', ),
- '1.2.840.113549.1.9.1': ('emailAddress', ),
- '1.2.840.113549.1.9.2': ('unstructuredName', ),
- '1.2.840.113549.1.9.3': ('contentType', ),
- '1.2.840.113549.1.9.4': ('messageDigest', ),
- '1.2.840.113549.1.9.5': ('signingTime', ),
- '1.2.840.113549.1.9.6': ('countersignature', ),
- '1.2.840.113549.1.9.7': ('challengePassword', ),
- '1.2.840.113549.1.9.8': ('unstructuredAddress', ),
- '1.2.840.113549.1.9.9': ('extendedCertificateAttributes', ),
- '1.2.840.113549.1.9.14': ('Extension Request', 'extReq'),
- '1.2.840.113549.1.9.15': ('S/MIME Capabilities', 'SMIME-CAPS'),
- '1.2.840.113549.1.9.16': ('S/MIME', 'SMIME'),
- '1.2.840.113549.1.9.16.0': ('id-smime-mod', ),
- '1.2.840.113549.1.9.16.0.1': ('id-smime-mod-cms', ),
- '1.2.840.113549.1.9.16.0.2': ('id-smime-mod-ess', ),
- '1.2.840.113549.1.9.16.0.3': ('id-smime-mod-oid', ),
- '1.2.840.113549.1.9.16.0.4': ('id-smime-mod-msg-v3', ),
- '1.2.840.113549.1.9.16.0.5': ('id-smime-mod-ets-eSignature-88', ),
- '1.2.840.113549.1.9.16.0.6': ('id-smime-mod-ets-eSignature-97', ),
- '1.2.840.113549.1.9.16.0.7': ('id-smime-mod-ets-eSigPolicy-88', ),
- '1.2.840.113549.1.9.16.0.8': ('id-smime-mod-ets-eSigPolicy-97', ),
- '1.2.840.113549.1.9.16.1': ('id-smime-ct', ),
- '1.2.840.113549.1.9.16.1.1': ('id-smime-ct-receipt', ),
- '1.2.840.113549.1.9.16.1.2': ('id-smime-ct-authData', ),
- '1.2.840.113549.1.9.16.1.3': ('id-smime-ct-publishCert', ),
- '1.2.840.113549.1.9.16.1.4': ('id-smime-ct-TSTInfo', ),
- '1.2.840.113549.1.9.16.1.5': ('id-smime-ct-TDTInfo', ),
- '1.2.840.113549.1.9.16.1.6': ('id-smime-ct-contentInfo', ),
- '1.2.840.113549.1.9.16.1.7': ('id-smime-ct-DVCSRequestData', ),
- '1.2.840.113549.1.9.16.1.8': ('id-smime-ct-DVCSResponseData', ),
- '1.2.840.113549.1.9.16.1.9': ('id-smime-ct-compressedData', ),
- '1.2.840.113549.1.9.16.1.19': ('id-smime-ct-contentCollection', ),
- '1.2.840.113549.1.9.16.1.23': ('id-smime-ct-authEnvelopedData', ),
- '1.2.840.113549.1.9.16.1.27': ('id-ct-asciiTextWithCRLF', ),
- '1.2.840.113549.1.9.16.1.28': ('id-ct-xml', ),
- '1.2.840.113549.1.9.16.2': ('id-smime-aa', ),
- '1.2.840.113549.1.9.16.2.1': ('id-smime-aa-receiptRequest', ),
- '1.2.840.113549.1.9.16.2.2': ('id-smime-aa-securityLabel', ),
- '1.2.840.113549.1.9.16.2.3': ('id-smime-aa-mlExpandHistory', ),
- '1.2.840.113549.1.9.16.2.4': ('id-smime-aa-contentHint', ),
- '1.2.840.113549.1.9.16.2.5': ('id-smime-aa-msgSigDigest', ),
- '1.2.840.113549.1.9.16.2.6': ('id-smime-aa-encapContentType', ),
- '1.2.840.113549.1.9.16.2.7': ('id-smime-aa-contentIdentifier', ),
- '1.2.840.113549.1.9.16.2.8': ('id-smime-aa-macValue', ),
- '1.2.840.113549.1.9.16.2.9': ('id-smime-aa-equivalentLabels', ),
- '1.2.840.113549.1.9.16.2.10': ('id-smime-aa-contentReference', ),
- '1.2.840.113549.1.9.16.2.11': ('id-smime-aa-encrypKeyPref', ),
- '1.2.840.113549.1.9.16.2.12': ('id-smime-aa-signingCertificate', ),
- '1.2.840.113549.1.9.16.2.13': ('id-smime-aa-smimeEncryptCerts', ),
- '1.2.840.113549.1.9.16.2.14': ('id-smime-aa-timeStampToken', ),
- '1.2.840.113549.1.9.16.2.15': ('id-smime-aa-ets-sigPolicyId', ),
- '1.2.840.113549.1.9.16.2.16': ('id-smime-aa-ets-commitmentType', ),
- '1.2.840.113549.1.9.16.2.17': ('id-smime-aa-ets-signerLocation', ),
- '1.2.840.113549.1.9.16.2.18': ('id-smime-aa-ets-signerAttr', ),
- '1.2.840.113549.1.9.16.2.19': ('id-smime-aa-ets-otherSigCert', ),
- '1.2.840.113549.1.9.16.2.20': ('id-smime-aa-ets-contentTimestamp', ),
- '1.2.840.113549.1.9.16.2.21': ('id-smime-aa-ets-CertificateRefs', ),
- '1.2.840.113549.1.9.16.2.22': ('id-smime-aa-ets-RevocationRefs', ),
- '1.2.840.113549.1.9.16.2.23': ('id-smime-aa-ets-certValues', ),
- '1.2.840.113549.1.9.16.2.24': ('id-smime-aa-ets-revocationValues', ),
- '1.2.840.113549.1.9.16.2.25': ('id-smime-aa-ets-escTimeStamp', ),
- '1.2.840.113549.1.9.16.2.26': ('id-smime-aa-ets-certCRLTimestamp', ),
- '1.2.840.113549.1.9.16.2.27': ('id-smime-aa-ets-archiveTimeStamp', ),
- '1.2.840.113549.1.9.16.2.28': ('id-smime-aa-signatureType', ),
- '1.2.840.113549.1.9.16.2.29': ('id-smime-aa-dvcs-dvc', ),
- '1.2.840.113549.1.9.16.2.47': ('id-smime-aa-signingCertificateV2', ),
- '1.2.840.113549.1.9.16.3': ('id-smime-alg', ),
- '1.2.840.113549.1.9.16.3.1': ('id-smime-alg-ESDHwith3DES', ),
- '1.2.840.113549.1.9.16.3.2': ('id-smime-alg-ESDHwithRC2', ),
- '1.2.840.113549.1.9.16.3.3': ('id-smime-alg-3DESwrap', ),
- '1.2.840.113549.1.9.16.3.4': ('id-smime-alg-RC2wrap', ),
- '1.2.840.113549.1.9.16.3.5': ('id-smime-alg-ESDH', ),
- '1.2.840.113549.1.9.16.3.6': ('id-smime-alg-CMS3DESwrap', ),
- '1.2.840.113549.1.9.16.3.7': ('id-smime-alg-CMSRC2wrap', ),
- '1.2.840.113549.1.9.16.3.8': ('zlib compression', 'ZLIB'),
- '1.2.840.113549.1.9.16.3.9': ('id-alg-PWRI-KEK', ),
- '1.2.840.113549.1.9.16.4': ('id-smime-cd', ),
- '1.2.840.113549.1.9.16.4.1': ('id-smime-cd-ldap', ),
- '1.2.840.113549.1.9.16.5': ('id-smime-spq', ),
- '1.2.840.113549.1.9.16.5.1': ('id-smime-spq-ets-sqt-uri', ),
- '1.2.840.113549.1.9.16.5.2': ('id-smime-spq-ets-sqt-unotice', ),
- '1.2.840.113549.1.9.16.6': ('id-smime-cti', ),
- '1.2.840.113549.1.9.16.6.1': ('id-smime-cti-ets-proofOfOrigin', ),
- '1.2.840.113549.1.9.16.6.2': ('id-smime-cti-ets-proofOfReceipt', ),
- '1.2.840.113549.1.9.16.6.3': ('id-smime-cti-ets-proofOfDelivery', ),
- '1.2.840.113549.1.9.16.6.4': ('id-smime-cti-ets-proofOfSender', ),
- '1.2.840.113549.1.9.16.6.5': ('id-smime-cti-ets-proofOfApproval', ),
- '1.2.840.113549.1.9.16.6.6': ('id-smime-cti-ets-proofOfCreation', ),
- '1.2.840.113549.1.9.20': ('friendlyName', ),
- '1.2.840.113549.1.9.21': ('localKeyID', ),
- '1.2.840.113549.1.9.22': ('certTypes', ),
- '1.2.840.113549.1.9.22.1': ('x509Certificate', ),
- '1.2.840.113549.1.9.22.2': ('sdsiCertificate', ),
- '1.2.840.113549.1.9.23': ('crlTypes', ),
- '1.2.840.113549.1.9.23.1': ('x509Crl', ),
- '1.2.840.113549.1.12': ('pkcs12', ),
- '1.2.840.113549.1.12.1': ('pkcs12-pbeids', ),
- '1.2.840.113549.1.12.1.1': ('pbeWithSHA1And128BitRC4', 'PBE-SHA1-RC4-128'),
- '1.2.840.113549.1.12.1.2': ('pbeWithSHA1And40BitRC4', 'PBE-SHA1-RC4-40'),
- '1.2.840.113549.1.12.1.3': ('pbeWithSHA1And3-KeyTripleDES-CBC', 'PBE-SHA1-3DES'),
- '1.2.840.113549.1.12.1.4': ('pbeWithSHA1And2-KeyTripleDES-CBC', 'PBE-SHA1-2DES'),
- '1.2.840.113549.1.12.1.5': ('pbeWithSHA1And128BitRC2-CBC', 'PBE-SHA1-RC2-128'),
- '1.2.840.113549.1.12.1.6': ('pbeWithSHA1And40BitRC2-CBC', 'PBE-SHA1-RC2-40'),
- '1.2.840.113549.1.12.10': ('pkcs12-Version1', ),
- '1.2.840.113549.1.12.10.1': ('pkcs12-BagIds', ),
- '1.2.840.113549.1.12.10.1.1': ('keyBag', ),
- '1.2.840.113549.1.12.10.1.2': ('pkcs8ShroudedKeyBag', ),
- '1.2.840.113549.1.12.10.1.3': ('certBag', ),
- '1.2.840.113549.1.12.10.1.4': ('crlBag', ),
- '1.2.840.113549.1.12.10.1.5': ('secretBag', ),
- '1.2.840.113549.1.12.10.1.6': ('safeContentsBag', ),
- '1.2.840.113549.2.2': ('md2', 'MD2'),
- '1.2.840.113549.2.4': ('md4', 'MD4'),
- '1.2.840.113549.2.5': ('md5', 'MD5'),
- '1.2.840.113549.2.6': ('hmacWithMD5', ),
- '1.2.840.113549.2.7': ('hmacWithSHA1', ),
- '1.2.840.113549.2.8': ('hmacWithSHA224', ),
- '1.2.840.113549.2.9': ('hmacWithSHA256', ),
- '1.2.840.113549.2.10': ('hmacWithSHA384', ),
- '1.2.840.113549.2.11': ('hmacWithSHA512', ),
- '1.2.840.113549.2.12': ('hmacWithSHA512-224', ),
- '1.2.840.113549.2.13': ('hmacWithSHA512-256', ),
- '1.2.840.113549.3.2': ('rc2-cbc', 'RC2-CBC'),
- '1.2.840.113549.3.4': ('rc4', 'RC4'),
- '1.2.840.113549.3.7': ('des-ede3-cbc', 'DES-EDE3-CBC'),
- '1.2.840.113549.3.8': ('rc5-cbc', 'RC5-CBC'),
- '1.2.840.113549.3.10': ('des-cdmf', 'DES-CDMF'),
- '1.3': ('identified-organization', 'org', 'ORG'),
- '1.3.6': ('dod', 'DOD'),
- '1.3.6.1': ('iana', 'IANA', 'internet'),
- '1.3.6.1.1': ('Directory', 'directory'),
- '1.3.6.1.2': ('Management', 'mgmt'),
- '1.3.6.1.3': ('Experimental', 'experimental'),
- '1.3.6.1.4': ('Private', 'private'),
- '1.3.6.1.4.1': ('Enterprises', 'enterprises'),
- '1.3.6.1.4.1.188.7.1.1.2': ('idea-cbc', 'IDEA-CBC'),
- '1.3.6.1.4.1.311.2.1.14': ('Microsoft Extension Request', 'msExtReq'),
- '1.3.6.1.4.1.311.2.1.21': ('Microsoft Individual Code Signing', 'msCodeInd'),
- '1.3.6.1.4.1.311.2.1.22': ('Microsoft Commercial Code Signing', 'msCodeCom'),
- '1.3.6.1.4.1.311.10.3.1': ('Microsoft Trust List Signing', 'msCTLSign'),
- '1.3.6.1.4.1.311.10.3.3': ('Microsoft Server Gated Crypto', 'msSGC'),
- '1.3.6.1.4.1.311.10.3.4': ('Microsoft Encrypted File System', 'msEFS'),
- '1.3.6.1.4.1.311.17.1': ('Microsoft CSP Name', 'CSPName'),
- '1.3.6.1.4.1.311.17.2': ('Microsoft Local Key set', 'LocalKeySet'),
- '1.3.6.1.4.1.311.20.2.2': ('Microsoft Smartcardlogin', 'msSmartcardLogin'),
- '1.3.6.1.4.1.311.20.2.3': ('Microsoft Universal Principal Name', 'msUPN'),
- '1.3.6.1.4.1.311.60.2.1.1': ('jurisdictionLocalityName', 'jurisdictionL'),
- '1.3.6.1.4.1.311.60.2.1.2': ('jurisdictionStateOrProvinceName', 'jurisdictionST'),
- '1.3.6.1.4.1.311.60.2.1.3': ('jurisdictionCountryName', 'jurisdictionC'),
- '1.3.6.1.4.1.1466.344': ('dcObject', 'dcobject'),
- '1.3.6.1.4.1.1722.12.2.1.16': ('blake2b512', 'BLAKE2b512'),
- '1.3.6.1.4.1.1722.12.2.2.8': ('blake2s256', 'BLAKE2s256'),
- '1.3.6.1.4.1.3029.1.2': ('bf-cbc', 'BF-CBC'),
- '1.3.6.1.4.1.11129.2.4.2': ('CT Precertificate SCTs', 'ct_precert_scts'),
- '1.3.6.1.4.1.11129.2.4.3': ('CT Precertificate Poison', 'ct_precert_poison'),
- '1.3.6.1.4.1.11129.2.4.4': ('CT Precertificate Signer', 'ct_precert_signer'),
- '1.3.6.1.4.1.11129.2.4.5': ('CT Certificate SCTs', 'ct_cert_scts'),
- '1.3.6.1.4.1.11591.4.11': ('scrypt', 'id-scrypt'),
- '1.3.6.1.5': ('Security', 'security'),
- '1.3.6.1.5.2.3': ('id-pkinit', ),
- '1.3.6.1.5.2.3.4': ('PKINIT Client Auth', 'pkInitClientAuth'),
- '1.3.6.1.5.2.3.5': ('Signing KDC Response', 'pkInitKDC'),
- '1.3.6.1.5.5.7': ('PKIX', ),
- '1.3.6.1.5.5.7.0': ('id-pkix-mod', ),
- '1.3.6.1.5.5.7.0.1': ('id-pkix1-explicit-88', ),
- '1.3.6.1.5.5.7.0.2': ('id-pkix1-implicit-88', ),
- '1.3.6.1.5.5.7.0.3': ('id-pkix1-explicit-93', ),
- '1.3.6.1.5.5.7.0.4': ('id-pkix1-implicit-93', ),
- '1.3.6.1.5.5.7.0.5': ('id-mod-crmf', ),
- '1.3.6.1.5.5.7.0.6': ('id-mod-cmc', ),
- '1.3.6.1.5.5.7.0.7': ('id-mod-kea-profile-88', ),
- '1.3.6.1.5.5.7.0.8': ('id-mod-kea-profile-93', ),
- '1.3.6.1.5.5.7.0.9': ('id-mod-cmp', ),
- '1.3.6.1.5.5.7.0.10': ('id-mod-qualified-cert-88', ),
- '1.3.6.1.5.5.7.0.11': ('id-mod-qualified-cert-93', ),
- '1.3.6.1.5.5.7.0.12': ('id-mod-attribute-cert', ),
- '1.3.6.1.5.5.7.0.13': ('id-mod-timestamp-protocol', ),
- '1.3.6.1.5.5.7.0.14': ('id-mod-ocsp', ),
- '1.3.6.1.5.5.7.0.15': ('id-mod-dvcs', ),
- '1.3.6.1.5.5.7.0.16': ('id-mod-cmp2000', ),
- '1.3.6.1.5.5.7.1': ('id-pe', ),
- '1.3.6.1.5.5.7.1.1': ('Authority Information Access', 'authorityInfoAccess'),
- '1.3.6.1.5.5.7.1.2': ('Biometric Info', 'biometricInfo'),
- '1.3.6.1.5.5.7.1.3': ('qcStatements', ),
- '1.3.6.1.5.5.7.1.4': ('ac-auditEntity', ),
- '1.3.6.1.5.5.7.1.5': ('ac-targeting', ),
- '1.3.6.1.5.5.7.1.6': ('aaControls', ),
- '1.3.6.1.5.5.7.1.7': ('sbgp-ipAddrBlock', ),
- '1.3.6.1.5.5.7.1.8': ('sbgp-autonomousSysNum', ),
- '1.3.6.1.5.5.7.1.9': ('sbgp-routerIdentifier', ),
- '1.3.6.1.5.5.7.1.10': ('ac-proxying', ),
- '1.3.6.1.5.5.7.1.11': ('Subject Information Access', 'subjectInfoAccess'),
- '1.3.6.1.5.5.7.1.14': ('Proxy Certificate Information', 'proxyCertInfo'),
- '1.3.6.1.5.5.7.1.24': ('TLS Feature', 'tlsfeature'),
- '1.3.6.1.5.5.7.2': ('id-qt', ),
- '1.3.6.1.5.5.7.2.1': ('Policy Qualifier CPS', 'id-qt-cps'),
- '1.3.6.1.5.5.7.2.2': ('Policy Qualifier User Notice', 'id-qt-unotice'),
- '1.3.6.1.5.5.7.2.3': ('textNotice', ),
- '1.3.6.1.5.5.7.3': ('id-kp', ),
- '1.3.6.1.5.5.7.3.1': ('TLS Web Server Authentication', 'serverAuth'),
- '1.3.6.1.5.5.7.3.2': ('TLS Web Client Authentication', 'clientAuth'),
- '1.3.6.1.5.5.7.3.3': ('Code Signing', 'codeSigning'),
- '1.3.6.1.5.5.7.3.4': ('E-mail Protection', 'emailProtection'),
- '1.3.6.1.5.5.7.3.5': ('IPSec End System', 'ipsecEndSystem'),
- '1.3.6.1.5.5.7.3.6': ('IPSec Tunnel', 'ipsecTunnel'),
- '1.3.6.1.5.5.7.3.7': ('IPSec User', 'ipsecUser'),
- '1.3.6.1.5.5.7.3.8': ('Time Stamping', 'timeStamping'),
- '1.3.6.1.5.5.7.3.9': ('OCSP Signing', 'OCSPSigning'),
- '1.3.6.1.5.5.7.3.10': ('dvcs', 'DVCS'),
- '1.3.6.1.5.5.7.3.17': ('ipsec Internet Key Exchange', 'ipsecIKE'),
- '1.3.6.1.5.5.7.3.18': ('Ctrl/provision WAP Access', 'capwapAC'),
- '1.3.6.1.5.5.7.3.19': ('Ctrl/Provision WAP Termination', 'capwapWTP'),
- '1.3.6.1.5.5.7.3.21': ('SSH Client', 'secureShellClient'),
- '1.3.6.1.5.5.7.3.22': ('SSH Server', 'secureShellServer'),
- '1.3.6.1.5.5.7.3.23': ('Send Router', 'sendRouter'),
- '1.3.6.1.5.5.7.3.24': ('Send Proxied Router', 'sendProxiedRouter'),
- '1.3.6.1.5.5.7.3.25': ('Send Owner', 'sendOwner'),
- '1.3.6.1.5.5.7.3.26': ('Send Proxied Owner', 'sendProxiedOwner'),
- '1.3.6.1.5.5.7.3.27': ('CMC Certificate Authority', 'cmcCA'),
- '1.3.6.1.5.5.7.3.28': ('CMC Registration Authority', 'cmcRA'),
- '1.3.6.1.5.5.7.4': ('id-it', ),
- '1.3.6.1.5.5.7.4.1': ('id-it-caProtEncCert', ),
- '1.3.6.1.5.5.7.4.2': ('id-it-signKeyPairTypes', ),
- '1.3.6.1.5.5.7.4.3': ('id-it-encKeyPairTypes', ),
- '1.3.6.1.5.5.7.4.4': ('id-it-preferredSymmAlg', ),
- '1.3.6.1.5.5.7.4.5': ('id-it-caKeyUpdateInfo', ),
- '1.3.6.1.5.5.7.4.6': ('id-it-currentCRL', ),
- '1.3.6.1.5.5.7.4.7': ('id-it-unsupportedOIDs', ),
- '1.3.6.1.5.5.7.4.8': ('id-it-subscriptionRequest', ),
- '1.3.6.1.5.5.7.4.9': ('id-it-subscriptionResponse', ),
- '1.3.6.1.5.5.7.4.10': ('id-it-keyPairParamReq', ),
- '1.3.6.1.5.5.7.4.11': ('id-it-keyPairParamRep', ),
- '1.3.6.1.5.5.7.4.12': ('id-it-revPassphrase', ),
- '1.3.6.1.5.5.7.4.13': ('id-it-implicitConfirm', ),
- '1.3.6.1.5.5.7.4.14': ('id-it-confirmWaitTime', ),
- '1.3.6.1.5.5.7.4.15': ('id-it-origPKIMessage', ),
- '1.3.6.1.5.5.7.4.16': ('id-it-suppLangTags', ),
- '1.3.6.1.5.5.7.5': ('id-pkip', ),
- '1.3.6.1.5.5.7.5.1': ('id-regCtrl', ),
- '1.3.6.1.5.5.7.5.1.1': ('id-regCtrl-regToken', ),
- '1.3.6.1.5.5.7.5.1.2': ('id-regCtrl-authenticator', ),
- '1.3.6.1.5.5.7.5.1.3': ('id-regCtrl-pkiPublicationInfo', ),
- '1.3.6.1.5.5.7.5.1.4': ('id-regCtrl-pkiArchiveOptions', ),
- '1.3.6.1.5.5.7.5.1.5': ('id-regCtrl-oldCertID', ),
- '1.3.6.1.5.5.7.5.1.6': ('id-regCtrl-protocolEncrKey', ),
- '1.3.6.1.5.5.7.5.2': ('id-regInfo', ),
- '1.3.6.1.5.5.7.5.2.1': ('id-regInfo-utf8Pairs', ),
- '1.3.6.1.5.5.7.5.2.2': ('id-regInfo-certReq', ),
- '1.3.6.1.5.5.7.6': ('id-alg', ),
- '1.3.6.1.5.5.7.6.1': ('id-alg-des40', ),
- '1.3.6.1.5.5.7.6.2': ('id-alg-noSignature', ),
- '1.3.6.1.5.5.7.6.3': ('id-alg-dh-sig-hmac-sha1', ),
- '1.3.6.1.5.5.7.6.4': ('id-alg-dh-pop', ),
- '1.3.6.1.5.5.7.7': ('id-cmc', ),
- '1.3.6.1.5.5.7.7.1': ('id-cmc-statusInfo', ),
- '1.3.6.1.5.5.7.7.2': ('id-cmc-identification', ),
- '1.3.6.1.5.5.7.7.3': ('id-cmc-identityProof', ),
- '1.3.6.1.5.5.7.7.4': ('id-cmc-dataReturn', ),
- '1.3.6.1.5.5.7.7.5': ('id-cmc-transactionId', ),
- '1.3.6.1.5.5.7.7.6': ('id-cmc-senderNonce', ),
- '1.3.6.1.5.5.7.7.7': ('id-cmc-recipientNonce', ),
- '1.3.6.1.5.5.7.7.8': ('id-cmc-addExtensions', ),
- '1.3.6.1.5.5.7.7.9': ('id-cmc-encryptedPOP', ),
- '1.3.6.1.5.5.7.7.10': ('id-cmc-decryptedPOP', ),
- '1.3.6.1.5.5.7.7.11': ('id-cmc-lraPOPWitness', ),
- '1.3.6.1.5.5.7.7.15': ('id-cmc-getCert', ),
- '1.3.6.1.5.5.7.7.16': ('id-cmc-getCRL', ),
- '1.3.6.1.5.5.7.7.17': ('id-cmc-revokeRequest', ),
- '1.3.6.1.5.5.7.7.18': ('id-cmc-regInfo', ),
- '1.3.6.1.5.5.7.7.19': ('id-cmc-responseInfo', ),
- '1.3.6.1.5.5.7.7.21': ('id-cmc-queryPending', ),
- '1.3.6.1.5.5.7.7.22': ('id-cmc-popLinkRandom', ),
- '1.3.6.1.5.5.7.7.23': ('id-cmc-popLinkWitness', ),
- '1.3.6.1.5.5.7.7.24': ('id-cmc-confirmCertAcceptance', ),
- '1.3.6.1.5.5.7.8': ('id-on', ),
- '1.3.6.1.5.5.7.8.1': ('id-on-personalData', ),
- '1.3.6.1.5.5.7.8.3': ('Permanent Identifier', 'id-on-permanentIdentifier'),
- '1.3.6.1.5.5.7.9': ('id-pda', ),
- '1.3.6.1.5.5.7.9.1': ('id-pda-dateOfBirth', ),
- '1.3.6.1.5.5.7.9.2': ('id-pda-placeOfBirth', ),
- '1.3.6.1.5.5.7.9.3': ('id-pda-gender', ),
- '1.3.6.1.5.5.7.9.4': ('id-pda-countryOfCitizenship', ),
- '1.3.6.1.5.5.7.9.5': ('id-pda-countryOfResidence', ),
- '1.3.6.1.5.5.7.10': ('id-aca', ),
- '1.3.6.1.5.5.7.10.1': ('id-aca-authenticationInfo', ),
- '1.3.6.1.5.5.7.10.2': ('id-aca-accessIdentity', ),
- '1.3.6.1.5.5.7.10.3': ('id-aca-chargingIdentity', ),
- '1.3.6.1.5.5.7.10.4': ('id-aca-group', ),
- '1.3.6.1.5.5.7.10.5': ('id-aca-role', ),
- '1.3.6.1.5.5.7.10.6': ('id-aca-encAttrs', ),
- '1.3.6.1.5.5.7.11': ('id-qcs', ),
- '1.3.6.1.5.5.7.11.1': ('id-qcs-pkixQCSyntax-v1', ),
- '1.3.6.1.5.5.7.12': ('id-cct', ),
- '1.3.6.1.5.5.7.12.1': ('id-cct-crs', ),
- '1.3.6.1.5.5.7.12.2': ('id-cct-PKIData', ),
- '1.3.6.1.5.5.7.12.3': ('id-cct-PKIResponse', ),
- '1.3.6.1.5.5.7.21': ('id-ppl', ),
- '1.3.6.1.5.5.7.21.0': ('Any language', 'id-ppl-anyLanguage'),
- '1.3.6.1.5.5.7.21.1': ('Inherit all', 'id-ppl-inheritAll'),
- '1.3.6.1.5.5.7.21.2': ('Independent', 'id-ppl-independent'),
- '1.3.6.1.5.5.7.48': ('id-ad', ),
- '1.3.6.1.5.5.7.48.1': ('OCSP', 'OCSP', 'id-pkix-OCSP'),
- '1.3.6.1.5.5.7.48.1.1': ('Basic OCSP Response', 'basicOCSPResponse'),
- '1.3.6.1.5.5.7.48.1.2': ('OCSP Nonce', 'Nonce'),
- '1.3.6.1.5.5.7.48.1.3': ('OCSP CRL ID', 'CrlID'),
- '1.3.6.1.5.5.7.48.1.4': ('Acceptable OCSP Responses', 'acceptableResponses'),
- '1.3.6.1.5.5.7.48.1.5': ('OCSP No Check', 'noCheck'),
- '1.3.6.1.5.5.7.48.1.6': ('OCSP Archive Cutoff', 'archiveCutoff'),
- '1.3.6.1.5.5.7.48.1.7': ('OCSP Service Locator', 'serviceLocator'),
- '1.3.6.1.5.5.7.48.1.8': ('Extended OCSP Status', 'extendedStatus'),
- '1.3.6.1.5.5.7.48.1.9': ('valid', ),
- '1.3.6.1.5.5.7.48.1.10': ('path', ),
- '1.3.6.1.5.5.7.48.1.11': ('Trust Root', 'trustRoot'),
- '1.3.6.1.5.5.7.48.2': ('CA Issuers', 'caIssuers'),
- '1.3.6.1.5.5.7.48.3': ('AD Time Stamping', 'ad_timestamping'),
- '1.3.6.1.5.5.7.48.4': ('ad dvcs', 'AD_DVCS'),
- '1.3.6.1.5.5.7.48.5': ('CA Repository', 'caRepository'),
- '1.3.6.1.5.5.8.1.1': ('hmac-md5', 'HMAC-MD5'),
- '1.3.6.1.5.5.8.1.2': ('hmac-sha1', 'HMAC-SHA1'),
- '1.3.6.1.6': ('SNMPv2', 'snmpv2'),
- '1.3.6.1.7': ('Mail', ),
- '1.3.6.1.7.1': ('MIME MHS', 'mime-mhs'),
- '1.3.6.1.7.1.1': ('mime-mhs-headings', 'mime-mhs-headings'),
- '1.3.6.1.7.1.1.1': ('id-hex-partial-message', 'id-hex-partial-message'),
- '1.3.6.1.7.1.1.2': ('id-hex-multipart-message', 'id-hex-multipart-message'),
- '1.3.6.1.7.1.2': ('mime-mhs-bodies', 'mime-mhs-bodies'),
- '1.3.14.3.2': ('algorithm', 'algorithm'),
- '1.3.14.3.2.3': ('md5WithRSA', 'RSA-NP-MD5'),
- '1.3.14.3.2.6': ('des-ecb', 'DES-ECB'),
- '1.3.14.3.2.7': ('des-cbc', 'DES-CBC'),
- '1.3.14.3.2.8': ('des-ofb', 'DES-OFB'),
- '1.3.14.3.2.9': ('des-cfb', 'DES-CFB'),
- '1.3.14.3.2.11': ('rsaSignature', ),
- '1.3.14.3.2.12': ('dsaEncryption-old', 'DSA-old'),
- '1.3.14.3.2.13': ('dsaWithSHA', 'DSA-SHA'),
- '1.3.14.3.2.15': ('shaWithRSAEncryption', 'RSA-SHA'),
- '1.3.14.3.2.17': ('des-ede', 'DES-EDE'),
- '1.3.14.3.2.18': ('sha', 'SHA'),
- '1.3.14.3.2.26': ('sha1', 'SHA1'),
- '1.3.14.3.2.27': ('dsaWithSHA1-old', 'DSA-SHA1-old'),
- '1.3.14.3.2.29': ('sha1WithRSA', 'RSA-SHA1-2'),
- '1.3.36.3.2.1': ('ripemd160', 'RIPEMD160'),
- '1.3.36.3.3.1.2': ('ripemd160WithRSA', 'RSA-RIPEMD160'),
- '1.3.36.3.3.2.8.1.1.1': ('brainpoolP160r1', ),
- '1.3.36.3.3.2.8.1.1.2': ('brainpoolP160t1', ),
- '1.3.36.3.3.2.8.1.1.3': ('brainpoolP192r1', ),
- '1.3.36.3.3.2.8.1.1.4': ('brainpoolP192t1', ),
- '1.3.36.3.3.2.8.1.1.5': ('brainpoolP224r1', ),
- '1.3.36.3.3.2.8.1.1.6': ('brainpoolP224t1', ),
- '1.3.36.3.3.2.8.1.1.7': ('brainpoolP256r1', ),
- '1.3.36.3.3.2.8.1.1.8': ('brainpoolP256t1', ),
- '1.3.36.3.3.2.8.1.1.9': ('brainpoolP320r1', ),
- '1.3.36.3.3.2.8.1.1.10': ('brainpoolP320t1', ),
- '1.3.36.3.3.2.8.1.1.11': ('brainpoolP384r1', ),
- '1.3.36.3.3.2.8.1.1.12': ('brainpoolP384t1', ),
- '1.3.36.3.3.2.8.1.1.13': ('brainpoolP512r1', ),
- '1.3.36.3.3.2.8.1.1.14': ('brainpoolP512t1', ),
- '1.3.36.8.3.3': ('Professional Information or basis for Admission', 'x509ExtAdmission'),
- '1.3.101.1.4.1': ('Strong Extranet ID', 'SXNetID'),
- '1.3.101.110': ('X25519', ),
- '1.3.101.111': ('X448', ),
- '1.3.101.112': ('ED25519', ),
- '1.3.101.113': ('ED448', ),
- '1.3.111': ('ieee', ),
- '1.3.111.2.1619': ('IEEE Security in Storage Working Group', 'ieee-siswg'),
- '1.3.111.2.1619.0.1.1': ('aes-128-xts', 'AES-128-XTS'),
- '1.3.111.2.1619.0.1.2': ('aes-256-xts', 'AES-256-XTS'),
- '1.3.132': ('certicom-arc', ),
- '1.3.132.0': ('secg_ellipticCurve', ),
- '1.3.132.0.1': ('sect163k1', ),
- '1.3.132.0.2': ('sect163r1', ),
- '1.3.132.0.3': ('sect239k1', ),
- '1.3.132.0.4': ('sect113r1', ),
- '1.3.132.0.5': ('sect113r2', ),
- '1.3.132.0.6': ('secp112r1', ),
- '1.3.132.0.7': ('secp112r2', ),
- '1.3.132.0.8': ('secp160r1', ),
- '1.3.132.0.9': ('secp160k1', ),
- '1.3.132.0.10': ('secp256k1', ),
- '1.3.132.0.15': ('sect163r2', ),
- '1.3.132.0.16': ('sect283k1', ),
- '1.3.132.0.17': ('sect283r1', ),
- '1.3.132.0.22': ('sect131r1', ),
- '1.3.132.0.23': ('sect131r2', ),
- '1.3.132.0.24': ('sect193r1', ),
- '1.3.132.0.25': ('sect193r2', ),
- '1.3.132.0.26': ('sect233k1', ),
- '1.3.132.0.27': ('sect233r1', ),
- '1.3.132.0.28': ('secp128r1', ),
- '1.3.132.0.29': ('secp128r2', ),
- '1.3.132.0.30': ('secp160r2', ),
- '1.3.132.0.31': ('secp192k1', ),
- '1.3.132.0.32': ('secp224k1', ),
- '1.3.132.0.33': ('secp224r1', ),
- '1.3.132.0.34': ('secp384r1', ),
- '1.3.132.0.35': ('secp521r1', ),
- '1.3.132.0.36': ('sect409k1', ),
- '1.3.132.0.37': ('sect409r1', ),
- '1.3.132.0.38': ('sect571k1', ),
- '1.3.132.0.39': ('sect571r1', ),
- '1.3.132.1': ('secg-scheme', ),
- '1.3.132.1.11.0': ('dhSinglePass-stdDH-sha224kdf-scheme', ),
- '1.3.132.1.11.1': ('dhSinglePass-stdDH-sha256kdf-scheme', ),
- '1.3.132.1.11.2': ('dhSinglePass-stdDH-sha384kdf-scheme', ),
- '1.3.132.1.11.3': ('dhSinglePass-stdDH-sha512kdf-scheme', ),
- '1.3.132.1.14.0': ('dhSinglePass-cofactorDH-sha224kdf-scheme', ),
- '1.3.132.1.14.1': ('dhSinglePass-cofactorDH-sha256kdf-scheme', ),
- '1.3.132.1.14.2': ('dhSinglePass-cofactorDH-sha384kdf-scheme', ),
- '1.3.132.1.14.3': ('dhSinglePass-cofactorDH-sha512kdf-scheme', ),
- '1.3.133.16.840.63.0': ('x9-63-scheme', ),
- '1.3.133.16.840.63.0.2': ('dhSinglePass-stdDH-sha1kdf-scheme', ),
- '1.3.133.16.840.63.0.3': ('dhSinglePass-cofactorDH-sha1kdf-scheme', ),
- '2': ('joint-iso-itu-t', 'JOINT-ISO-ITU-T', 'joint-iso-ccitt'),
- '2.5': ('directory services (X.500)', 'X500'),
- '2.5.1.5': ('Selected Attribute Types', 'selected-attribute-types'),
- '2.5.1.5.55': ('clearance', ),
- '2.5.4': ('X509', ),
- '2.5.4.3': ('commonName', 'CN'),
- '2.5.4.4': ('surname', 'SN'),
- '2.5.4.5': ('serialNumber', ),
- '2.5.4.6': ('countryName', 'C'),
- '2.5.4.7': ('localityName', 'L'),
- '2.5.4.8': ('stateOrProvinceName', 'ST'),
- '2.5.4.9': ('streetAddress', 'street'),
- '2.5.4.10': ('organizationName', 'O'),
- '2.5.4.11': ('organizationalUnitName', 'OU'),
- '2.5.4.12': ('title', 'title'),
- '2.5.4.13': ('description', ),
- '2.5.4.14': ('searchGuide', ),
- '2.5.4.15': ('businessCategory', ),
- '2.5.4.16': ('postalAddress', ),
- '2.5.4.17': ('postalCode', ),
- '2.5.4.18': ('postOfficeBox', ),
- '2.5.4.19': ('physicalDeliveryOfficeName', ),
- '2.5.4.20': ('telephoneNumber', ),
- '2.5.4.21': ('telexNumber', ),
- '2.5.4.22': ('teletexTerminalIdentifier', ),
- '2.5.4.23': ('facsimileTelephoneNumber', ),
- '2.5.4.24': ('x121Address', ),
- '2.5.4.25': ('internationaliSDNNumber', ),
- '2.5.4.26': ('registeredAddress', ),
- '2.5.4.27': ('destinationIndicator', ),
- '2.5.4.28': ('preferredDeliveryMethod', ),
- '2.5.4.29': ('presentationAddress', ),
- '2.5.4.30': ('supportedApplicationContext', ),
- '2.5.4.31': ('member', ),
- '2.5.4.32': ('owner', ),
- '2.5.4.33': ('roleOccupant', ),
- '2.5.4.34': ('seeAlso', ),
- '2.5.4.35': ('userPassword', ),
- '2.5.4.36': ('userCertificate', ),
- '2.5.4.37': ('cACertificate', ),
- '2.5.4.38': ('authorityRevocationList', ),
- '2.5.4.39': ('certificateRevocationList', ),
- '2.5.4.40': ('crossCertificatePair', ),
- '2.5.4.41': ('name', 'name'),
- '2.5.4.42': ('givenName', 'GN'),
- '2.5.4.43': ('initials', 'initials'),
- '2.5.4.44': ('generationQualifier', ),
- '2.5.4.45': ('x500UniqueIdentifier', ),
- '2.5.4.46': ('dnQualifier', 'dnQualifier'),
- '2.5.4.47': ('enhancedSearchGuide', ),
- '2.5.4.48': ('protocolInformation', ),
- '2.5.4.49': ('distinguishedName', ),
- '2.5.4.50': ('uniqueMember', ),
- '2.5.4.51': ('houseIdentifier', ),
- '2.5.4.52': ('supportedAlgorithms', ),
- '2.5.4.53': ('deltaRevocationList', ),
- '2.5.4.54': ('dmdName', ),
- '2.5.4.65': ('pseudonym', ),
- '2.5.4.72': ('role', 'role'),
- '2.5.4.97': ('organizationIdentifier', ),
- '2.5.4.98': ('countryCode3c', 'c3'),
- '2.5.4.99': ('countryCode3n', 'n3'),
- '2.5.4.100': ('dnsName', ),
- '2.5.8': ('directory services - algorithms', 'X500algorithms'),
- '2.5.8.1.1': ('rsa', 'RSA'),
- '2.5.8.3.100': ('mdc2WithRSA', 'RSA-MDC2'),
- '2.5.8.3.101': ('mdc2', 'MDC2'),
- '2.5.29': ('id-ce', ),
- '2.5.29.9': ('X509v3 Subject Directory Attributes', 'subjectDirectoryAttributes'),
- '2.5.29.14': ('X509v3 Subject Key Identifier', 'subjectKeyIdentifier'),
- '2.5.29.15': ('X509v3 Key Usage', 'keyUsage'),
- '2.5.29.16': ('X509v3 Private Key Usage Period', 'privateKeyUsagePeriod'),
- '2.5.29.17': ('X509v3 Subject Alternative Name', 'subjectAltName'),
- '2.5.29.18': ('X509v3 Issuer Alternative Name', 'issuerAltName'),
- '2.5.29.19': ('X509v3 Basic Constraints', 'basicConstraints'),
- '2.5.29.20': ('X509v3 CRL Number', 'crlNumber'),
- '2.5.29.21': ('X509v3 CRL Reason Code', 'CRLReason'),
- '2.5.29.23': ('Hold Instruction Code', 'holdInstructionCode'),
- '2.5.29.24': ('Invalidity Date', 'invalidityDate'),
- '2.5.29.27': ('X509v3 Delta CRL Indicator', 'deltaCRL'),
- '2.5.29.28': ('X509v3 Issuing Distribution Point', 'issuingDistributionPoint'),
- '2.5.29.29': ('X509v3 Certificate Issuer', 'certificateIssuer'),
- '2.5.29.30': ('X509v3 Name Constraints', 'nameConstraints'),
- '2.5.29.31': ('X509v3 CRL Distribution Points', 'crlDistributionPoints'),
- '2.5.29.32': ('X509v3 Certificate Policies', 'certificatePolicies'),
- '2.5.29.32.0': ('X509v3 Any Policy', 'anyPolicy'),
- '2.5.29.33': ('X509v3 Policy Mappings', 'policyMappings'),
- '2.5.29.35': ('X509v3 Authority Key Identifier', 'authorityKeyIdentifier'),
- '2.5.29.36': ('X509v3 Policy Constraints', 'policyConstraints'),
- '2.5.29.37': ('X509v3 Extended Key Usage', 'extendedKeyUsage'),
- '2.5.29.37.0': ('Any Extended Key Usage', 'anyExtendedKeyUsage'),
- '2.5.29.46': ('X509v3 Freshest CRL', 'freshestCRL'),
- '2.5.29.54': ('X509v3 Inhibit Any Policy', 'inhibitAnyPolicy'),
- '2.5.29.55': ('X509v3 AC Targeting', 'targetInformation'),
- '2.5.29.56': ('X509v3 No Revocation Available', 'noRevAvail'),
- '2.16.840.1.101.3': ('csor', ),
- '2.16.840.1.101.3.4': ('nistAlgorithms', ),
- '2.16.840.1.101.3.4.1': ('aes', ),
- '2.16.840.1.101.3.4.1.1': ('aes-128-ecb', 'AES-128-ECB'),
- '2.16.840.1.101.3.4.1.2': ('aes-128-cbc', 'AES-128-CBC'),
- '2.16.840.1.101.3.4.1.3': ('aes-128-ofb', 'AES-128-OFB'),
- '2.16.840.1.101.3.4.1.4': ('aes-128-cfb', 'AES-128-CFB'),
- '2.16.840.1.101.3.4.1.5': ('id-aes128-wrap', ),
- '2.16.840.1.101.3.4.1.6': ('aes-128-gcm', 'id-aes128-GCM'),
- '2.16.840.1.101.3.4.1.7': ('aes-128-ccm', 'id-aes128-CCM'),
- '2.16.840.1.101.3.4.1.8': ('id-aes128-wrap-pad', ),
- '2.16.840.1.101.3.4.1.21': ('aes-192-ecb', 'AES-192-ECB'),
- '2.16.840.1.101.3.4.1.22': ('aes-192-cbc', 'AES-192-CBC'),
- '2.16.840.1.101.3.4.1.23': ('aes-192-ofb', 'AES-192-OFB'),
- '2.16.840.1.101.3.4.1.24': ('aes-192-cfb', 'AES-192-CFB'),
- '2.16.840.1.101.3.4.1.25': ('id-aes192-wrap', ),
- '2.16.840.1.101.3.4.1.26': ('aes-192-gcm', 'id-aes192-GCM'),
- '2.16.840.1.101.3.4.1.27': ('aes-192-ccm', 'id-aes192-CCM'),
- '2.16.840.1.101.3.4.1.28': ('id-aes192-wrap-pad', ),
- '2.16.840.1.101.3.4.1.41': ('aes-256-ecb', 'AES-256-ECB'),
- '2.16.840.1.101.3.4.1.42': ('aes-256-cbc', 'AES-256-CBC'),
- '2.16.840.1.101.3.4.1.43': ('aes-256-ofb', 'AES-256-OFB'),
- '2.16.840.1.101.3.4.1.44': ('aes-256-cfb', 'AES-256-CFB'),
- '2.16.840.1.101.3.4.1.45': ('id-aes256-wrap', ),
- '2.16.840.1.101.3.4.1.46': ('aes-256-gcm', 'id-aes256-GCM'),
- '2.16.840.1.101.3.4.1.47': ('aes-256-ccm', 'id-aes256-CCM'),
- '2.16.840.1.101.3.4.1.48': ('id-aes256-wrap-pad', ),
- '2.16.840.1.101.3.4.2': ('nist_hashalgs', ),
- '2.16.840.1.101.3.4.2.1': ('sha256', 'SHA256'),
- '2.16.840.1.101.3.4.2.2': ('sha384', 'SHA384'),
- '2.16.840.1.101.3.4.2.3': ('sha512', 'SHA512'),
- '2.16.840.1.101.3.4.2.4': ('sha224', 'SHA224'),
- '2.16.840.1.101.3.4.2.5': ('sha512-224', 'SHA512-224'),
- '2.16.840.1.101.3.4.2.6': ('sha512-256', 'SHA512-256'),
- '2.16.840.1.101.3.4.2.7': ('sha3-224', 'SHA3-224'),
- '2.16.840.1.101.3.4.2.8': ('sha3-256', 'SHA3-256'),
- '2.16.840.1.101.3.4.2.9': ('sha3-384', 'SHA3-384'),
- '2.16.840.1.101.3.4.2.10': ('sha3-512', 'SHA3-512'),
- '2.16.840.1.101.3.4.2.11': ('shake128', 'SHAKE128'),
- '2.16.840.1.101.3.4.2.12': ('shake256', 'SHAKE256'),
- '2.16.840.1.101.3.4.2.13': ('hmac-sha3-224', 'id-hmacWithSHA3-224'),
- '2.16.840.1.101.3.4.2.14': ('hmac-sha3-256', 'id-hmacWithSHA3-256'),
- '2.16.840.1.101.3.4.2.15': ('hmac-sha3-384', 'id-hmacWithSHA3-384'),
- '2.16.840.1.101.3.4.2.16': ('hmac-sha3-512', 'id-hmacWithSHA3-512'),
- '2.16.840.1.101.3.4.3': ('dsa_with_sha2', 'sigAlgs'),
- '2.16.840.1.101.3.4.3.1': ('dsa_with_SHA224', ),
- '2.16.840.1.101.3.4.3.2': ('dsa_with_SHA256', ),
- '2.16.840.1.101.3.4.3.3': ('dsa_with_SHA384', 'id-dsa-with-sha384'),
- '2.16.840.1.101.3.4.3.4': ('dsa_with_SHA512', 'id-dsa-with-sha512'),
- '2.16.840.1.101.3.4.3.5': ('dsa_with_SHA3-224', 'id-dsa-with-sha3-224'),
- '2.16.840.1.101.3.4.3.6': ('dsa_with_SHA3-256', 'id-dsa-with-sha3-256'),
- '2.16.840.1.101.3.4.3.7': ('dsa_with_SHA3-384', 'id-dsa-with-sha3-384'),
- '2.16.840.1.101.3.4.3.8': ('dsa_with_SHA3-512', 'id-dsa-with-sha3-512'),
- '2.16.840.1.101.3.4.3.9': ('ecdsa_with_SHA3-224', 'id-ecdsa-with-sha3-224'),
- '2.16.840.1.101.3.4.3.10': ('ecdsa_with_SHA3-256', 'id-ecdsa-with-sha3-256'),
- '2.16.840.1.101.3.4.3.11': ('ecdsa_with_SHA3-384', 'id-ecdsa-with-sha3-384'),
- '2.16.840.1.101.3.4.3.12': ('ecdsa_with_SHA3-512', 'id-ecdsa-with-sha3-512'),
- '2.16.840.1.101.3.4.3.13': ('RSA-SHA3-224', 'id-rsassa-pkcs1-v1_5-with-sha3-224'),
- '2.16.840.1.101.3.4.3.14': ('RSA-SHA3-256', 'id-rsassa-pkcs1-v1_5-with-sha3-256'),
- '2.16.840.1.101.3.4.3.15': ('RSA-SHA3-384', 'id-rsassa-pkcs1-v1_5-with-sha3-384'),
- '2.16.840.1.101.3.4.3.16': ('RSA-SHA3-512', 'id-rsassa-pkcs1-v1_5-with-sha3-512'),
- '2.16.840.1.113730': ('Netscape Communications Corp.', 'Netscape'),
- '2.16.840.1.113730.1': ('Netscape Certificate Extension', 'nsCertExt'),
- '2.16.840.1.113730.1.1': ('Netscape Cert Type', 'nsCertType'),
- '2.16.840.1.113730.1.2': ('Netscape Base Url', 'nsBaseUrl'),
- '2.16.840.1.113730.1.3': ('Netscape Revocation Url', 'nsRevocationUrl'),
- '2.16.840.1.113730.1.4': ('Netscape CA Revocation Url', 'nsCaRevocationUrl'),
- '2.16.840.1.113730.1.7': ('Netscape Renewal Url', 'nsRenewalUrl'),
- '2.16.840.1.113730.1.8': ('Netscape CA Policy Url', 'nsCaPolicyUrl'),
- '2.16.840.1.113730.1.12': ('Netscape SSL Server Name', 'nsSslServerName'),
- '2.16.840.1.113730.1.13': ('Netscape Comment', 'nsComment'),
- '2.16.840.1.113730.2': ('Netscape Data Type', 'nsDataType'),
- '2.16.840.1.113730.2.5': ('Netscape Certificate Sequence', 'nsCertSequence'),
- '2.16.840.1.113730.4.1': ('Netscape Server Gated Crypto', 'nsSGC'),
- '2.23': ('International Organizations', 'international-organizations'),
- '2.23.42': ('Secure Electronic Transactions', 'id-set'),
- '2.23.42.0': ('content types', 'set-ctype'),
- '2.23.42.0.0': ('setct-PANData', ),
- '2.23.42.0.1': ('setct-PANToken', ),
- '2.23.42.0.2': ('setct-PANOnly', ),
- '2.23.42.0.3': ('setct-OIData', ),
- '2.23.42.0.4': ('setct-PI', ),
- '2.23.42.0.5': ('setct-PIData', ),
- '2.23.42.0.6': ('setct-PIDataUnsigned', ),
- '2.23.42.0.7': ('setct-HODInput', ),
- '2.23.42.0.8': ('setct-AuthResBaggage', ),
- '2.23.42.0.9': ('setct-AuthRevReqBaggage', ),
- '2.23.42.0.10': ('setct-AuthRevResBaggage', ),
- '2.23.42.0.11': ('setct-CapTokenSeq', ),
- '2.23.42.0.12': ('setct-PInitResData', ),
- '2.23.42.0.13': ('setct-PI-TBS', ),
- '2.23.42.0.14': ('setct-PResData', ),
- '2.23.42.0.16': ('setct-AuthReqTBS', ),
- '2.23.42.0.17': ('setct-AuthResTBS', ),
- '2.23.42.0.18': ('setct-AuthResTBSX', ),
- '2.23.42.0.19': ('setct-AuthTokenTBS', ),
- '2.23.42.0.20': ('setct-CapTokenData', ),
- '2.23.42.0.21': ('setct-CapTokenTBS', ),
- '2.23.42.0.22': ('setct-AcqCardCodeMsg', ),
- '2.23.42.0.23': ('setct-AuthRevReqTBS', ),
- '2.23.42.0.24': ('setct-AuthRevResData', ),
- '2.23.42.0.25': ('setct-AuthRevResTBS', ),
- '2.23.42.0.26': ('setct-CapReqTBS', ),
- '2.23.42.0.27': ('setct-CapReqTBSX', ),
- '2.23.42.0.28': ('setct-CapResData', ),
- '2.23.42.0.29': ('setct-CapRevReqTBS', ),
- '2.23.42.0.30': ('setct-CapRevReqTBSX', ),
- '2.23.42.0.31': ('setct-CapRevResData', ),
- '2.23.42.0.32': ('setct-CredReqTBS', ),
- '2.23.42.0.33': ('setct-CredReqTBSX', ),
- '2.23.42.0.34': ('setct-CredResData', ),
- '2.23.42.0.35': ('setct-CredRevReqTBS', ),
- '2.23.42.0.36': ('setct-CredRevReqTBSX', ),
- '2.23.42.0.37': ('setct-CredRevResData', ),
- '2.23.42.0.38': ('setct-PCertReqData', ),
- '2.23.42.0.39': ('setct-PCertResTBS', ),
- '2.23.42.0.40': ('setct-BatchAdminReqData', ),
- '2.23.42.0.41': ('setct-BatchAdminResData', ),
- '2.23.42.0.42': ('setct-CardCInitResTBS', ),
- '2.23.42.0.43': ('setct-MeAqCInitResTBS', ),
- '2.23.42.0.44': ('setct-RegFormResTBS', ),
- '2.23.42.0.45': ('setct-CertReqData', ),
- '2.23.42.0.46': ('setct-CertReqTBS', ),
- '2.23.42.0.47': ('setct-CertResData', ),
- '2.23.42.0.48': ('setct-CertInqReqTBS', ),
- '2.23.42.0.49': ('setct-ErrorTBS', ),
- '2.23.42.0.50': ('setct-PIDualSignedTBE', ),
- '2.23.42.0.51': ('setct-PIUnsignedTBE', ),
- '2.23.42.0.52': ('setct-AuthReqTBE', ),
- '2.23.42.0.53': ('setct-AuthResTBE', ),
- '2.23.42.0.54': ('setct-AuthResTBEX', ),
- '2.23.42.0.55': ('setct-AuthTokenTBE', ),
- '2.23.42.0.56': ('setct-CapTokenTBE', ),
- '2.23.42.0.57': ('setct-CapTokenTBEX', ),
- '2.23.42.0.58': ('setct-AcqCardCodeMsgTBE', ),
- '2.23.42.0.59': ('setct-AuthRevReqTBE', ),
- '2.23.42.0.60': ('setct-AuthRevResTBE', ),
- '2.23.42.0.61': ('setct-AuthRevResTBEB', ),
- '2.23.42.0.62': ('setct-CapReqTBE', ),
- '2.23.42.0.63': ('setct-CapReqTBEX', ),
- '2.23.42.0.64': ('setct-CapResTBE', ),
- '2.23.42.0.65': ('setct-CapRevReqTBE', ),
- '2.23.42.0.66': ('setct-CapRevReqTBEX', ),
- '2.23.42.0.67': ('setct-CapRevResTBE', ),
- '2.23.42.0.68': ('setct-CredReqTBE', ),
- '2.23.42.0.69': ('setct-CredReqTBEX', ),
- '2.23.42.0.70': ('setct-CredResTBE', ),
- '2.23.42.0.71': ('setct-CredRevReqTBE', ),
- '2.23.42.0.72': ('setct-CredRevReqTBEX', ),
- '2.23.42.0.73': ('setct-CredRevResTBE', ),
- '2.23.42.0.74': ('setct-BatchAdminReqTBE', ),
- '2.23.42.0.75': ('setct-BatchAdminResTBE', ),
- '2.23.42.0.76': ('setct-RegFormReqTBE', ),
- '2.23.42.0.77': ('setct-CertReqTBE', ),
- '2.23.42.0.78': ('setct-CertReqTBEX', ),
- '2.23.42.0.79': ('setct-CertResTBE', ),
- '2.23.42.0.80': ('setct-CRLNotificationTBS', ),
- '2.23.42.0.81': ('setct-CRLNotificationResTBS', ),
- '2.23.42.0.82': ('setct-BCIDistributionTBS', ),
- '2.23.42.1': ('message extensions', 'set-msgExt'),
- '2.23.42.1.1': ('generic cryptogram', 'setext-genCrypt'),
- '2.23.42.1.3': ('merchant initiated auth', 'setext-miAuth'),
- '2.23.42.1.4': ('setext-pinSecure', ),
- '2.23.42.1.5': ('setext-pinAny', ),
- '2.23.42.1.7': ('setext-track2', ),
- '2.23.42.1.8': ('additional verification', 'setext-cv'),
- '2.23.42.3': ('set-attr', ),
- '2.23.42.3.0': ('setAttr-Cert', ),
- '2.23.42.3.0.0': ('set-rootKeyThumb', ),
- '2.23.42.3.0.1': ('set-addPolicy', ),
- '2.23.42.3.1': ('payment gateway capabilities', 'setAttr-PGWYcap'),
- '2.23.42.3.2': ('setAttr-TokenType', ),
- '2.23.42.3.2.1': ('setAttr-Token-EMV', ),
- '2.23.42.3.2.2': ('setAttr-Token-B0Prime', ),
- '2.23.42.3.3': ('issuer capabilities', 'setAttr-IssCap'),
- '2.23.42.3.3.3': ('setAttr-IssCap-CVM', ),
- '2.23.42.3.3.3.1': ('generate cryptogram', 'setAttr-GenCryptgrm'),
- '2.23.42.3.3.4': ('setAttr-IssCap-T2', ),
- '2.23.42.3.3.4.1': ('encrypted track 2', 'setAttr-T2Enc'),
- '2.23.42.3.3.4.2': ('cleartext track 2', 'setAttr-T2cleartxt'),
- '2.23.42.3.3.5': ('setAttr-IssCap-Sig', ),
- '2.23.42.3.3.5.1': ('ICC or token signature', 'setAttr-TokICCsig'),
- '2.23.42.3.3.5.2': ('secure device signature', 'setAttr-SecDevSig'),
- '2.23.42.5': ('set-policy', ),
- '2.23.42.5.0': ('set-policy-root', ),
- '2.23.42.7': ('certificate extensions', 'set-certExt'),
- '2.23.42.7.0': ('setCext-hashedRoot', ),
- '2.23.42.7.1': ('setCext-certType', ),
- '2.23.42.7.2': ('setCext-merchData', ),
- '2.23.42.7.3': ('setCext-cCertRequired', ),
- '2.23.42.7.4': ('setCext-tunneling', ),
- '2.23.42.7.5': ('setCext-setExt', ),
- '2.23.42.7.6': ('setCext-setQualf', ),
- '2.23.42.7.7': ('setCext-PGWYcapabilities', ),
- '2.23.42.7.8': ('setCext-TokenIdentifier', ),
- '2.23.42.7.9': ('setCext-Track2Data', ),
- '2.23.42.7.10': ('setCext-TokenType', ),
- '2.23.42.7.11': ('setCext-IssuerCapabilities', ),
- '2.23.42.8': ('set-brand', ),
- '2.23.42.8.1': ('set-brand-IATA-ATA', ),
- '2.23.42.8.4': ('set-brand-Visa', ),
- '2.23.42.8.5': ('set-brand-MasterCard', ),
- '2.23.42.8.30': ('set-brand-Diners', ),
- '2.23.42.8.34': ('set-brand-AmericanExpress', ),
- '2.23.42.8.35': ('set-brand-JCB', ),
- '2.23.42.8.6011': ('set-brand-Novus', ),
- '2.23.43': ('wap', ),
- '2.23.43.1': ('wap-wsg', ),
- '2.23.43.1.4': ('wap-wsg-idm-ecid', ),
- '2.23.43.1.4.1': ('wap-wsg-idm-ecid-wtls1', ),
- '2.23.43.1.4.3': ('wap-wsg-idm-ecid-wtls3', ),
- '2.23.43.1.4.4': ('wap-wsg-idm-ecid-wtls4', ),
- '2.23.43.1.4.5': ('wap-wsg-idm-ecid-wtls5', ),
- '2.23.43.1.4.6': ('wap-wsg-idm-ecid-wtls6', ),
- '2.23.43.1.4.7': ('wap-wsg-idm-ecid-wtls7', ),
- '2.23.43.1.4.8': ('wap-wsg-idm-ecid-wtls8', ),
- '2.23.43.1.4.9': ('wap-wsg-idm-ecid-wtls9', ),
- '2.23.43.1.4.10': ('wap-wsg-idm-ecid-wtls10', ),
- '2.23.43.1.4.11': ('wap-wsg-idm-ecid-wtls11', ),
- '2.23.43.1.4.12': ('wap-wsg-idm-ecid-wtls12', ),
-}
-# #####################################################################################
-# #####################################################################################
-
-_OID_LOOKUP = dict()
-_NORMALIZE_NAMES = dict()
-_NORMALIZE_NAMES_SHORT = dict()
-
-for dotted, names in _OID_MAP.items():
- for name in names:
- if name in _NORMALIZE_NAMES and _OID_LOOKUP[name] != dotted:
- raise AssertionError(
- 'Name collision during setup: "{0}" for OIDs {1} and {2}'
- .format(name, dotted, _OID_LOOKUP[name])
- )
- _NORMALIZE_NAMES[name] = names[0]
- _NORMALIZE_NAMES_SHORT[name] = names[-1]
- _OID_LOOKUP[name] = dotted
-for alias, original in [('userID', 'userId')]:
- if alias in _NORMALIZE_NAMES:
- raise AssertionError(
- 'Name collision during adding aliases: "{0}" (alias for "{1}") is already mapped to OID {2}'
- .format(alias, original, _OID_LOOKUP[alias])
- )
- _NORMALIZE_NAMES[alias] = original
- _NORMALIZE_NAMES_SHORT[alias] = _NORMALIZE_NAMES_SHORT[original]
- _OID_LOOKUP[alias] = _OID_LOOKUP[original]
-
-
-def pyopenssl_normalize_name(name, short=False):
- nid = OpenSSL._util.lib.OBJ_txt2nid(to_bytes(name))
- if nid != 0:
- b_name = OpenSSL._util.lib.OBJ_nid2ln(nid)
- name = to_text(OpenSSL._util.ffi.string(b_name))
- if short:
- return _NORMALIZE_NAMES_SHORT.get(name, name)
- else:
- return _NORMALIZE_NAMES.get(name, name)
-
-
-# #####################################################################################
-# #####################################################################################
-# # This excerpt is dual licensed under the terms of the Apache License, Version
-# # 2.0, and the BSD License. See the LICENSE file at
-# # https://github.com/pyca/cryptography/blob/master/LICENSE for complete details.
-# #
-# # Adapted from cryptography's hazmat/backends/openssl/decode_asn1.py
-# #
-# # Copyright (c) 2015, 2016 Paul Kehrer (@reaperhulk)
-# # Copyright (c) 2017 Fraser Tweedale (@frasertweedale)
-# #
-# # Relevant commits from cryptography project (https://github.com/pyca/cryptography):
-# # pyca/cryptography@719d536dd691e84e208534798f2eb4f82aaa2e07
-# # pyca/cryptography@5ab6d6a5c05572bd1c75f05baf264a2d0001894a
-# # pyca/cryptography@2e776e20eb60378e0af9b7439000d0e80da7c7e3
-# # pyca/cryptography@fb309ed24647d1be9e319b61b1f2aa8ebb87b90b
-# # pyca/cryptography@2917e460993c475c72d7146c50dc3bbc2414280d
-# # pyca/cryptography@3057f91ea9a05fb593825006d87a391286a4d828
-# # pyca/cryptography@d607dd7e5bc5c08854ec0c9baff70ba4a35be36f
-def _obj2txt(openssl_lib, openssl_ffi, obj):
- # Set to 80 on the recommendation of
- # https://www.openssl.org/docs/crypto/OBJ_nid2ln.html#return_values
- #
- # But OIDs longer than this occur in real life (e.g. Active
- # Directory makes some very long OIDs). So we need to detect
- # and properly handle the case where the default buffer is not
- # big enough.
- #
- buf_len = 80
- buf = openssl_ffi.new("char[]", buf_len)
-
-    # 'res' is the number of bytes that *would* be written if the
-    # buffer were large enough. If 'res' > buf_len - 1, we need to
-    # allocate a big-enough buffer and go again.
- res = openssl_lib.OBJ_obj2txt(buf, buf_len, obj, 1)
- if res > buf_len - 1: # account for terminating null byte
- buf_len = res + 1
- buf = openssl_ffi.new("char[]", buf_len)
- res = openssl_lib.OBJ_obj2txt(buf, buf_len, obj, 1)
- return openssl_ffi.buffer(buf, res)[:].decode()
-# #####################################################################################
-# #####################################################################################
-
-
-def cryptography_get_extensions_from_cert(cert):
- # Since cryptography won't give us the DER value for an extension
- # (that is only stored for unrecognized extensions), we have to re-do
-    # the extension parsing ourselves.
- result = dict()
- backend = cert._backend
- x509_obj = cert._x509
-
- for i in range(backend._lib.X509_get_ext_count(x509_obj)):
- ext = backend._lib.X509_get_ext(x509_obj, i)
- if ext == backend._ffi.NULL:
- continue
- crit = backend._lib.X509_EXTENSION_get_critical(ext)
- data = backend._lib.X509_EXTENSION_get_data(ext)
- backend.openssl_assert(data != backend._ffi.NULL)
- der = backend._ffi.buffer(data.data, data.length)[:]
- entry = dict(
- critical=(crit == 1),
- value=base64.b64encode(der),
- )
- oid = _obj2txt(backend._lib, backend._ffi, backend._lib.X509_EXTENSION_get_object(ext))
- result[oid] = entry
- return result
-
-
-def cryptography_get_extensions_from_csr(csr):
- # Since cryptography won't give us the DER value for an extension
- # (that is only stored for unrecognized extensions), we have to re-do
-    # the extension parsing ourselves.
- result = dict()
- backend = csr._backend
-
- extensions = backend._lib.X509_REQ_get_extensions(csr._x509_req)
- extensions = backend._ffi.gc(
- extensions,
- lambda ext: backend._lib.sk_X509_EXTENSION_pop_free(
- ext,
- backend._ffi.addressof(backend._lib._original_lib, "X509_EXTENSION_free")
- )
- )
-
- for i in range(backend._lib.sk_X509_EXTENSION_num(extensions)):
- ext = backend._lib.sk_X509_EXTENSION_value(extensions, i)
- if ext == backend._ffi.NULL:
- continue
- crit = backend._lib.X509_EXTENSION_get_critical(ext)
- data = backend._lib.X509_EXTENSION_get_data(ext)
- backend.openssl_assert(data != backend._ffi.NULL)
- der = backend._ffi.buffer(data.data, data.length)[:]
- entry = dict(
- critical=(crit == 1),
- value=base64.b64encode(der),
- )
- oid = _obj2txt(backend._lib, backend._ffi, backend._lib.X509_EXTENSION_get_object(ext))
- result[oid] = entry
- return result
-
-
-def pyopenssl_get_extensions_from_cert(cert):
- # While pyOpenSSL allows us to get an extension's DER value, it won't
- # give us the dotted string for an OID. So we have to do some magic to
- # get hold of it.
- result = dict()
- ext_count = cert.get_extension_count()
- for i in range(0, ext_count):
- ext = cert.get_extension(i)
- entry = dict(
- critical=bool(ext.get_critical()),
- value=base64.b64encode(ext.get_data()),
- )
- oid = _obj2txt(
- OpenSSL._util.lib,
- OpenSSL._util.ffi,
- OpenSSL._util.lib.X509_EXTENSION_get_object(ext._extension)
- )
- # This could also be done a bit simpler:
- #
- # oid = _obj2txt(OpenSSL._util.lib, OpenSSL._util.ffi, OpenSSL._util.lib.OBJ_nid2obj(ext._nid))
- #
-    # Unfortunately this gives the wrong result if the linked OpenSSL
- # doesn't know the OID. That's why we have to get the OID dotted string
- # similarly to how cryptography does it.
- result[oid] = entry
- return result
-
-
-def pyopenssl_get_extensions_from_csr(csr):
- # While pyOpenSSL allows us to get an extension's DER value, it won't
- # give us the dotted string for an OID. So we have to do some magic to
- # get hold of it.
- result = dict()
- for ext in csr.get_extensions():
- entry = dict(
- critical=bool(ext.get_critical()),
- value=base64.b64encode(ext.get_data()),
- )
- oid = _obj2txt(
- OpenSSL._util.lib,
- OpenSSL._util.ffi,
- OpenSSL._util.lib.X509_EXTENSION_get_object(ext._extension)
- )
- # This could also be done a bit simpler:
- #
- # oid = _obj2txt(OpenSSL._util.lib, OpenSSL._util.ffi, OpenSSL._util.lib.OBJ_nid2obj(ext._nid))
- #
-    # Unfortunately this gives the wrong result if the linked OpenSSL
- # doesn't know the OID. That's why we have to get the OID dotted string
- # similarly to how cryptography does it.
- result[oid] = entry
- return result
-
-
-def cryptography_name_to_oid(name):
- dotted = _OID_LOOKUP.get(name)
- if dotted is None:
- raise OpenSSLObjectError('Cannot find OID for "{0}"'.format(name))
- return x509.oid.ObjectIdentifier(dotted)
-
-
-def cryptography_oid_to_name(oid, short=False):
- dotted_string = oid.dotted_string
- names = _OID_MAP.get(dotted_string)
- name = names[0] if names else oid._name
- if short:
- return _NORMALIZE_NAMES_SHORT.get(name, name)
- else:
- return _NORMALIZE_NAMES.get(name, name)
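-
-# Example (illustrative only, not part of the original module): the OID map
-# above contains '2.5.4.3': ('commonName', 'CN'), so:
-#   cryptography_name_to_oid('CN')   # -> x509.oid.ObjectIdentifier('2.5.4.3')
-#   cryptography_oid_to_name(x509.oid.ObjectIdentifier('2.5.4.3'))  # -> 'commonName'
-#   cryptography_oid_to_name(x509.oid.ObjectIdentifier('2.5.4.3'), short=True)  # -> 'CN'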
-
-
-def cryptography_get_name(name):
- '''
-    Given a name string, returns the corresponding cryptography x509 general name object (for example x509.DNSName).
- Raises an OpenSSLObjectError if the name is unknown or cannot be parsed.
- '''
- try:
- if name.startswith('DNS:'):
- return x509.DNSName(to_text(name[4:]))
- if name.startswith('IP:'):
- return x509.IPAddress(ipaddress.ip_address(to_text(name[3:])))
- if name.startswith('email:'):
- return x509.RFC822Name(to_text(name[6:]))
- if name.startswith('URI:'):
- return x509.UniformResourceIdentifier(to_text(name[4:]))
- except Exception as e:
- raise OpenSSLObjectError('Cannot parse Subject Alternative Name "{0}": {1}'.format(name, e))
- if ':' not in name:
- raise OpenSSLObjectError('Cannot parse Subject Alternative Name "{0}" (forgot "DNS:" prefix?)'.format(name))
- raise OpenSSLObjectError('Cannot parse Subject Alternative Name "{0}" (potentially unsupported by cryptography backend)'.format(name))
-
-
-def _get_hex(bytesstr):
- if bytesstr is None:
- return bytesstr
- data = binascii.hexlify(bytesstr)
- data = to_text(b':'.join(data[i:i + 2] for i in range(0, len(data), 2)))
- return data
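-
-# Example (illustrative only): _get_hex(b'\x01\xab') == '01:ab' -- two hex
-# digits per input byte, joined with colons; None is passed through unchanged.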
-
-
-def cryptography_decode_name(name):
- '''
-    Given a cryptography x509 general name object (such as x509.DNSName), returns a string.
- Raises an OpenSSLObjectError if the name is not supported.
- '''
- if isinstance(name, x509.DNSName):
- return 'DNS:{0}'.format(name.value)
- if isinstance(name, x509.IPAddress):
- return 'IP:{0}'.format(name.value.compressed)
- if isinstance(name, x509.RFC822Name):
- return 'email:{0}'.format(name.value)
- if isinstance(name, x509.UniformResourceIdentifier):
- return 'URI:{0}'.format(name.value)
- if isinstance(name, x509.DirectoryName):
- # FIXME: test
- return 'DirName:' + ''.join(['/{0}:{1}'.format(attribute.oid._name, attribute.value) for attribute in name.value])
- if isinstance(name, x509.RegisteredID):
- # FIXME: test
- return 'RegisteredID:{0}'.format(name.value)
- if isinstance(name, x509.OtherName):
- # FIXME: test
- return '{0}:{1}'.format(name.type_id.dotted_string, _get_hex(name.value))
- raise OpenSSLObjectError('Cannot decode name "{0}"'.format(name))
-
-
-def _cryptography_get_keyusage(usage):
- '''
- Given a key usage identifier string, returns the parameter name used by cryptography's x509.KeyUsage().
- Raises an OpenSSLObjectError if the identifier is unknown.
- '''
- if usage in ('Digital Signature', 'digitalSignature'):
- return 'digital_signature'
- if usage in ('Non Repudiation', 'nonRepudiation'):
- return 'content_commitment'
- if usage in ('Key Encipherment', 'keyEncipherment'):
- return 'key_encipherment'
- if usage in ('Data Encipherment', 'dataEncipherment'):
- return 'data_encipherment'
- if usage in ('Key Agreement', 'keyAgreement'):
- return 'key_agreement'
- if usage in ('Certificate Sign', 'keyCertSign'):
- return 'key_cert_sign'
- if usage in ('CRL Sign', 'cRLSign'):
- return 'crl_sign'
- if usage in ('Encipher Only', 'encipherOnly'):
- return 'encipher_only'
- if usage in ('Decipher Only', 'decipherOnly'):
- return 'decipher_only'
- raise OpenSSLObjectError('Unknown key usage "{0}"'.format(usage))
-
-
-def cryptography_parse_key_usage_params(usages):
- '''
- Given a list of key usage identifier strings, returns the parameters for cryptography's x509.KeyUsage().
- Raises an OpenSSLObjectError if an identifier is unknown.
- '''
- params = dict(
- digital_signature=False,
- content_commitment=False,
- key_encipherment=False,
- data_encipherment=False,
- key_agreement=False,
- key_cert_sign=False,
- crl_sign=False,
- encipher_only=False,
- decipher_only=False,
- )
- for usage in usages:
- params[_cryptography_get_keyusage(usage)] = True
- return params
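-
-# Example (illustrative only):
-#   params = cryptography_parse_key_usage_params(['digitalSignature', 'CRL Sign'])
-# sets digital_signature=True and crl_sign=True (all other flags False), so
-# the result can be passed on directly as x509.KeyUsage(**params).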
-
-
-def cryptography_get_basic_constraints(constraints):
- '''
- Given a list of constraints, returns a tuple (ca, path_length).
- Raises an OpenSSLObjectError if a constraint is unknown or cannot be parsed.
- '''
- ca = False
- path_length = None
- if constraints:
- for constraint in constraints:
- if constraint.startswith('CA:'):
- if constraint == 'CA:TRUE':
- ca = True
- elif constraint == 'CA:FALSE':
- ca = False
- else:
- raise OpenSSLObjectError('Unknown basic constraint value "{0}" for CA'.format(constraint[3:]))
- elif constraint.startswith('pathlen:'):
- v = constraint[len('pathlen:'):]
- try:
- path_length = int(v)
- except Exception as e:
- raise OpenSSLObjectError('Cannot parse path length constraint "{0}" ({1})'.format(v, e))
- else:
- raise OpenSSLObjectError('Unknown basic constraint "{0}"'.format(constraint))
- return ca, path_length
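-
-# Example (illustrative only):
-#   cryptography_get_basic_constraints(['CA:TRUE', 'pathlen:1'])  # -> (True, 1)
-#   cryptography_get_basic_constraints(None)                      # -> (False, None)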
-
-
-def binary_exp_mod(f, e, m):
- '''Computes f^e mod m in O(log e) multiplications modulo m.'''
- # Compute len_e = floor(log_2(e))
- len_e = -1
- x = e
- while x > 0:
- x >>= 1
- len_e += 1
- # Compute f**e mod m
- result = 1
- for k in range(len_e, -1, -1):
- result = (result * result) % m
- if ((e >> k) & 1) != 0:
- result = (result * f) % m
- return result
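-
-# Example (illustrative only): binary_exp_mod(3, 13, 7) == 3. Here e = 13 =
-# 0b1101, so the loop squares four times and multiplies in f for the three
-# set bits; indeed 3**13 = 1594323 = 227760 * 7 + 3.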
-
-
-def simple_gcd(a, b):
- '''Compute GCD of its two inputs.'''
- while b != 0:
- a, b = b, a % b
- return a
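-
-# Example (illustrative only): simple_gcd(54, 24) == 6, via the Euclidean
-# steps (54, 24) -> (24, 6) -> (6, 0).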
-
-
-def quick_is_not_prime(n):
- '''Does some quick checks to see if we can poke a hole into the primality of n.
-
- A result of `False` does **not** mean that the number is prime; it just means
- that we couldn't detect quickly whether it is not prime.
- '''
-    if n < 2:
-        return True
-    if n < 200:
-        # For tiny n, exact trial division is cheap; this also keeps small
-        # primes (including 2) from being misreported as composite.
-        return any(n % i == 0 for i in range(2, int(n ** 0.5) + 1))
-    # The constant in the next line is the product of all primes < 200;
-    # since n >= 200 here, any common factor proves that n is composite.
-    if simple_gcd(n, 7799922041683461553249199106329813876687996789903550945093032474868511536164700810) > 1:
-        return True
- # TODO: maybe do some iterations of Miller-Rabin to increase confidence
- # (https://en.wikipedia.org/wiki/Miller%E2%80%93Rabin_primality_test)
- return False
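-
-# Example (illustrative only): quick_is_not_prime(1003) is True, since
-# 1003 = 17 * 59 shares a factor with the product of all primes below 200;
-# quick_is_not_prime(2 ** 127 - 1) is False, which only means that no quick
-# disproof was found (this particular number happens to be prime).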
-
-
-python_version = (sys.version_info[0], sys.version_info[1])
-if python_version >= (3, 1) or (2, 7) <= python_version < (3, 0):
-    # int.bit_length() exists on Python 2.7 and 3.1+; Ansible still
-    # supports Python 2.6 on remote nodes, which needs the fallback below.
- def count_bits(no):
- no = abs(no)
- if no == 0:
- return 0
- return no.bit_length()
-else:
- # Slow, but works
- def count_bits(no):
- no = abs(no)
- count = 0
- while no > 0:
- no >>= 1
- count += 1
- return count
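-
-# Example (illustrative only): count_bits(255) == 8 and count_bits(256) == 9
-# with either implementation; count_bits(0) == 0 by convention.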
-
-
-PEM_START = '-----BEGIN '
-PEM_END = '-----'
-PKCS8_PRIVATEKEY_NAMES = ('PRIVATE KEY', 'ENCRYPTED PRIVATE KEY')
-PKCS1_PRIVATEKEY_SUFFIX = ' PRIVATE KEY'
-
-
-def identify_private_key_format(content):
- '''Given the contents of a private key file, identifies its format.'''
- # See https://github.com/openssl/openssl/blob/master/crypto/pem/pem_pkey.c#L40-L85
- # (PEM_read_bio_PrivateKey)
- # and https://github.com/openssl/openssl/blob/master/include/openssl/pem.h#L46-L47
- # (PEM_STRING_PKCS8, PEM_STRING_PKCS8INF)
- try:
- lines = content.decode('utf-8').splitlines(False)
-        if lines and lines[0].startswith(PEM_START) and lines[0].endswith(PEM_END) and len(lines[0]) > len(PEM_START) + len(PEM_END):
- name = lines[0][len(PEM_START):-len(PEM_END)]
- if name in PKCS8_PRIVATEKEY_NAMES:
- return 'pkcs8'
- if len(name) > len(PKCS1_PRIVATEKEY_SUFFIX) and name.endswith(PKCS1_PRIVATEKEY_SUFFIX):
- return 'pkcs1'
- return 'unknown-pem'
- except UnicodeDecodeError:
- pass
- return 'raw'
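-
-# Example (illustrative only):
-#   identify_private_key_format(b'-----BEGIN RSA PRIVATE KEY-----\n...')  # 'pkcs1'
-#   identify_private_key_format(b'-----BEGIN PRIVATE KEY-----\n...')      # 'pkcs8'
-# Content that does not decode as UTF-8 (e.g. DER data) is reported as 'raw'.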
-
-
-def cryptography_key_needs_digest_for_signing(key):
- '''Tests whether the given private key requires a digest algorithm for signing.
-
- Ed25519 and Ed448 keys do not; they need None to be passed as the digest algorithm.
- '''
- if CRYPTOGRAPHY_HAS_ED25519 and isinstance(key, cryptography.hazmat.primitives.asymmetric.ed25519.Ed25519PrivateKey):
- return False
- if CRYPTOGRAPHY_HAS_ED448 and isinstance(key, cryptography.hazmat.primitives.asymmetric.ed448.Ed448PrivateKey):
- return False
- return True
-
-
-def cryptography_compare_public_keys(key1, key2):
- '''Tests whether two public keys are the same.
-
- Needs special logic for Ed25519 and Ed448 keys, since they do not have public_numbers().
- '''
- if CRYPTOGRAPHY_HAS_ED25519:
- a = isinstance(key1, cryptography.hazmat.primitives.asymmetric.ed25519.Ed25519PublicKey)
- b = isinstance(key2, cryptography.hazmat.primitives.asymmetric.ed25519.Ed25519PublicKey)
- if a or b:
- if not a or not b:
- return False
- a = key1.public_bytes(serialization.Encoding.Raw, serialization.PublicFormat.Raw)
- b = key2.public_bytes(serialization.Encoding.Raw, serialization.PublicFormat.Raw)
- return a == b
- if CRYPTOGRAPHY_HAS_ED448:
- a = isinstance(key1, cryptography.hazmat.primitives.asymmetric.ed448.Ed448PublicKey)
- b = isinstance(key2, cryptography.hazmat.primitives.asymmetric.ed448.Ed448PublicKey)
- if a or b:
- if not a or not b:
- return False
- a = key1.public_bytes(serialization.Encoding.Raw, serialization.PublicFormat.Raw)
- b = key2.public_bytes(serialization.Encoding.Raw, serialization.PublicFormat.Raw)
- return a == b
- return key1.public_numbers() == key2.public_numbers()
-
-
-if HAS_CRYPTOGRAPHY:
- REVOCATION_REASON_MAP = {
- 'unspecified': x509.ReasonFlags.unspecified,
- 'key_compromise': x509.ReasonFlags.key_compromise,
- 'ca_compromise': x509.ReasonFlags.ca_compromise,
- 'affiliation_changed': x509.ReasonFlags.affiliation_changed,
- 'superseded': x509.ReasonFlags.superseded,
- 'cessation_of_operation': x509.ReasonFlags.cessation_of_operation,
- 'certificate_hold': x509.ReasonFlags.certificate_hold,
- 'privilege_withdrawn': x509.ReasonFlags.privilege_withdrawn,
- 'aa_compromise': x509.ReasonFlags.aa_compromise,
- 'remove_from_crl': x509.ReasonFlags.remove_from_crl,
- }
- REVOCATION_REASON_MAP_INVERSE = dict()
- for k, v in REVOCATION_REASON_MAP.items():
- REVOCATION_REASON_MAP_INVERSE[v] = k
-
-
-def cryptography_decode_revoked_certificate(cert):
- result = {
- 'serial_number': cert.serial_number,
- 'revocation_date': cert.revocation_date,
- 'issuer': None,
- 'issuer_critical': False,
- 'reason': None,
- 'reason_critical': False,
- 'invalidity_date': None,
- 'invalidity_date_critical': False,
- }
- try:
- ext = cert.extensions.get_extension_for_class(x509.CertificateIssuer)
- result['issuer'] = list(ext.value)
- result['issuer_critical'] = ext.critical
- except x509.ExtensionNotFound:
- pass
- try:
- ext = cert.extensions.get_extension_for_class(x509.CRLReason)
- result['reason'] = ext.value.reason
- result['reason_critical'] = ext.critical
- except x509.ExtensionNotFound:
- pass
- try:
- ext = cert.extensions.get_extension_for_class(x509.InvalidityDate)
- result['invalidity_date'] = ext.value.invalidity_date
- result['invalidity_date_critical'] = ext.critical
- except x509.ExtensionNotFound:
- pass
- return result
diff --git a/test/support/integration/plugins/module_utils/database.py b/test/support/integration/plugins/module_utils/database.py
deleted file mode 100644
index 014939a2..00000000
--- a/test/support/integration/plugins/module_utils/database.py
+++ /dev/null
@@ -1,142 +0,0 @@
-# This code is part of Ansible, but is an independent component.
-# This particular file snippet, and this file snippet only, is BSD licensed.
-# Modules you write using this snippet, which is embedded dynamically by Ansible
-# still belong to the author of the module, and may assign their own license
-# to the complete work.
-#
-# Copyright (c) 2014, Toshio Kuratomi <tkuratomi@ansible.com>
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without modification,
-# are permitted provided that the following conditions are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright notice,
-# this list of conditions and the following disclaimer in the documentation
-# and/or other materials provided with the distribution.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
-# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
-# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
-# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
-# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-class SQLParseError(Exception):
- pass
-
-
-class UnclosedQuoteError(SQLParseError):
- pass
-
-
-# maps a type of identifier to the maximum number of dot levels that are
-# allowed to specify that identifier. For example, a database column can be
-# specified by up to 4 levels: database.schema.table.column
-_PG_IDENTIFIER_TO_DOT_LEVEL = dict(
- database=1,
- schema=2,
- table=3,
- column=4,
- role=1,
- tablespace=1,
- sequence=3,
- publication=1,
-)
-_MYSQL_IDENTIFIER_TO_DOT_LEVEL = dict(database=1, table=2, column=3, role=1, vars=1)
-
-
-def _find_end_quote(identifier, quote_char):
- accumulate = 0
- while True:
- try:
- quote = identifier.index(quote_char)
- except ValueError:
- raise UnclosedQuoteError
- accumulate = accumulate + quote
- try:
- next_char = identifier[quote + 1]
- except IndexError:
- return accumulate
- if next_char == quote_char:
- try:
- identifier = identifier[quote + 2:]
- accumulate = accumulate + 2
- except IndexError:
- raise UnclosedQuoteError
- else:
- return accumulate
-
-
-def _identifier_parse(identifier, quote_char):
- if not identifier:
- raise SQLParseError('Identifier name unspecified or unquoted trailing dot')
-
- already_quoted = False
- if identifier.startswith(quote_char):
- already_quoted = True
- try:
- end_quote = _find_end_quote(identifier[1:], quote_char=quote_char) + 1
- except UnclosedQuoteError:
- already_quoted = False
- else:
- if end_quote < len(identifier) - 1:
- if identifier[end_quote + 1] == '.':
- dot = end_quote + 1
- first_identifier = identifier[:dot]
- next_identifier = identifier[dot + 1:]
- further_identifiers = _identifier_parse(next_identifier, quote_char)
- further_identifiers.insert(0, first_identifier)
- else:
- raise SQLParseError('User escaped identifiers must escape extra quotes')
- else:
- further_identifiers = [identifier]
-
- if not already_quoted:
- try:
- dot = identifier.index('.')
- except ValueError:
- identifier = identifier.replace(quote_char, quote_char * 2)
- identifier = ''.join((quote_char, identifier, quote_char))
- further_identifiers = [identifier]
- else:
- if dot == 0 or dot >= len(identifier) - 1:
- identifier = identifier.replace(quote_char, quote_char * 2)
- identifier = ''.join((quote_char, identifier, quote_char))
- further_identifiers = [identifier]
- else:
- first_identifier = identifier[:dot]
- next_identifier = identifier[dot + 1:]
- further_identifiers = _identifier_parse(next_identifier, quote_char)
- first_identifier = first_identifier.replace(quote_char, quote_char * 2)
- first_identifier = ''.join((quote_char, first_identifier, quote_char))
- further_identifiers.insert(0, first_identifier)
-
- return further_identifiers
-
-
-def pg_quote_identifier(identifier, id_type):
- identifier_fragments = _identifier_parse(identifier, quote_char='"')
- if len(identifier_fragments) > _PG_IDENTIFIER_TO_DOT_LEVEL[id_type]:
- raise SQLParseError('PostgreSQL does not support %s with more than %i dots' % (id_type, _PG_IDENTIFIER_TO_DOT_LEVEL[id_type]))
- return '.'.join(identifier_fragments)
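-
-# Example (illustrative only): pg_quote_identifier('public.my_table', 'table')
-# returns '"public"."my_table"'; passing more dotted levels than the id_type
-# allows (e.g. five levels for a column) raises SQLParseError.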
-
-
-def mysql_quote_identifier(identifier, id_type):
- identifier_fragments = _identifier_parse(identifier, quote_char='`')
- if (len(identifier_fragments) - 1) > _MYSQL_IDENTIFIER_TO_DOT_LEVEL[id_type]:
- raise SQLParseError('MySQL does not support %s with more than %i dots' % (id_type, _MYSQL_IDENTIFIER_TO_DOT_LEVEL[id_type]))
-
- special_cased_fragments = []
- for fragment in identifier_fragments:
- if fragment == '`*`':
- special_cased_fragments.append('*')
- else:
- special_cased_fragments.append(fragment)
-
- return '.'.join(special_cased_fragments)
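As a quick illustration of the helpers above (a hedged sketch: it assumes this file is importable as ansible.module_utils.database, and the identifier values are made up):

    from ansible.module_utils.database import mysql_quote_identifier, pg_quote_identifier

    pg_quote_identifier('public.users', 'table')    # -> '"public"."users"'
    pg_quote_identifier('my"db', 'database')        # embedded quote is doubled: '"my""db"'
    mysql_quote_identifier('mydb.*', 'table')       # '`*`' is special-cased: '`mydb`.*'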
diff --git a/test/support/integration/plugins/module_utils/ecs/__init__.py b/test/support/integration/plugins/module_utils/ecs/__init__.py
deleted file mode 100644
index e69de29b..00000000
--- a/test/support/integration/plugins/module_utils/ecs/__init__.py
+++ /dev/null
diff --git a/test/support/integration/plugins/module_utils/ecs/api.py b/test/support/integration/plugins/module_utils/ecs/api.py
deleted file mode 100644
index d89b0333..00000000
--- a/test/support/integration/plugins/module_utils/ecs/api.py
+++ /dev/null
@@ -1,364 +0,0 @@
-# -*- coding: utf-8 -*-
-
-# This code is part of Ansible, but is an independent component.
-# This particular file snippet, and this file snippet only, is licensed under the
-# Modified BSD License. Modules you write using this snippet, which is embedded
-# dynamically by Ansible, still belong to the author of the module, and may assign
-# their own license to the complete work.
-#
-# Copyright (c), Entrust Datacard Corporation, 2019
-# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
-
-# Redistribution and use in source and binary forms, with or without modification,
-# are permitted provided that the following conditions are met:
-# 1. Redistributions of source code must retain the above copyright notice,
-# this list of conditions and the following disclaimer.
-# 2. Redistributions in binary form must reproduce the above copyright notice,
-# this list of conditions and the following disclaimer in the documentation
-# and/or other materials provided with the distribution.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
-# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
-# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
-# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
-# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-from __future__ import absolute_import, division, print_function
-
-__metaclass__ = type
-
-import json
-import os
-import re
-import time
-import traceback
-
-from ansible.module_utils._text import to_text, to_native
-from ansible.module_utils.basic import missing_required_lib
-from ansible.module_utils.six.moves.urllib.parse import urlencode
-from ansible.module_utils.six.moves.urllib.error import HTTPError
-from ansible.module_utils.urls import Request
-
-YAML_IMP_ERR = None
-try:
- import yaml
-except ImportError:
- YAML_FOUND = False
- YAML_IMP_ERR = traceback.format_exc()
-else:
- YAML_FOUND = True
-
-valid_file_format = re.compile(r".*(\.)(yml|yaml|json)$")
-
-
-def ecs_client_argument_spec():
- return dict(
- entrust_api_user=dict(type='str', required=True),
- entrust_api_key=dict(type='str', required=True, no_log=True),
- entrust_api_client_cert_path=dict(type='path', required=True),
- entrust_api_client_cert_key_path=dict(type='path', required=True, no_log=True),
- entrust_api_specification_path=dict(type='path', default='https://cloud.entrust.net/EntrustCloud/documentation/cms-api-2.1.0.yaml'),
- )
-
-
-class SessionConfigurationException(Exception):
- """ Raised if we cannot configure a session with the API """
-
- pass
-
-
-class RestOperationException(Exception):
- """ Encapsulate a REST API error """
-
- def __init__(self, error):
- self.status = to_native(error.get("status", None))
- self.errors = [to_native(err.get("message")) for err in error.get("errors", {})]
- self.message = to_native(" ".join(self.errors))
-
-
-def generate_docstring(operation_spec):
- """Generate a docstring for an operation defined in operation_spec (swagger)"""
- # Description of the operation
- docs = operation_spec.get("description", "No Description")
- docs += "\n\n"
-
- # Parameters of the operation
- parameters = operation_spec.get("parameters", [])
- if len(parameters) != 0:
- docs += "\tArguments:\n\n"
- for parameter in parameters:
- docs += "{0} ({1}:{2}): {3}\n".format(
- parameter.get("name"),
- parameter.get("type", "No Type"),
- "Required" if parameter.get("required", False) else "Not Required",
- parameter.get("description"),
- )
-
- return docs
-
-
-def bind(instance, method, operation_spec):
- def binding_scope_fn(*args, **kwargs):
- return method(instance, *args, **kwargs)
-
- # Make sure we don't confuse users; add the proper name and documentation to the function.
- # Users can use !help(<function>) to get help on the function from interactive python or pdb
- operation_name = operation_spec.get("operationId").split("Using")[0]
- binding_scope_fn.__name__ = str(operation_name)
- binding_scope_fn.__doc__ = generate_docstring(operation_spec)
-
- return binding_scope_fn
-
-
-class RestOperation(object):
- def __init__(self, session, uri, method, parameters=None):
- self.session = session
- self.method = method
- if parameters is None:
- self.parameters = {}
- else:
- self.parameters = parameters
- self.url = "{scheme}://{host}{base_path}{uri}".format(scheme="https", host=session._spec.get("host"), base_path=session._spec.get("basePath"), uri=uri)
-
- def restmethod(self, *args, **kwargs):
- """Do the hard work of making the request here"""
-
- # gather named path parameters and do substitution on the URL
- if self.parameters:
- path_parameters = {}
- body_parameters = {}
- query_parameters = {}
- for x in self.parameters:
- expected_location = x.get("in")
- key_name = x.get("name", None)
- key_value = kwargs.get(key_name, None)
- if expected_location == "path" and key_name and key_value:
- path_parameters.update({key_name: key_value})
- elif expected_location == "body" and key_name and key_value:
- body_parameters.update({key_name: key_value})
- elif expected_location == "query" and key_name and key_value:
- query_parameters.update({key_name: key_value})
-
- if len(body_parameters.keys()) >= 1:
- body_parameters = body_parameters.get(list(body_parameters.keys())[0])
- else:
- body_parameters = None
- else:
- path_parameters = {}
- query_parameters = {}
- body_parameters = None
-
-        # This will fail with a KeyError if we have not set the path parameters
- url = self.url.format(**path_parameters)
- if query_parameters:
-            # modify the URL to append the query parameters
- url = url + "?" + urlencode(query_parameters)
-
- try:
- if body_parameters:
- body_parameters_json = json.dumps(body_parameters)
- response = self.session.request.open(method=self.method, url=url, data=body_parameters_json)
- else:
- response = self.session.request.open(method=self.method, url=url)
- request_error = False
- except HTTPError as e:
- # An HTTPError has the same methods available as a valid response from request.open
- response = e
- request_error = True
-
- # Return the result if JSON and success ({} for empty responses)
- # Raise an exception if there was a failure.
- try:
- result_code = response.getcode()
- result = json.loads(response.read())
- except ValueError:
- result = {}
-
- if result or result == {}:
- if result_code and result_code < 400:
- return result
- else:
- raise RestOperationException(result)
-
- # Raise a generic RestOperationException if this fails
- raise RestOperationException({"status": result_code, "errors": [{"message": "REST Operation Failed"}]})
-
-
-class Resource(object):
- """ Implement basic CRUD operations against a path. """
-
- def __init__(self, session):
- self.session = session
- self.parameters = {}
-
- for url in session._spec.get("paths").keys():
- methods = session._spec.get("paths").get(url)
- for method in methods.keys():
- operation_spec = methods.get(method)
- operation_name = operation_spec.get("operationId", None)
- parameters = operation_spec.get("parameters")
-
- if not operation_name:
- if method.lower() == "post":
- operation_name = "Create"
- elif method.lower() == "get":
- operation_name = "Get"
- elif method.lower() == "put":
- operation_name = "Update"
- elif method.lower() == "delete":
- operation_name = "Delete"
- elif method.lower() == "patch":
- operation_name = "Patch"
- else:
- raise SessionConfigurationException(to_native("Invalid REST method type {0}".format(method)))
-
- # Get the non-parameter parts of the URL and append to the operation name
- # e.g /application/version -> GetApplicationVersion
- # e.g. /application/{id} -> GetApplication
- # This may lead to duplicates, which we must prevent.
- operation_name += re.sub(r"{(.*)}", "", url).replace("/", " ").title().replace(" ", "")
- operation_spec["operationId"] = operation_name
-
- op = RestOperation(session, url, method, parameters)
- setattr(self, operation_name, bind(self, op.restmethod, operation_spec))
-
-
-# Session to encapsulate the connection parameters of the module_utils Request object, the API spec, etc.
-class ECSSession(object):
- def __init__(self, name, **kwargs):
- """
- Initialize our session
- """
-
- self._set_config(name, **kwargs)
-
- def client(self):
- resource = Resource(self)
- return resource
-
- def _set_config(self, name, **kwargs):
- headers = {
- "Content-Type": "application/json",
- "Connection": "keep-alive",
- }
- self.request = Request(headers=headers, timeout=60)
-
- configurators = [self._read_config_vars]
- for configurator in configurators:
- self._config = configurator(name, **kwargs)
- if self._config:
- break
- if self._config is None:
- raise SessionConfigurationException(to_native("No Configuration Found."))
-
- # set up auth if passed
- entrust_api_user = self.get_config("entrust_api_user")
- entrust_api_key = self.get_config("entrust_api_key")
- if entrust_api_user and entrust_api_key:
- self.request.url_username = entrust_api_user
- self.request.url_password = entrust_api_key
- else:
- raise SessionConfigurationException(to_native("User and key must be provided."))
-
- # set up client certificate if passed (support all-in one or cert + key)
- entrust_api_cert = self.get_config("entrust_api_cert")
- entrust_api_cert_key = self.get_config("entrust_api_cert_key")
- if entrust_api_cert:
- self.request.client_cert = entrust_api_cert
- if entrust_api_cert_key:
- self.request.client_key = entrust_api_cert_key
- else:
- raise SessionConfigurationException(to_native("Client certificate for authentication to the API must be provided."))
-
- # set up the spec
- entrust_api_specification_path = self.get_config("entrust_api_specification_path")
-
- if not entrust_api_specification_path.startswith("http") and not os.path.isfile(entrust_api_specification_path):
- raise SessionConfigurationException(to_native("OpenAPI specification was not found at location {0}.".format(entrust_api_specification_path)))
- if not valid_file_format.match(entrust_api_specification_path):
- raise SessionConfigurationException(to_native("OpenAPI specification filename must end in .json, .yml or .yaml"))
-
- self.verify = True
-
- if entrust_api_specification_path.startswith("http"):
- try:
- http_response = Request().open(method="GET", url=entrust_api_specification_path)
- http_response_contents = http_response.read()
- if entrust_api_specification_path.endswith(".json"):
-                    self._spec = json.loads(http_response_contents)
- elif entrust_api_specification_path.endswith(".yml") or entrust_api_specification_path.endswith(".yaml"):
- self._spec = yaml.safe_load(http_response_contents)
- except HTTPError as e:
- raise SessionConfigurationException(to_native("Error downloading specification from address '{0}', received error code '{1}'".format(
- entrust_api_specification_path, e.getcode())))
- else:
- with open(entrust_api_specification_path) as f:
- if ".json" in entrust_api_specification_path:
- self._spec = json.load(f)
- elif ".yml" in entrust_api_specification_path or ".yaml" in entrust_api_specification_path:
- self._spec = yaml.safe_load(f)
-
- def get_config(self, item):
- return self._config.get(item, None)
-
- def _read_config_vars(self, name, **kwargs):
- """ Read configuration from variables passed to the module. """
- config = {}
-
- entrust_api_specification_path = kwargs.get("entrust_api_specification_path")
- if not entrust_api_specification_path or (not entrust_api_specification_path.startswith("http") and not os.path.isfile(entrust_api_specification_path)):
- raise SessionConfigurationException(
- to_native(
- "Parameter provided for entrust_api_specification_path of value '{0}' was not a valid file path or HTTPS address.".format(
- entrust_api_specification_path
- )
- )
- )
-
- for required_file in ["entrust_api_cert", "entrust_api_cert_key"]:
- file_path = kwargs.get(required_file)
- if not file_path or not os.path.isfile(file_path):
- raise SessionConfigurationException(
- to_native("Parameter provided for {0} of value '{1}' was not a valid file path.".format(required_file, file_path))
- )
-
- for required_var in ["entrust_api_user", "entrust_api_key"]:
- if not kwargs.get(required_var):
- raise SessionConfigurationException(to_native("Parameter provided for {0} was missing.".format(required_var)))
-
- config["entrust_api_cert"] = kwargs.get("entrust_api_cert")
- config["entrust_api_cert_key"] = kwargs.get("entrust_api_cert_key")
- config["entrust_api_specification_path"] = kwargs.get("entrust_api_specification_path")
- config["entrust_api_user"] = kwargs.get("entrust_api_user")
- config["entrust_api_key"] = kwargs.get("entrust_api_key")
-
- return config
-
-
-def ECSClient(entrust_api_user=None, entrust_api_key=None, entrust_api_cert=None, entrust_api_cert_key=None, entrust_api_specification_path=None):
- """Create an ECS client"""
-
- if not YAML_FOUND:
-        # Exception subclasses do not accept keyword arguments, so pass the formatted traceback positionally
-        raise SessionConfigurationException(missing_required_lib("PyYAML"), YAML_IMP_ERR)
-
- if entrust_api_specification_path is None:
- entrust_api_specification_path = "https://cloud.entrust.net/EntrustCloud/documentation/cms-api-2.1.0.yaml"
-
- # Not functionally necessary with current uses of this module_util, but better to be explicit for future use cases
- entrust_api_user = to_text(entrust_api_user)
- entrust_api_key = to_text(entrust_api_key)
- entrust_api_cert_key = to_text(entrust_api_cert_key)
- entrust_api_specification_path = to_text(entrust_api_specification_path)
-
- return ECSSession(
- "ecs",
- entrust_api_user=entrust_api_user,
- entrust_api_key=entrust_api_key,
- entrust_api_cert=entrust_api_cert,
- entrust_api_cert_key=entrust_api_cert_key,
- entrust_api_specification_path=entrust_api_specification_path,
- ).client()
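A hedged sketch of how a module would consume this client; the credential values are placeholders, and GetCertificates stands in for whatever operation names the loaded OpenAPI spec actually generates:

    from ansible.module_utils.ecs.api import ECSClient, RestOperationException

    client = ECSClient(
        entrust_api_user='apiuser',                  # placeholder
        entrust_api_key='apikey',                    # placeholder
        entrust_api_cert='/etc/ssl/client.pem',      # placeholder
        entrust_api_cert_key='/etc/ssl/client.key',  # placeholder
    )
    try:
        certs = client.GetCertificates()             # name derived from the spec at runtime
    except RestOperationException as e:
        print(e.status, e.message)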
diff --git a/test/support/integration/plugins/module_utils/mysql.py b/test/support/integration/plugins/module_utils/mysql.py
deleted file mode 100644
index 46198f36..00000000
--- a/test/support/integration/plugins/module_utils/mysql.py
+++ /dev/null
@@ -1,106 +0,0 @@
-# This code is part of Ansible, but is an independent component.
-# This particular file snippet, and this file snippet only, is BSD licensed.
-# Modules you write using this snippet, which is embedded dynamically by Ansible
-# still belong to the author of the module, and may assign their own license
-# to the complete work.
-#
-# Copyright (c), Jonathan Mainguy <jon@soh.re>, 2015
-# Most of this was originally added by Sven Schliesing @muffl0n in the mysql_user.py module
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without modification,
-# are permitted provided that the following conditions are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright notice,
-# this list of conditions and the following disclaimer in the documentation
-# and/or other materials provided with the distribution.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
-# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
-# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
-# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
-# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-import os
-
-try:
- import pymysql as mysql_driver
- _mysql_cursor_param = 'cursor'
-except ImportError:
- try:
- import MySQLdb as mysql_driver
- import MySQLdb.cursors
- _mysql_cursor_param = 'cursorclass'
- except ImportError:
- mysql_driver = None
-
-mysql_driver_fail_msg = 'The PyMySQL (Python 2.7 and Python 3.X) or MySQL-python (Python 2.X) module is required.'
-
-
-def mysql_connect(module, login_user=None, login_password=None, config_file='', ssl_cert=None, ssl_key=None, ssl_ca=None, db=None, cursor_class=None,
- connect_timeout=30, autocommit=False):
- config = {}
-
- if ssl_ca is not None or ssl_key is not None or ssl_cert is not None:
- config['ssl'] = {}
-
- if module.params['login_unix_socket']:
- config['unix_socket'] = module.params['login_unix_socket']
- else:
- config['host'] = module.params['login_host']
- config['port'] = module.params['login_port']
-
- if os.path.exists(config_file):
- config['read_default_file'] = config_file
-
- # If login_user or login_password are given, they should override the
- # config file
- if login_user is not None:
- config['user'] = login_user
- if login_password is not None:
- config['passwd'] = login_password
- if ssl_cert is not None:
- config['ssl']['cert'] = ssl_cert
- if ssl_key is not None:
- config['ssl']['key'] = ssl_key
- if ssl_ca is not None:
- config['ssl']['ca'] = ssl_ca
- if db is not None:
- config['db'] = db
- if connect_timeout is not None:
- config['connect_timeout'] = connect_timeout
-
- if _mysql_cursor_param == 'cursor':
- # In case of PyMySQL driver:
- db_connection = mysql_driver.connect(autocommit=autocommit, **config)
- else:
- # In case of MySQLdb driver
- db_connection = mysql_driver.connect(**config)
- if autocommit:
- db_connection.autocommit(True)
-
- if cursor_class == 'DictCursor':
- return db_connection.cursor(**{_mysql_cursor_param: mysql_driver.cursors.DictCursor}), db_connection
- else:
- return db_connection.cursor(), db_connection
-
-
-def mysql_common_argument_spec():
- return dict(
- login_user=dict(type='str', default=None),
- login_password=dict(type='str', no_log=True),
- login_host=dict(type='str', default='localhost'),
- login_port=dict(type='int', default=3306),
- login_unix_socket=dict(type='str'),
- config_file=dict(type='path', default='~/.my.cnf'),
- connect_timeout=dict(type='int', default=30),
- client_cert=dict(type='path', aliases=['ssl_cert']),
- client_key=dict(type='path', aliases=['ssl_key']),
- ca_cert=dict(type='path', aliases=['ssl_ca']),
- )
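For context, the call pattern expected from a module looks roughly like this (a sketch; the AnsibleModule boilerplate is abbreviated and the query is illustrative):

    from ansible.module_utils.basic import AnsibleModule
    from ansible.module_utils.mysql import (
        mysql_common_argument_spec, mysql_connect, mysql_driver, mysql_driver_fail_msg,
    )

    module = AnsibleModule(argument_spec=mysql_common_argument_spec())
    if mysql_driver is None:
        module.fail_json(msg=mysql_driver_fail_msg)

    cursor, db_connection = mysql_connect(
        module,
        login_user=module.params['login_user'],
        login_password=module.params['login_password'],
        config_file=module.params['config_file'],
        connect_timeout=module.params['connect_timeout'],
    )
    cursor.execute('SELECT VERSION()')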
diff --git a/test/support/integration/plugins/module_utils/postgres.py b/test/support/integration/plugins/module_utils/postgres.py
deleted file mode 100644
index 0ccc6ed7..00000000
--- a/test/support/integration/plugins/module_utils/postgres.py
+++ /dev/null
@@ -1,330 +0,0 @@
-# This code is part of Ansible, but is an independent component.
-# This particular file snippet, and this file snippet only, is BSD licensed.
-# Modules you write using this snippet, which is embedded dynamically by Ansible
-# still belong to the author of the module, and may assign their own license
-# to the complete work.
-#
-# Copyright (c), Ted Timmons <ted@timmons.me>, 2017.
-# Most of this was originally added by other creators in the postgresql_user module.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without modification,
-# are permitted provided that the following conditions are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright notice,
-# this list of conditions and the following disclaimer in the documentation
-# and/or other materials provided with the distribution.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
-# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
-# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
-# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
-# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-psycopg2 = None  # This line is needed for unit tests
-try:
- import psycopg2
- HAS_PSYCOPG2 = True
-except ImportError:
- HAS_PSYCOPG2 = False
-
-from ansible.module_utils.basic import missing_required_lib
-from ansible.module_utils._text import to_native
-from ansible.module_utils.six import iteritems
-from ansible.module_utils.compat.version import LooseVersion
-
-
-def postgres_common_argument_spec():
- """
- Return a dictionary with connection options.
-
-    The options are commonly used by most PostgreSQL modules.
- """
- return dict(
- login_user=dict(default='postgres'),
- login_password=dict(default='', no_log=True),
- login_host=dict(default=''),
- login_unix_socket=dict(default=''),
- port=dict(type='int', default=5432, aliases=['login_port']),
- ssl_mode=dict(default='prefer', choices=['allow', 'disable', 'prefer', 'require', 'verify-ca', 'verify-full']),
- ca_cert=dict(aliases=['ssl_rootcert']),
- )
-
-
-def ensure_required_libs(module):
- """Check required libraries."""
- if not HAS_PSYCOPG2:
- module.fail_json(msg=missing_required_lib('psycopg2'))
-
- if module.params.get('ca_cert') and LooseVersion(psycopg2.__version__) < LooseVersion('2.4.3'):
- module.fail_json(msg='psycopg2 must be at least 2.4.3 in order to use the ca_cert parameter')
-
-
-def connect_to_db(module, conn_params, autocommit=False, fail_on_conn=True):
- """Connect to a PostgreSQL database.
-
- Return psycopg2 connection object.
-
- Args:
- module (AnsibleModule) -- object of ansible.module_utils.basic.AnsibleModule class
- conn_params (dict) -- dictionary with connection parameters
-
- Kwargs:
- autocommit (bool) -- commit automatically (default False)
- fail_on_conn (bool) -- fail if connection failed or just warn and return None (default True)
- """
- ensure_required_libs(module)
-
- db_connection = None
- try:
- db_connection = psycopg2.connect(**conn_params)
- if autocommit:
- if LooseVersion(psycopg2.__version__) >= LooseVersion('2.4.2'):
- db_connection.set_session(autocommit=True)
- else:
- db_connection.set_isolation_level(psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT)
-
- # Switch role, if specified:
- if module.params.get('session_role'):
- cursor = db_connection.cursor(cursor_factory=psycopg2.extras.DictCursor)
-
- try:
- cursor.execute('SET ROLE "%s"' % module.params['session_role'])
- except Exception as e:
- module.fail_json(msg="Could not switch role: %s" % to_native(e))
- finally:
- cursor.close()
-
- except TypeError as e:
- if 'sslrootcert' in e.args[0]:
- module.fail_json(msg='Postgresql server must be at least '
- 'version 8.4 to support sslrootcert')
-
- if fail_on_conn:
- module.fail_json(msg="unable to connect to database: %s" % to_native(e))
- else:
- module.warn("PostgreSQL server is unavailable: %s" % to_native(e))
- db_connection = None
-
- except Exception as e:
- if fail_on_conn:
- module.fail_json(msg="unable to connect to database: %s" % to_native(e))
- else:
- module.warn("PostgreSQL server is unavailable: %s" % to_native(e))
- db_connection = None
-
- return db_connection
-
-
-def exec_sql(obj, query, query_params=None, ddl=False, add_to_executed=True, dont_exec=False):
- """Execute SQL.
-
- Auxiliary function for PostgreSQL user classes.
-
- Returns a query result if possible or True/False if ddl=True arg was passed.
-    It is necessary for statements that don't return any result (like DDL queries).
-
- Args:
- obj (obj) -- must be an object of a user class.
- The object must have module (AnsibleModule class object) and
- cursor (psycopg cursor object) attributes
- query (str) -- SQL query to execute
-
- Kwargs:
- query_params (dict or tuple) -- Query parameters to prevent SQL injections,
- could be a dict or tuple
- ddl (bool) -- must return True or False instead of rows (typical for DDL queries)
- (default False)
- add_to_executed (bool) -- append the query to obj.executed_queries attribute
- dont_exec (bool) -- used with add_to_executed=True to generate a query, add it
- to obj.executed_queries list and return True (default False)
- """
-
- if dont_exec:
- # This is usually needed to return queries in check_mode
- # without execution
- query = obj.cursor.mogrify(query, query_params)
- if add_to_executed:
- obj.executed_queries.append(query)
-
- return True
-
- try:
- if query_params is not None:
- obj.cursor.execute(query, query_params)
- else:
- obj.cursor.execute(query)
-
- if add_to_executed:
- if query_params is not None:
- obj.executed_queries.append(obj.cursor.mogrify(query, query_params))
- else:
- obj.executed_queries.append(query)
-
- if not ddl:
- res = obj.cursor.fetchall()
- return res
- return True
- except Exception as e:
- obj.module.fail_json(msg="Cannot execute SQL '%s': %s" % (query, to_native(e)))
- return False
-
-
-def get_conn_params(module, params_dict, warn_db_default=True):
- """Get connection parameters from the passed dictionary.
-
- Return a dictionary with parameters to connect to PostgreSQL server.
-
- Args:
- module (AnsibleModule) -- object of ansible.module_utils.basic.AnsibleModule class
- params_dict (dict) -- dictionary with variables
-
- Kwargs:
- warn_db_default (bool) -- warn that the default DB is used (default True)
- """
-    # To use default values, keyword arguments must be absent, so
-    # check which values are empty and don't include them in the return dictionary
- params_map = {
- "login_host": "host",
- "login_user": "user",
- "login_password": "password",
- "port": "port",
- "ssl_mode": "sslmode",
- "ca_cert": "sslrootcert"
- }
-
- # Might be different in the modules:
- if params_dict.get('db'):
- params_map['db'] = 'database'
- elif params_dict.get('database'):
- params_map['database'] = 'database'
- elif params_dict.get('login_db'):
- params_map['login_db'] = 'database'
- else:
- if warn_db_default:
-            module.warn('Database name has not been passed; '
-                        'using the default database to connect to.')
-
- kw = dict((params_map[k], v) for (k, v) in iteritems(params_dict)
- if k in params_map and v != '' and v is not None)
-
- # If a login_unix_socket is specified, incorporate it here.
- is_localhost = "host" not in kw or kw["host"] is None or kw["host"] == "localhost"
- if is_localhost and params_dict["login_unix_socket"] != "":
- kw["host"] = params_dict["login_unix_socket"]
-
- return kw
-
-
-class PgMembership(object):
- def __init__(self, module, cursor, groups, target_roles, fail_on_role=True):
- self.module = module
- self.cursor = cursor
- self.target_roles = [r.strip() for r in target_roles]
- self.groups = [r.strip() for r in groups]
- self.executed_queries = []
- self.granted = {}
- self.revoked = {}
- self.fail_on_role = fail_on_role
- self.non_existent_roles = []
- self.changed = False
- self.__check_roles_exist()
-
- def grant(self):
- for group in self.groups:
- self.granted[group] = []
-
- for role in self.target_roles:
- # If role is in a group now, pass:
- if self.__check_membership(group, role):
- continue
-
- query = 'GRANT "%s" TO "%s"' % (group, role)
- self.changed = exec_sql(self, query, ddl=True)
-
- if self.changed:
- self.granted[group].append(role)
-
- return self.changed
-
- def revoke(self):
- for group in self.groups:
- self.revoked[group] = []
-
- for role in self.target_roles:
- # If role is not in a group now, pass:
- if not self.__check_membership(group, role):
- continue
-
- query = 'REVOKE "%s" FROM "%s"' % (group, role)
- self.changed = exec_sql(self, query, ddl=True)
-
- if self.changed:
- self.revoked[group].append(role)
-
- return self.changed
-
- def __check_membership(self, src_role, dst_role):
- query = ("SELECT ARRAY(SELECT b.rolname FROM "
- "pg_catalog.pg_auth_members m "
- "JOIN pg_catalog.pg_roles b ON (m.roleid = b.oid) "
- "WHERE m.member = r.oid) "
- "FROM pg_catalog.pg_roles r "
- "WHERE r.rolname = %(dst_role)s")
-
- res = exec_sql(self, query, query_params={'dst_role': dst_role}, add_to_executed=False)
- membership = []
- if res:
- membership = res[0][0]
-
- if not membership:
- return False
-
- if src_role in membership:
- return True
-
- return False
-
- def __check_roles_exist(self):
- existent_groups = self.__roles_exist(self.groups)
- existent_roles = self.__roles_exist(self.target_roles)
-
- for group in self.groups:
- if group not in existent_groups:
- if self.fail_on_role:
- self.module.fail_json(msg="Role %s does not exist" % group)
- else:
- self.module.warn("Role %s does not exist, pass" % group)
- self.non_existent_roles.append(group)
-
- for role in self.target_roles:
- if role not in existent_roles:
- if self.fail_on_role:
- self.module.fail_json(msg="Role %s does not exist" % role)
- else:
- self.module.warn("Role %s does not exist, pass" % role)
-
- if role not in self.groups:
- self.non_existent_roles.append(role)
-
- else:
- if self.fail_on_role:
-                    self.module.exit_json(msg="Role '%s' is a member of role '%s'" % (role, role))
-                else:
-                    self.module.warn("Role '%s' is a member of role '%s', pass" % (role, role))
-
- # Update role lists, excluding non existent roles:
- self.groups = [g for g in self.groups if g not in self.non_existent_roles]
-
- self.target_roles = [r for r in self.target_roles if r not in self.non_existent_roles]
-
- def __roles_exist(self, roles):
- tmp = ["'" + x + "'" for x in roles]
- query = "SELECT rolname FROM pg_roles WHERE rolname IN (%s)" % ','.join(tmp)
- return [x[0] for x in exec_sql(self, query, add_to_executed=False)]
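The PostgreSQL modules built on this file follow roughly this flow (a sketch; the version query is illustrative):

    from ansible.module_utils.basic import AnsibleModule
    from ansible.module_utils.postgres import (
        connect_to_db, get_conn_params, postgres_common_argument_spec,
    )

    module = AnsibleModule(argument_spec=postgres_common_argument_spec(),
                           supports_check_mode=True)
    conn_params = get_conn_params(module, module.params, warn_db_default=False)
    db_connection = connect_to_db(module, conn_params, autocommit=True)
    cursor = db_connection.cursor()
    cursor.execute('SELECT version()')
    module.exit_json(changed=False, version=cursor.fetchone()[0])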
diff --git a/test/support/integration/plugins/module_utils/rabbitmq.py b/test/support/integration/plugins/module_utils/rabbitmq.py
deleted file mode 100644
index cf764006..00000000
--- a/test/support/integration/plugins/module_utils/rabbitmq.py
+++ /dev/null
@@ -1,220 +0,0 @@
-# -*- coding: utf-8 -*-
-#
-# Copyright: (c) 2016, Jorge Rodriguez <jorge.rodriguez@tiriel.eu>
-# Copyright: (c) 2018, John Imison <john+github@imison.net>
-#
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-from ansible.module_utils._text import to_native
-from ansible.module_utils.basic import missing_required_lib
-from ansible.module_utils.six.moves.urllib import parse as urllib_parse
-from mimetypes import MimeTypes
-
-import os
-import json
-import traceback
-
-PIKA_IMP_ERR = None
-try:
- import pika
- import pika.exceptions
- from pika import spec
- HAS_PIKA = True
-except ImportError:
- PIKA_IMP_ERR = traceback.format_exc()
- HAS_PIKA = False
-
-
-def rabbitmq_argument_spec():
- return dict(
- login_user=dict(type='str', default='guest'),
- login_password=dict(type='str', default='guest', no_log=True),
- login_host=dict(type='str', default='localhost'),
- login_port=dict(type='str', default='15672'),
- login_protocol=dict(type='str', default='http', choices=['http', 'https']),
- ca_cert=dict(type='path', aliases=['cacert']),
- client_cert=dict(type='path', aliases=['cert']),
- client_key=dict(type='path', aliases=['key']),
- vhost=dict(type='str', default='/'),
- )
-
-
-# notification/rabbitmq_basic_publish.py
-class RabbitClient():
- def __init__(self, module):
- self.module = module
- self.params = module.params
- self.check_required_library()
- self.check_host_params()
- self.url = self.params['url']
- self.proto = self.params['proto']
- self.username = self.params['username']
- self.password = self.params['password']
- self.host = self.params['host']
- self.port = self.params['port']
- self.vhost = self.params['vhost']
- self.queue = self.params['queue']
- self.headers = self.params['headers']
- self.cafile = self.params['cafile']
- self.certfile = self.params['certfile']
- self.keyfile = self.params['keyfile']
-
- if self.host is not None:
- self.build_url()
-
- if self.cafile is not None:
- self.append_ssl_certs()
-
- self.connect_to_rabbitmq()
-
- def check_required_library(self):
- if not HAS_PIKA:
- self.module.fail_json(msg=missing_required_lib("pika"), exception=PIKA_IMP_ERR)
-
- def check_host_params(self):
- # Fail if url is specified and other conflicting parameters have been specified
- if self.params['url'] is not None and any(self.params[k] is not None for k in ['proto', 'host', 'port', 'password', 'username', 'vhost']):
- self.module.fail_json(msg="url and proto, host, port, vhost, username or password cannot be specified at the same time.")
-
- # Fail if url not specified and there is a missing parameter to build the url
- if self.params['url'] is None and any(self.params[k] is None for k in ['proto', 'host', 'port', 'password', 'username', 'vhost']):
-            self.module.fail_json(msg="Connection parameters must be passed either via url, or via proto, host, port, vhost, username and password.")
-
- def append_ssl_certs(self):
- ssl_options = {}
- if self.cafile:
- ssl_options['cafile'] = self.cafile
- if self.certfile:
- ssl_options['certfile'] = self.certfile
- if self.keyfile:
- ssl_options['keyfile'] = self.keyfile
-
- self.url = self.url + '?ssl_options=' + urllib_parse.quote(json.dumps(ssl_options))
-
- @staticmethod
- def rabbitmq_argument_spec():
- return dict(
- url=dict(type='str'),
- proto=dict(type='str', choices=['amqp', 'amqps']),
- host=dict(type='str'),
- port=dict(type='int'),
- username=dict(type='str'),
- password=dict(type='str', no_log=True),
- vhost=dict(type='str'),
- queue=dict(type='str')
- )
-
-    # Consider some file size limits here
- def _read_file(self, path):
- try:
- with open(path, "rb") as file_handle:
- return file_handle.read()
- except IOError as e:
- self.module.fail_json(msg="Unable to open file %s: %s" % (path, to_native(e)))
-
- @staticmethod
- def _check_file_mime_type(path):
- mime = MimeTypes()
- return mime.guess_type(path)
-
- def build_url(self):
- self.url = '{0}://{1}:{2}@{3}:{4}/{5}'.format(self.proto,
- self.username,
- self.password,
- self.host,
- self.port,
- self.vhost)
-
- def connect_to_rabbitmq(self):
- """
- Function to connect to rabbitmq using username and password
- """
- try:
- parameters = pika.URLParameters(self.url)
- except Exception as e:
- self.module.fail_json(msg="URL malformed: %s" % to_native(e))
-
- try:
- self.connection = pika.BlockingConnection(parameters)
- except Exception as e:
- self.module.fail_json(msg="Connection issue: %s" % to_native(e))
-
- try:
- self.conn_channel = self.connection.channel()
- except pika.exceptions.AMQPChannelError as e:
- self.close_connection()
- self.module.fail_json(msg="Channel issue: %s" % to_native(e))
-
- def close_connection(self):
- try:
- self.connection.close()
- except pika.exceptions.AMQPConnectionError:
- pass
-
- def basic_publish(self):
- self.content_type = self.params.get("content_type")
-
- if self.params.get("body") is not None:
- args = dict(
- body=self.params.get("body"),
- exchange=self.params.get("exchange"),
- routing_key=self.params.get("routing_key"),
- properties=pika.BasicProperties(content_type=self.content_type, delivery_mode=1, headers=self.headers))
-
- # If src (file) is defined and content_type is left as default, do a mime lookup on the file
- if self.params.get("src") is not None and self.content_type == 'text/plain':
- self.content_type = RabbitClient._check_file_mime_type(self.params.get("src"))[0]
- self.headers.update(
- filename=os.path.basename(self.params.get("src"))
- )
-
- args = dict(
- body=self._read_file(self.params.get("src")),
- exchange=self.params.get("exchange"),
- routing_key=self.params.get("routing_key"),
- properties=pika.BasicProperties(content_type=self.content_type,
- delivery_mode=1,
- headers=self.headers
- ))
- elif self.params.get("src") is not None:
- args = dict(
- body=self._read_file(self.params.get("src")),
- exchange=self.params.get("exchange"),
- routing_key=self.params.get("routing_key"),
- properties=pika.BasicProperties(content_type=self.content_type,
- delivery_mode=1,
- headers=self.headers
- ))
-
- try:
- # If queue is not defined, RabbitMQ will return the queue name of the automatically generated queue.
- if self.queue is None:
- result = self.conn_channel.queue_declare(durable=self.params.get("durable"),
- exclusive=self.params.get("exclusive"),
- auto_delete=self.params.get("auto_delete"))
- self.conn_channel.confirm_delivery()
- self.queue = result.method.queue
- else:
- self.conn_channel.queue_declare(queue=self.queue,
- durable=self.params.get("durable"),
- exclusive=self.params.get("exclusive"),
- auto_delete=self.params.get("auto_delete"))
- self.conn_channel.confirm_delivery()
- except Exception as e:
- self.module.fail_json(msg="Queue declare issue: %s" % to_native(e))
-
- # https://github.com/ansible/ansible/blob/devel/lib/ansible/module_utils/cloudstack.py#L150
- if args['routing_key'] is None:
- args['routing_key'] = self.queue
-
- if args['exchange'] is None:
- args['exchange'] = ''
-
- try:
- self.conn_channel.basic_publish(**args)
- return True
- except pika.exceptions.UnroutableError:
- return False
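Wiring this into a module looks roughly like the following (a sketch; the extra options such as body, src and exchange are assumptions inferred from what basic_publish() reads out of module.params):

    from ansible.module_utils.basic import AnsibleModule
    from ansible.module_utils.rabbitmq import RabbitClient

    argument_spec = RabbitClient.rabbitmq_argument_spec()
    argument_spec.update(
        body=dict(type='str'),
        src=dict(type='path'),
        exchange=dict(type='str'),
        routing_key=dict(type='str'),
        content_type=dict(type='str', default='text/plain'),
        durable=dict(type='bool', default=False),
        exclusive=dict(type='bool', default=False),
        auto_delete=dict(type='bool', default=False),
        headers=dict(type='dict', default={}),
        cafile=dict(type='str'),
        certfile=dict(type='str'),
        keyfile=dict(type='str'),
    )
    module = AnsibleModule(argument_spec=argument_spec)
    client = RabbitClient(module)   # validates url/host parameters and connects
    module.exit_json(changed=client.basic_publish())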
diff --git a/test/support/integration/plugins/modules/aws_s3.py b/test/support/integration/plugins/modules/aws_s3.py
deleted file mode 100644
index 54874f05..00000000
--- a/test/support/integration/plugins/modules/aws_s3.py
+++ /dev/null
@@ -1,925 +0,0 @@
-#!/usr/bin/python
-# This file is part of Ansible
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['stableinterface'],
- 'supported_by': 'core'}
-
-
-DOCUMENTATION = '''
----
-module: aws_s3
-short_description: manage objects in S3.
-description:
- - This module allows the user to manage S3 buckets and the objects within them. Includes support for creating and
- deleting both objects and buckets, retrieving objects as files or strings and generating download links.
- This module has a dependency on boto3 and botocore.
-notes:
-  - In 2.4, this module has been renamed from C(s3) to M(aws_s3).
-version_added: "1.1"
-options:
- bucket:
- description:
- - Bucket name.
- required: true
- type: str
- dest:
- description:
- - The destination file path when downloading an object/key with a GET operation.
- version_added: "1.3"
- type: path
- encrypt:
- description:
- - When set for PUT mode, asks for server-side encryption.
- default: true
- version_added: "2.0"
- type: bool
- encryption_mode:
- description:
- - What encryption mode to use if I(encrypt=true).
- default: AES256
- choices:
- - AES256
- - aws:kms
- version_added: "2.7"
- type: str
- expiry:
- description:
- - Time limit (in seconds) for the URL generated and returned by S3/Walrus when performing a I(mode=put) or I(mode=geturl) operation.
- default: 600
- aliases: ['expiration']
- type: int
- headers:
- description:
- - Custom headers for PUT operation, as a dictionary of 'key=value' and 'key=value,key=value'.
- version_added: "2.0"
- type: dict
- marker:
- description:
-      - Specifies the key to start with when using list mode. Object keys are returned in alphabetical order, starting with the key after the marker.
- version_added: "2.0"
- type: str
- max_keys:
- description:
-      - Max number of results to return in list mode. Set this if you want to retrieve fewer than the default 1000 keys.
- default: 1000
- version_added: "2.0"
- type: int
- metadata:
- description:
- - Metadata for PUT operation, as a dictionary of 'key=value' and 'key=value,key=value'.
- version_added: "1.6"
- type: dict
- mode:
- description:
- - Switches the module behaviour between put (upload), get (download), geturl (return download url, Ansible 1.3+),
- getstr (download object as string (1.3+)), list (list keys, Ansible 2.0+), create (bucket), delete (bucket),
- and delobj (delete object, Ansible 2.0+).
- required: true
- choices: ['get', 'put', 'delete', 'create', 'geturl', 'getstr', 'delobj', 'list']
- type: str
- object:
- description:
- - Keyname of the object inside the bucket. Can be used to create "virtual directories", see examples.
- type: str
- permission:
- description:
- - This option lets the user set the canned permissions on the object/bucket that are created.
- The permissions that can be set are C(private), C(public-read), C(public-read-write), C(authenticated-read) for a bucket or
- C(private), C(public-read), C(public-read-write), C(aws-exec-read), C(authenticated-read), C(bucket-owner-read),
- C(bucket-owner-full-control) for an object. Multiple permissions can be specified as a list.
- default: ['private']
- version_added: "2.0"
- type: list
- elements: str
- prefix:
- description:
- - Limits the response to keys that begin with the specified prefix for list mode.
- default: ""
- version_added: "2.0"
- type: str
- version:
- description:
- - Version ID of the object inside the bucket. Can be used to get a specific version of a file if versioning is enabled in the target bucket.
- version_added: "2.0"
- type: str
- overwrite:
- description:
- - Force overwrite either locally on the filesystem or remotely with the object/key. Used with PUT and GET operations.
- Boolean or one of [always, never, different], true is equal to 'always' and false is equal to 'never', new in 2.0.
- When this is set to 'different', the md5 sum of the local file is compared with the 'ETag' of the object/key in S3.
- The ETag may or may not be an MD5 digest of the object data. See the ETag response header here
- U(https://docs.aws.amazon.com/AmazonS3/latest/API/RESTCommonResponseHeaders.html)
- default: 'always'
- aliases: ['force']
- version_added: "1.2"
- type: str
- retries:
- description:
- - On recoverable failure, how many times to retry before actually failing.
- default: 0
- version_added: "2.0"
- type: int
- aliases: ['retry']
- s3_url:
- description:
- - S3 URL endpoint for usage with Ceph, Eucalyptus and fakes3 etc. Otherwise assumes AWS.
- aliases: [ S3_URL ]
- type: str
- dualstack:
- description:
- - Enables Amazon S3 Dual-Stack Endpoints, allowing S3 communications using both IPv4 and IPv6.
- - Requires at least botocore version 1.4.45.
- type: bool
- default: false
- version_added: "2.7"
- rgw:
- description:
- - Enable Ceph RGW S3 support. This option requires an explicit url via I(s3_url).
- default: false
- version_added: "2.2"
- type: bool
- src:
- description:
- - The source file path when performing a PUT operation.
- version_added: "1.3"
- type: str
- ignore_nonexistent_bucket:
- description:
-      - "Overrides initial bucket lookups in case bucket or IAM policies are restrictive. Example: a user may have the
-        GetObject permission but no other permissions. In this case using I(mode=get) will fail without specifying
-        I(ignore_nonexistent_bucket=true)."
- version_added: "2.3"
- type: bool
- encryption_kms_key_id:
- description:
-      - KMS key id to use when encrypting objects using I(encryption_mode=aws:kms). Ignored if I(encryption_mode) is not C(aws:kms).
- version_added: "2.7"
- type: str
-requirements: [ "boto3", "botocore" ]
-author:
- - "Lester Wade (@lwade)"
- - "Sloane Hertel (@s-hertel)"
-extends_documentation_fragment:
- - aws
- - ec2
-'''
-
-EXAMPLES = '''
-- name: Simple PUT operation
- aws_s3:
- bucket: mybucket
- object: /my/desired/key.txt
- src: /usr/local/myfile.txt
- mode: put
-
-- name: Simple PUT operation in Ceph RGW S3
- aws_s3:
- bucket: mybucket
- object: /my/desired/key.txt
- src: /usr/local/myfile.txt
- mode: put
- rgw: true
- s3_url: "http://localhost:8000"
-
-- name: Simple GET operation
- aws_s3:
- bucket: mybucket
- object: /my/desired/key.txt
- dest: /usr/local/myfile.txt
- mode: get
-
-- name: Get a specific version of an object.
- aws_s3:
- bucket: mybucket
- object: /my/desired/key.txt
- version: 48c9ee5131af7a716edc22df9772aa6f
- dest: /usr/local/myfile.txt
- mode: get
-
-- name: PUT/upload with metadata
- aws_s3:
- bucket: mybucket
- object: /my/desired/key.txt
- src: /usr/local/myfile.txt
- mode: put
- metadata: 'Content-Encoding=gzip,Cache-Control=no-cache'
-
-- name: PUT/upload with custom headers
- aws_s3:
- bucket: mybucket
- object: /my/desired/key.txt
- src: /usr/local/myfile.txt
- mode: put
- headers: 'x-amz-grant-full-control=emailAddress=owner@example.com'
-
-- name: List keys simple
- aws_s3:
- bucket: mybucket
- mode: list
-
-- name: List keys all options
- aws_s3:
- bucket: mybucket
- mode: list
- prefix: /my/desired/
- marker: /my/desired/0023.txt
- max_keys: 472
-
-- name: Create an empty bucket
- aws_s3:
- bucket: mybucket
- mode: create
- permission: public-read
-
-- name: Create a bucket with key as directory, in the EU region
- aws_s3:
- bucket: mybucket
- object: /my/directory/path
- mode: create
- region: eu-west-1
-
-- name: Delete a bucket and all contents
- aws_s3:
- bucket: mybucket
- mode: delete
-
-- name: GET an object but don't download if the file checksums match. New in 2.0
- aws_s3:
- bucket: mybucket
- object: /my/desired/key.txt
- dest: /usr/local/myfile.txt
- mode: get
- overwrite: different
-
-- name: Delete an object from a bucket
- aws_s3:
- bucket: mybucket
- object: /my/desired/key.txt
- mode: delobj
-'''
-
-RETURN = '''
-msg:
- description: Message indicating the status of the operation.
- returned: always
- type: str
- sample: PUT operation complete
-url:
- description: URL of the object.
- returned: (for put and geturl operations)
- type: str
- sample: https://my-bucket.s3.amazonaws.com/my-key.txt?AWSAccessKeyId=<access-key>&Expires=1506888865&Signature=<signature>
-expiry:
- description: Number of seconds the presigned url is valid for.
- returned: (for geturl operation)
- type: int
- sample: 600
-contents:
- description: Contents of the object as string.
- returned: (for getstr operation)
- type: str
- sample: "Hello, world!"
-s3_keys:
- description: List of object keys.
- returned: (for list operation)
- type: list
- elements: str
- sample:
- - prefix1/
- - prefix1/key1
- - prefix1/key2
-'''
-
-import mimetypes
-import os
-from ansible.module_utils.six.moves.urllib.parse import urlparse
-from ssl import SSLError
-from ansible.module_utils.basic import to_text, to_native
-from ansible.module_utils.aws.core import AnsibleAWSModule
-from ansible.module_utils.aws.s3 import calculate_etag, HAS_MD5
-from ansible.module_utils.ec2 import get_aws_connection_info, boto3_conn
-
-try:
- import botocore
-except ImportError:
- pass # will be detected by imported AnsibleAWSModule
-
-IGNORE_S3_DROP_IN_EXCEPTIONS = ['XNotImplemented', 'NotImplemented']
-
-
-class Sigv4Required(Exception):
- pass
-
-
-def key_check(module, s3, bucket, obj, version=None, validate=True):
- exists = True
- try:
- if version:
- s3.head_object(Bucket=bucket, Key=obj, VersionId=version)
- else:
- s3.head_object(Bucket=bucket, Key=obj)
- except botocore.exceptions.ClientError as e:
- # if a client error is thrown, check if it's a 404 error
- # if it's a 404 error, then the object does not exist
- error_code = int(e.response['Error']['Code'])
- if error_code == 404:
- exists = False
- elif error_code == 403 and validate is False:
- pass
- else:
- module.fail_json_aws(e, msg="Failed while looking up object (during key check) %s." % obj)
- except botocore.exceptions.BotoCoreError as e:
- module.fail_json_aws(e, msg="Failed while looking up object (during key check) %s." % obj)
- return exists
-
-
-def etag_compare(module, local_file, s3, bucket, obj, version=None):
- s3_etag = get_etag(s3, bucket, obj, version=version)
- local_etag = calculate_etag(module, local_file, s3_etag, s3, bucket, obj, version)
-
- return s3_etag == local_etag
-
-
-def get_etag(s3, bucket, obj, version=None):
- if version:
- key_check = s3.head_object(Bucket=bucket, Key=obj, VersionId=version)
- else:
- key_check = s3.head_object(Bucket=bucket, Key=obj)
- if not key_check:
- return None
- return key_check['ETag']
-
-
-def bucket_check(module, s3, bucket, validate=True):
- exists = True
- try:
- s3.head_bucket(Bucket=bucket)
- except botocore.exceptions.ClientError as e:
- # If a client error is thrown, then check that it was a 404 error.
- # If it was a 404 error, then the bucket does not exist.
- error_code = int(e.response['Error']['Code'])
- if error_code == 404:
- exists = False
- elif error_code == 403 and validate is False:
- pass
- else:
- module.fail_json_aws(e, msg="Failed while looking up bucket (during bucket_check) %s." % bucket)
- except botocore.exceptions.EndpointConnectionError as e:
- module.fail_json_aws(e, msg="Invalid endpoint provided")
- except botocore.exceptions.BotoCoreError as e:
- module.fail_json_aws(e, msg="Failed while looking up bucket (during bucket_check) %s." % bucket)
- return exists
-
-
-def create_bucket(module, s3, bucket, location=None):
- if module.check_mode:
- module.exit_json(msg="CREATE operation skipped - running in check mode", changed=True)
- configuration = {}
- if location not in ('us-east-1', None):
- configuration['LocationConstraint'] = location
- try:
- if len(configuration) > 0:
- s3.create_bucket(Bucket=bucket, CreateBucketConfiguration=configuration)
- else:
- s3.create_bucket(Bucket=bucket)
- if module.params.get('permission'):
- # Wait for the bucket to exist before setting ACLs
- s3.get_waiter('bucket_exists').wait(Bucket=bucket)
- for acl in module.params.get('permission'):
- s3.put_bucket_acl(ACL=acl, Bucket=bucket)
- except botocore.exceptions.ClientError as e:
- if e.response['Error']['Code'] in IGNORE_S3_DROP_IN_EXCEPTIONS:
- module.warn("PutBucketAcl is not implemented by your storage provider. Set the permission parameters to the empty list to avoid this warning")
- else:
- module.fail_json_aws(e, msg="Failed while creating bucket or setting acl (check that you have CreateBucket and PutBucketAcl permission).")
- except botocore.exceptions.BotoCoreError as e:
- module.fail_json_aws(e, msg="Failed while creating bucket or setting acl (check that you have CreateBucket and PutBucketAcl permission).")
-
- if bucket:
- return True
-
-
-def paginated_list(s3, **pagination_params):
- pg = s3.get_paginator('list_objects_v2')
- for page in pg.paginate(**pagination_params):
- yield [data['Key'] for data in page.get('Contents', [])]
-
-
-def paginated_versioned_list_with_fallback(s3, **pagination_params):
- try:
- versioned_pg = s3.get_paginator('list_object_versions')
- for page in versioned_pg.paginate(**pagination_params):
- delete_markers = [{'Key': data['Key'], 'VersionId': data['VersionId']} for data in page.get('DeleteMarkers', [])]
- current_objects = [{'Key': data['Key'], 'VersionId': data['VersionId']} for data in page.get('Versions', [])]
- yield delete_markers + current_objects
- except botocore.exceptions.ClientError as e:
- if to_text(e.response['Error']['Code']) in IGNORE_S3_DROP_IN_EXCEPTIONS + ['AccessDenied']:
- for page in paginated_list(s3, **pagination_params):
- yield [{'Key': data['Key']} for data in page]
-
-
-def list_keys(module, s3, bucket, prefix, marker, max_keys):
- pagination_params = {'Bucket': bucket}
- for param_name, param_value in (('Prefix', prefix), ('StartAfter', marker), ('MaxKeys', max_keys)):
- pagination_params[param_name] = param_value
- try:
- keys = sum(paginated_list(s3, **pagination_params), [])
- module.exit_json(msg="LIST operation complete", s3_keys=keys)
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg="Failed while listing the keys in the bucket {0}".format(bucket))
-
-
-def delete_bucket(module, s3, bucket):
- if module.check_mode:
- module.exit_json(msg="DELETE operation skipped - running in check mode", changed=True)
- try:
- exists = bucket_check(module, s3, bucket)
- if exists is False:
- return False
- # if there are contents then we need to delete them before we can delete the bucket
- for keys in paginated_versioned_list_with_fallback(s3, Bucket=bucket):
- if keys:
- s3.delete_objects(Bucket=bucket, Delete={'Objects': keys})
- s3.delete_bucket(Bucket=bucket)
- return True
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg="Failed while deleting bucket %s." % bucket)
-
-
-def delete_key(module, s3, bucket, obj):
- if module.check_mode:
- module.exit_json(msg="DELETE operation skipped - running in check mode", changed=True)
- try:
- s3.delete_object(Bucket=bucket, Key=obj)
- module.exit_json(msg="Object deleted from bucket %s." % (bucket), changed=True)
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg="Failed while trying to delete %s." % obj)
-
-
-def create_dirkey(module, s3, bucket, obj, encrypt):
- if module.check_mode:
- module.exit_json(msg="PUT operation skipped - running in check mode", changed=True)
- try:
- params = {'Bucket': bucket, 'Key': obj, 'Body': b''}
- if encrypt:
- params['ServerSideEncryption'] = module.params['encryption_mode']
- if module.params['encryption_kms_key_id'] and module.params['encryption_mode'] == 'aws:kms':
- params['SSEKMSKeyId'] = module.params['encryption_kms_key_id']
-
- s3.put_object(**params)
- for acl in module.params.get('permission'):
- s3.put_object_acl(ACL=acl, Bucket=bucket, Key=obj)
- except botocore.exceptions.ClientError as e:
- if e.response['Error']['Code'] in IGNORE_S3_DROP_IN_EXCEPTIONS:
- module.warn("PutObjectAcl is not implemented by your storage provider. Set the permissions parameters to the empty list to avoid this warning")
- else:
- module.fail_json_aws(e, msg="Failed while creating object %s." % obj)
- except botocore.exceptions.BotoCoreError as e:
- module.fail_json_aws(e, msg="Failed while creating object %s." % obj)
- module.exit_json(msg="Virtual directory %s created in bucket %s" % (obj, bucket), changed=True)
-
-
-def path_check(path):
-    return os.path.exists(path)
-
-
-def option_in_extra_args(option):
- temp_option = option.replace('-', '').lower()
-
- allowed_extra_args = {'acl': 'ACL', 'cachecontrol': 'CacheControl', 'contentdisposition': 'ContentDisposition',
- 'contentencoding': 'ContentEncoding', 'contentlanguage': 'ContentLanguage',
- 'contenttype': 'ContentType', 'expires': 'Expires', 'grantfullcontrol': 'GrantFullControl',
- 'grantread': 'GrantRead', 'grantreadacp': 'GrantReadACP', 'grantwriteacp': 'GrantWriteACP',
- 'metadata': 'Metadata', 'requestpayer': 'RequestPayer', 'serversideencryption': 'ServerSideEncryption',
- 'storageclass': 'StorageClass', 'ssecustomeralgorithm': 'SSECustomerAlgorithm', 'ssecustomerkey': 'SSECustomerKey',
- 'ssecustomerkeymd5': 'SSECustomerKeyMD5', 'ssekmskeyid': 'SSEKMSKeyId', 'websiteredirectlocation': 'WebsiteRedirectLocation'}
-
- if temp_option in allowed_extra_args:
- return allowed_extra_args[temp_option]
-
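A quick sketch of how the mapping above behaves (values illustrative):

    option_in_extra_args('Content-Type')     # -> 'ContentType', passed through as an ExtraArg
    option_in_extra_args('x-custom-header')  # -> None; upload_s3file() stores it under Metadata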
-
-def upload_s3file(module, s3, bucket, obj, src, expiry, metadata, encrypt, headers):
- if module.check_mode:
- module.exit_json(msg="PUT operation skipped - running in check mode", changed=True)
- try:
- extra = {}
- if encrypt:
- extra['ServerSideEncryption'] = module.params['encryption_mode']
- if module.params['encryption_kms_key_id'] and module.params['encryption_mode'] == 'aws:kms':
- extra['SSEKMSKeyId'] = module.params['encryption_kms_key_id']
- if metadata:
- extra['Metadata'] = {}
-
- # determine object metadata and extra arguments
- for option in metadata:
- extra_args_option = option_in_extra_args(option)
- if extra_args_option is not None:
- extra[extra_args_option] = metadata[option]
- else:
- extra['Metadata'][option] = metadata[option]
-
- if 'ContentType' not in extra:
- content_type = mimetypes.guess_type(src)[0]
- if content_type is None:
- # s3 default content type
- content_type = 'binary/octet-stream'
- extra['ContentType'] = content_type
-
- s3.upload_file(Filename=src, Bucket=bucket, Key=obj, ExtraArgs=extra)
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg="Unable to complete PUT operation.")
- try:
- for acl in module.params.get('permission'):
- s3.put_object_acl(ACL=acl, Bucket=bucket, Key=obj)
- except botocore.exceptions.ClientError as e:
- if e.response['Error']['Code'] in IGNORE_S3_DROP_IN_EXCEPTIONS:
-            module.warn("PutObjectAcl is not implemented by your storage provider. Set the permission parameter to the empty list to avoid this warning")
- else:
- module.fail_json_aws(e, msg="Unable to set object ACL")
- except botocore.exceptions.BotoCoreError as e:
- module.fail_json_aws(e, msg="Unable to set object ACL")
- try:
- url = s3.generate_presigned_url(ClientMethod='put_object',
- Params={'Bucket': bucket, 'Key': obj},
- ExpiresIn=expiry)
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg="Unable to generate presigned URL")
- module.exit_json(msg="PUT operation complete", url=url, changed=True)
-
-
-def download_s3file(module, s3, bucket, obj, dest, retries, version=None):
- if module.check_mode:
- module.exit_json(msg="GET operation skipped - running in check mode", changed=True)
-    # 'retries' is the number of retries after the initial attempt, so the
-    # download loop below runs retries + 1 times in total.
- try:
- if version:
- key = s3.get_object(Bucket=bucket, Key=obj, VersionId=version)
- else:
- key = s3.get_object(Bucket=bucket, Key=obj)
- except botocore.exceptions.ClientError as e:
- if e.response['Error']['Code'] == 'InvalidArgument' and 'require AWS Signature Version 4' in to_text(e):
- raise Sigv4Required()
- elif e.response['Error']['Code'] not in ("403", "404"):
- # AccessDenied errors may be triggered if 1) file does not exist or 2) file exists but
- # user does not have the s3:GetObject permission. 404 errors are handled by download_file().
- module.fail_json_aws(e, msg="Could not find the key %s." % obj)
- except botocore.exceptions.BotoCoreError as e:
- module.fail_json_aws(e, msg="Could not find the key %s." % obj)
-
- optional_kwargs = {'ExtraArgs': {'VersionId': version}} if version else {}
- for x in range(0, retries + 1):
- try:
- s3.download_file(bucket, obj, dest, **optional_kwargs)
- module.exit_json(msg="GET operation complete", changed=True)
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- # actually fail on last pass through the loop.
- if x >= retries:
- module.fail_json_aws(e, msg="Failed while downloading %s." % obj)
- # otherwise, try again, this may be a transient timeout.
-        except SSLError as e:  # ssl.SSLError is not a botocore exception, so it needs its own handler
- # actually fail on last pass through the loop.
- if x >= retries:
- module.fail_json_aws(e, msg="s3 download failed")
- # otherwise, try again, this may be a transient timeout.
-
-
-def download_s3str(module, s3, bucket, obj, version=None, validate=True):
- if module.check_mode:
- module.exit_json(msg="GET operation skipped - running in check mode", changed=True)
- try:
- if version:
- contents = to_native(s3.get_object(Bucket=bucket, Key=obj, VersionId=version)["Body"].read())
- else:
- contents = to_native(s3.get_object(Bucket=bucket, Key=obj)["Body"].read())
- module.exit_json(msg="GET operation complete", contents=contents, changed=True)
- except botocore.exceptions.ClientError as e:
- if e.response['Error']['Code'] == 'InvalidArgument' and 'require AWS Signature Version 4' in to_text(e):
- raise Sigv4Required()
- else:
- module.fail_json_aws(e, msg="Failed while getting contents of object %s as a string." % obj)
- except botocore.exceptions.BotoCoreError as e:
- module.fail_json_aws(e, msg="Failed while getting contents of object %s as a string." % obj)
-
-
-def get_download_url(module, s3, bucket, obj, expiry, changed=True):
- try:
- url = s3.generate_presigned_url(ClientMethod='get_object',
- Params={'Bucket': bucket, 'Key': obj},
- ExpiresIn=expiry)
- module.exit_json(msg="Download url:", url=url, expiry=expiry, changed=changed)
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg="Failed while getting download url.")
-
-
-def is_fakes3(s3_url):
-    """ Return True if s3_url has scheme fakes3:// or fakes3s:// """
- if s3_url is not None:
- return urlparse(s3_url).scheme in ('fakes3', 'fakes3s')
- else:
- return False
-
-
-def get_s3_connection(module, aws_connect_kwargs, location, rgw, s3_url, sig_4=False):
- if s3_url and rgw: # TODO - test this
- rgw = urlparse(s3_url)
- params = dict(module=module, conn_type='client', resource='s3', use_ssl=rgw.scheme == 'https', region=location, endpoint=s3_url, **aws_connect_kwargs)
- elif is_fakes3(s3_url):
- fakes3 = urlparse(s3_url)
- port = fakes3.port
- if fakes3.scheme == 'fakes3s':
- protocol = "https"
- if port is None:
- port = 443
- else:
- protocol = "http"
- if port is None:
- port = 80
- params = dict(module=module, conn_type='client', resource='s3', region=location,
- endpoint="%s://%s:%s" % (protocol, fakes3.hostname, to_text(port)),
- use_ssl=fakes3.scheme == 'fakes3s', **aws_connect_kwargs)
- else:
- params = dict(module=module, conn_type='client', resource='s3', region=location, endpoint=s3_url, **aws_connect_kwargs)
- if module.params['mode'] == 'put' and module.params['encryption_mode'] == 'aws:kms':
- params['config'] = botocore.client.Config(signature_version='s3v4')
- elif module.params['mode'] in ('get', 'getstr') and sig_4:
- params['config'] = botocore.client.Config(signature_version='s3v4')
- if module.params['dualstack']:
- dualconf = botocore.client.Config(s3={'use_dualstack_endpoint': True})
- if 'config' in params:
- params['config'] = params['config'].merge(dualconf)
- else:
- params['config'] = dualconf
- return boto3_conn(**params)
-
-
-def main():
- argument_spec = dict(
- bucket=dict(required=True),
- dest=dict(default=None, type='path'),
- encrypt=dict(default=True, type='bool'),
- encryption_mode=dict(choices=['AES256', 'aws:kms'], default='AES256'),
- expiry=dict(default=600, type='int', aliases=['expiration']),
- headers=dict(type='dict'),
- marker=dict(default=""),
- max_keys=dict(default=1000, type='int'),
- metadata=dict(type='dict'),
- mode=dict(choices=['get', 'put', 'delete', 'create', 'geturl', 'getstr', 'delobj', 'list'], required=True),
- object=dict(),
- permission=dict(type='list', default=['private']),
- version=dict(default=None),
- overwrite=dict(aliases=['force'], default='always'),
- prefix=dict(default=""),
- retries=dict(aliases=['retry'], type='int', default=0),
- s3_url=dict(aliases=['S3_URL']),
- dualstack=dict(default='no', type='bool'),
- rgw=dict(default='no', type='bool'),
- src=dict(),
- ignore_nonexistent_bucket=dict(default=False, type='bool'),
- encryption_kms_key_id=dict()
- )
- module = AnsibleAWSModule(
- argument_spec=argument_spec,
- supports_check_mode=True,
- required_if=[['mode', 'put', ['src', 'object']],
- ['mode', 'get', ['dest', 'object']],
- ['mode', 'getstr', ['object']],
- ['mode', 'geturl', ['object']]],
- )
-
- bucket = module.params.get('bucket')
- encrypt = module.params.get('encrypt')
- expiry = module.params.get('expiry')
- dest = module.params.get('dest', '')
- headers = module.params.get('headers')
- marker = module.params.get('marker')
- max_keys = module.params.get('max_keys')
- metadata = module.params.get('metadata')
- mode = module.params.get('mode')
- obj = module.params.get('object')
- version = module.params.get('version')
- overwrite = module.params.get('overwrite')
- prefix = module.params.get('prefix')
- retries = module.params.get('retries')
- s3_url = module.params.get('s3_url')
- dualstack = module.params.get('dualstack')
- rgw = module.params.get('rgw')
- src = module.params.get('src')
- ignore_nonexistent_bucket = module.params.get('ignore_nonexistent_bucket')
-
- object_canned_acl = ["private", "public-read", "public-read-write", "aws-exec-read", "authenticated-read", "bucket-owner-read", "bucket-owner-full-control"]
- bucket_canned_acl = ["private", "public-read", "public-read-write", "authenticated-read"]
-
- if overwrite not in ['always', 'never', 'different']:
- if module.boolean(overwrite):
- overwrite = 'always'
- else:
- overwrite = 'never'
-
- if overwrite == 'different' and not HAS_MD5:
- module.fail_json(msg='overwrite=different is unavailable: ETag calculation requires MD5 support')
-
- region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
-
- if region in ('us-east-1', '', None):
- # default to US Standard region
- location = 'us-east-1'
- else:
- # Boto uses symbolic names for locations but region strings will
- # actually work fine for everything except us-east-1 (US Standard)
- location = region
-
- if module.params.get('object'):
- obj = module.params['object']
-        # If the object name starts with '/', strip the leading character
-        # to maintain compatibility with Ansible versions < 2.4.
- if obj.startswith('/'):
- obj = obj[1:]
-
- # Bucket deletion does not require obj. Prevents ambiguity with delobj.
- if obj and mode == "delete":
- module.fail_json(msg='Parameter obj cannot be used with mode=delete')
-
- # allow eucarc environment variables to be used if ansible vars aren't set
- if not s3_url and 'S3_URL' in os.environ:
- s3_url = os.environ['S3_URL']
-
- if dualstack and s3_url is not None and 'amazonaws.com' not in s3_url:
- module.fail_json(msg='dualstack only applies to AWS S3')
-
- if dualstack and not module.botocore_at_least('1.4.45'):
- module.fail_json(msg='dualstack requires botocore >= 1.4.45')
-
- # rgw requires an explicit url
- if rgw and not s3_url:
- module.fail_json(msg='rgw flavour requires s3_url')
-
- # Look at s3_url and tweak connection settings
- # if connecting to RGW, Walrus or fakes3
- if s3_url:
- for key in ['validate_certs', 'security_token', 'profile_name']:
- aws_connect_kwargs.pop(key, None)
- s3 = get_s3_connection(module, aws_connect_kwargs, location, rgw, s3_url)
-
- validate = not ignore_nonexistent_bucket
-
- # separate types of ACLs
- bucket_acl = [acl for acl in module.params.get('permission') if acl in bucket_canned_acl]
- object_acl = [acl for acl in module.params.get('permission') if acl in object_canned_acl]
- error_acl = [acl for acl in module.params.get('permission') if acl not in bucket_canned_acl and acl not in object_canned_acl]
- if error_acl:
- module.fail_json(msg='Unknown permission specified: %s' % error_acl)
-
- # First, we check to see if the bucket exists, we get "bucket" returned.
- bucketrtn = bucket_check(module, s3, bucket, validate=validate)
-
- if validate and mode not in ('create', 'put', 'delete') and not bucketrtn:
- module.fail_json(msg="Source bucket cannot be found.")
-
- if mode == 'get':
- keyrtn = key_check(module, s3, bucket, obj, version=version, validate=validate)
- if keyrtn is False:
- if version:
- module.fail_json(msg="Key %s with version id %s does not exist." % (obj, version))
- else:
- module.fail_json(msg="Key %s does not exist." % obj)
-
- if path_check(dest) and overwrite != 'always':
- if overwrite == 'never':
- module.exit_json(msg="Local object already exists and overwrite is disabled.", changed=False)
- if etag_compare(module, dest, s3, bucket, obj, version=version):
-                module.exit_json(msg="Local and remote object are identical, ignoring. Use the overwrite=always parameter to force.", changed=False)
-
- try:
- download_s3file(module, s3, bucket, obj, dest, retries, version=version)
- except Sigv4Required:
- s3 = get_s3_connection(module, aws_connect_kwargs, location, rgw, s3_url, sig_4=True)
- download_s3file(module, s3, bucket, obj, dest, retries, version=version)
-
- if mode == 'put':
-
- # if putting an object in a bucket yet to be created, acls for the bucket and/or the object may be specified
- # these were separated into the variables bucket_acl and object_acl above
-
- if not path_check(src):
- module.fail_json(msg="Local object for PUT does not exist")
-
- if bucketrtn:
- keyrtn = key_check(module, s3, bucket, obj, version=version, validate=validate)
-        else:
-            # If the bucket doesn't exist we should create it.
-            # Only use valid bucket acls for the create_bucket function.
-            keyrtn = False  # a freshly created bucket cannot already contain the key
-            module.params['permission'] = bucket_acl
-            create_bucket(module, s3, bucket, location)
-
- if keyrtn and overwrite != 'always':
- if overwrite == 'never' or etag_compare(module, src, s3, bucket, obj):
- # Return the download URL for the existing object
- get_download_url(module, s3, bucket, obj, expiry, changed=False)
-
- # only use valid object acls for the upload_s3file function
- module.params['permission'] = object_acl
- upload_s3file(module, s3, bucket, obj, src, expiry, metadata, encrypt, headers)
-
- # Delete an object from a bucket, not the entire bucket
- if mode == 'delobj':
- if obj is None:
- module.fail_json(msg="object parameter is required")
- if bucket:
- deletertn = delete_key(module, s3, bucket, obj)
- if deletertn is True:
- module.exit_json(msg="Object deleted from bucket %s." % bucket, changed=True)
- else:
- module.fail_json(msg="Bucket parameter is required.")
-
- # Delete an entire bucket, including all objects in the bucket
- if mode == 'delete':
- if bucket:
- deletertn = delete_bucket(module, s3, bucket)
- if deletertn is True:
- module.exit_json(msg="Bucket %s and all keys have been deleted." % bucket, changed=True)
- else:
- module.fail_json(msg="Bucket parameter is required.")
-
- # Support for listing a set of keys
- if mode == 'list':
- exists = bucket_check(module, s3, bucket)
-
- # If the bucket does not exist then bail out
- if not exists:
- module.fail_json(msg="Target bucket (%s) cannot be found" % bucket)
-
- list_keys(module, s3, bucket, prefix, marker, max_keys)
-
-    # Need to research how to create directories without "populating" a key, so this should just do bucket creation for now.
-    # We should enable some way of creating an empty key to create "directory" structure, as the AWS console does.
- if mode == 'create':
-
- # if both creating a bucket and putting an object in it, acls for the bucket and/or the object may be specified
- # these were separated above into the variables bucket_acl and object_acl
-
- if bucket and not obj:
- if bucketrtn:
- module.exit_json(msg="Bucket already exists.", changed=False)
- else:
- # only use valid bucket acls when creating the bucket
- module.params['permission'] = bucket_acl
- module.exit_json(msg="Bucket created successfully", changed=create_bucket(module, s3, bucket, location))
- if bucket and obj:
- if obj.endswith('/'):
- dirobj = obj
- else:
- dirobj = obj + "/"
- if bucketrtn:
- if key_check(module, s3, bucket, dirobj):
-                    module.exit_json(msg="Bucket %s and key %s already exist." % (bucket, obj), changed=False)
- else:
- # setting valid object acls for the create_dirkey function
- module.params['permission'] = object_acl
- create_dirkey(module, s3, bucket, dirobj, encrypt)
- else:
- # only use valid bucket acls for the create_bucket function
- module.params['permission'] = bucket_acl
-                create_bucket(module, s3, bucket, location)
- # only use valid object acls for the create_dirkey function
- module.params['permission'] = object_acl
- create_dirkey(module, s3, bucket, dirobj, encrypt)
-
- # Support for grabbing the time-expired URL for an object in S3/Walrus.
- if mode == 'geturl':
-        if not bucket or not obj:
-            module.fail_json(msg="Bucket and Object parameters must be set")
-
- keyrtn = key_check(module, s3, bucket, obj, version=version, validate=validate)
- if keyrtn:
- get_download_url(module, s3, bucket, obj, expiry)
- else:
- module.fail_json(msg="Key %s does not exist." % obj)
-
- if mode == 'getstr':
- if bucket and obj:
- keyrtn = key_check(module, s3, bucket, obj, version=version, validate=validate)
- if keyrtn:
- try:
- download_s3str(module, s3, bucket, obj, version=version)
- except Sigv4Required:
- s3 = get_s3_connection(module, aws_connect_kwargs, location, rgw, s3_url, sig_4=True)
- download_s3str(module, s3, bucket, obj, version=version)
- elif version is not None:
- module.fail_json(msg="Key %s with version id %s does not exist." % (obj, version))
- else:
- module.fail_json(msg="Key %s does not exist." % obj)
-
- module.exit_json(failed=False)
-
-
-if __name__ == '__main__':
- main()
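
For reference, the retry loop in download_s3file() above is the core of the module's GET path: one initial attempt plus 'retries' further attempts, failing only once the budget is exhausted. A minimal standalone sketch of the same pattern, assuming boto3/botocore are installed and credentials are resolvable from the environment; the bucket, key and destination names are placeholders:

    import boto3
    import botocore

    def download_with_retries(bucket, key, dest, retries=3):
        # boto3 resolves credentials from the environment or its config files.
        s3 = boto3.client('s3')
        for attempt in range(retries + 1):
            try:
                s3.download_file(bucket, key, dest)
                return
            except (botocore.exceptions.ClientError,
                    botocore.exceptions.BotoCoreError):
                # Out of attempts: propagate the last error. Otherwise loop
                # again, since the failure may be a transient timeout.
                if attempt >= retries:
                    raise

    download_with_retries('example-bucket', 'path/to/key.bin', '/tmp/key.bin')
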
diff --git a/test/support/integration/plugins/modules/cloud_init_data_facts.py b/test/support/integration/plugins/modules/cloud_init_data_facts.py
deleted file mode 100644
index 4f871b99..00000000
--- a/test/support/integration/plugins/modules/cloud_init_data_facts.py
+++ /dev/null
@@ -1,134 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-#
-# (c) 2018, René Moser <mail@renemoser.net>
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: cloud_init_data_facts
-short_description: Retrieve facts of cloud-init.
-description:
- - Gathers facts by reading the status.json and result.json of cloud-init.
-version_added: 2.6
-author: René Moser (@resmo)
-options:
- filter:
- description:
- - Filter facts
- choices: [ status, result ]
-notes:
- - See http://cloudinit.readthedocs.io/ for more information about cloud-init.
-'''
-
-EXAMPLES = '''
-- name: Gather all facts of cloud init
- cloud_init_data_facts:
- register: result
-
-- debug:
- var: result
-
-- name: Wait for cloud init to finish
- cloud_init_data_facts:
- filter: status
- register: res
- until: "res.cloud_init_data_facts.status.v1.stage is defined and not res.cloud_init_data_facts.status.v1.stage"
- retries: 50
- delay: 5
-'''
-
-RETURN = '''
----
-cloud_init_data_facts:
- description: Facts of result and status.
- returned: success
- type: dict
-  sample: '{
-    "status": {
-        "v1": {
-            "datasource": "DataSourceCloudStack",
-            "errors": []
-        }
-    },
-    "result": {
-        "v1": {
-            "datasource": "DataSourceCloudStack",
-            "init": {
-                "errors": [],
-                "finished": 1522066377.0185432,
-                "start": 1522066375.2648022
-            },
-            "init-local": {
-                "errors": [],
-                "finished": 1522066373.70919,
-                "start": 1522066373.4726632
-            },
-            "modules-config": {
-                "errors": [],
-                "finished": 1522066380.9097016,
-                "start": 1522066379.0011985
-            },
-            "modules-final": {
-                "errors": [],
-                "finished": 1522066383.56594,
-                "start": 1522066382.3449218
-            },
-            "stage": null
-        }
-    }
-  }'
-'''
-
-import os
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible.module_utils._text import to_text
-
-
-CLOUD_INIT_PATH = "/var/lib/cloud/data/"
-
-
-def gather_cloud_init_data_facts(module):
- res = {
- 'cloud_init_data_facts': dict()
- }
-
-    filter_param = module.params.get('filter')
-    for i in ['result', 'status']:
-        if filter_param is None or filter_param == i:
-            res['cloud_init_data_facts'][i] = dict()
-            json_file = CLOUD_INIT_PATH + i + '.json'
-
-            if os.path.exists(json_file):
-                # Context manager ensures the file is closed on all paths.
-                with open(json_file, 'rb') as f:
-                    contents = to_text(f.read(), errors='surrogate_or_strict')
-
- if contents:
- res['cloud_init_data_facts'][i] = module.from_json(contents)
- return res
-
-
-def main():
- module = AnsibleModule(
- argument_spec=dict(
- filter=dict(choices=['result', 'status']),
- ),
- supports_check_mode=True,
- )
-
- facts = gather_cloud_init_data_facts(module)
- result = dict(changed=False, ansible_facts=facts, **facts)
- module.exit_json(**result)
-
-
-if __name__ == '__main__':
- main()
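
The module above is a thin wrapper around two JSON files that cloud-init writes under /var/lib/cloud/data/. A minimal sketch of reading one of them directly, assuming cloud-init has already run on the host; the decoding policy here is a simplification of the module's to_text(..., errors='surrogate_or_strict'):

    import json
    import os

    CLOUD_INIT_PATH = '/var/lib/cloud/data/'

    def read_cloud_init_json(kind):
        # kind is either 'status' or 'result'.
        path = os.path.join(CLOUD_INIT_PATH, kind + '.json')
        if not os.path.exists(path):
            return {}
        with open(path, 'rb') as f:
            contents = f.read().decode('utf-8', errors='replace')
        return json.loads(contents) if contents else {}

    print(read_cloud_init_json('status'))
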
diff --git a/test/support/integration/plugins/modules/deploy_helper.py b/test/support/integration/plugins/modules/deploy_helper.py
deleted file mode 100644
index 38594dde..00000000
--- a/test/support/integration/plugins/modules/deploy_helper.py
+++ /dev/null
@@ -1,521 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-
-# (c) 2014, Jasper N. Brouwer <jasper@nerdsweide.nl>
-# (c) 2014, Ramon de la Fuente <ramon@delafuente.nl>
-#
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: deploy_helper
-version_added: "2.0"
-author: "Ramon de la Fuente (@ramondelafuente)"
-short_description: Manages some of the steps common in deploying projects.
-description:
- - The Deploy Helper manages some of the steps common in deploying software.
- It creates a folder structure, manages a symlink for the current release
- and cleans up old releases.
-  - "Running it with C(state=query) or C(state=present) will return the C(deploy_helper) fact.
- C(project_path), whatever you set in the path parameter,
- C(current_path), the path to the symlink that points to the active release,
- C(releases_path), the path to the folder to keep releases in,
- C(shared_path), the path to the folder to keep shared resources in,
- C(unfinished_filename), the file to check for to recognize unfinished builds,
- C(previous_release), the release the 'current' symlink is pointing to,
- C(previous_release_path), the full path to the 'current' symlink target,
- C(new_release), either the 'release' parameter or a generated timestamp,
- C(new_release_path), the path to the new release folder (not created by the module)."
-
-options:
- path:
- required: True
- aliases: ['dest']
- description:
- - the root path of the project. Alias I(dest).
- Returned in the C(deploy_helper.project_path) fact.
-
- state:
- description:
- - the state of the project.
- C(query) will only gather facts,
- C(present) will create the project I(root) folder, and in it the I(releases) and I(shared) folders,
- C(finalize) will remove the unfinished_filename file, create a symlink to the newly
- deployed release and optionally clean old releases,
- C(clean) will remove failed & old releases,
-      C(absent) will remove the project folder (equivalent to the M(file) module with C(state=absent))
- choices: [ present, finalize, absent, clean, query ]
- default: present
-
- release:
- description:
-      - the release version that is being deployed. Defaults to a timestamp format %Y%m%d%H%M%S (e.g. '20141119223359').
- This parameter is optional during C(state=present), but needs to be set explicitly for C(state=finalize).
- You can use the generated fact C(release={{ deploy_helper.new_release }}).
-
- releases_path:
- description:
- - the name of the folder that will hold the releases. This can be relative to C(path) or absolute.
- Returned in the C(deploy_helper.releases_path) fact.
- default: releases
-
- shared_path:
- description:
- - the name of the folder that will hold the shared resources. This can be relative to C(path) or absolute.
- If this is set to an empty string, no shared folder will be created.
- Returned in the C(deploy_helper.shared_path) fact.
- default: shared
-
- current_path:
- description:
- - the name of the symlink that is created when the deploy is finalized. Used in C(finalize) and C(clean).
- Returned in the C(deploy_helper.current_path) fact.
- default: current
-
- unfinished_filename:
- description:
- - the name of the file that indicates a deploy has not finished. All folders in the releases_path that
- contain this file will be deleted on C(state=finalize) with clean=True, or C(state=clean). This file is
- automatically deleted from the I(new_release_path) during C(state=finalize).
- default: DEPLOY_UNFINISHED
-
- clean:
- description:
- - Whether to run the clean procedure in case of C(state=finalize).
- type: bool
- default: 'yes'
-
- keep_releases:
- description:
- - the number of old releases to keep when cleaning. Used in C(finalize) and C(clean). Any unfinished builds
- will be deleted first, so only correct releases will count. The current version will not count.
- default: 5
-
-notes:
- - Facts are only returned for C(state=query) and C(state=present). If you use both, you should pass any overridden
- parameters to both calls, otherwise the second call will overwrite the facts of the first one.
- - When using C(state=clean), the releases are ordered by I(creation date). You should be able to switch to a
- new naming strategy without problems.
- - Because of the default behaviour of generating the I(new_release) fact, this module will not be idempotent
- unless you pass your own release name with C(release). Due to the nature of deploying software, this should not
- be much of a problem.
-'''
-
-EXAMPLES = '''
-
-# General explanation, starting with an example folder structure for a project:
-
-# root:
-# releases:
-# - 20140415234508
-# - 20140415235146
-# - 20140416082818
-#
-# shared:
-# - sessions
-# - uploads
-#
-# current: releases/20140416082818
-
-
-# The 'releases' folder holds all the available releases. A release is a complete build of the application being
-# deployed. This can be a clone of a repository for example, or a sync of a local folder on your filesystem.
-# Having timestamped folders is one way of having distinct releases, but you could choose your own strategy like
-# git tags or commit hashes.
-#
-# During a deploy, a new folder should be created in the releases folder and any build steps required should be
-# performed. Once the new build is ready, the deploy procedure is 'finalized' by replacing the 'current' symlink
-# with a link to this build.
-#
-# The 'shared' folder holds any resource that is shared between releases. Examples of this are web-server
-# session files, or files uploaded by users of your application. It's quite common to have symlinks from a release
-# folder pointing to a shared/subfolder, and creating these links would be automated as part of the build steps.
-#
-# The 'current' symlink points to one of the releases. Probably the latest one, unless a deploy is in progress.
-# The web-server's root for the project will go through this symlink, so the 'downtime' when switching to a new
-# release is reduced to the time it takes to switch the link.
-#
-# To distinguish between successful builds and unfinished ones, a file can be placed in the folder of the release
-# that is currently in progress. The existence of this file will mark it as unfinished, and allow an automated
-# procedure to remove it during cleanup.
-
-
-# Typical usage
-- name: Initialize the deploy root and gather facts
- deploy_helper:
- path: /path/to/root
-- name: Clone the project to the new release folder
- git:
- repo: git://foosball.example.org/path/to/repo.git
- dest: '{{ deploy_helper.new_release_path }}'
- version: v1.1.1
-- name: Add an unfinished file, to allow cleanup on successful finalize
- file:
- path: '{{ deploy_helper.new_release_path }}/{{ deploy_helper.unfinished_filename }}'
- state: touch
-- name: Perform some build steps, like running your dependency manager for example
- composer:
- command: install
- working_dir: '{{ deploy_helper.new_release_path }}'
-- name: Create some folders in the shared folder
- file:
- path: '{{ deploy_helper.shared_path }}/{{ item }}'
- state: directory
- with_items:
- - sessions
- - uploads
-- name: Add symlinks from the new release to the shared folder
- file:
- path: '{{ deploy_helper.new_release_path }}/{{ item.path }}'
- src: '{{ deploy_helper.shared_path }}/{{ item.src }}'
- state: link
- with_items:
- - path: app/sessions
- src: sessions
- - path: web/uploads
- src: uploads
-- name: Finalize the deploy, removing the unfinished file and switching the symlink
- deploy_helper:
- path: /path/to/root
- release: '{{ deploy_helper.new_release }}'
- state: finalize
-
-# Retrieving facts before running a deploy
-- name: Run 'state=query' to gather facts without changing anything
- deploy_helper:
- path: /path/to/root
- state: query
-# Remember to set the 'release' parameter when you actually call 'state=present' later
-- name: Initialize the deploy root
- deploy_helper:
- path: /path/to/root
- release: '{{ deploy_helper.new_release }}'
- state: present
-
-# all paths can be absolute or relative (to the 'path' parameter)
-- deploy_helper:
- path: /path/to/root
- releases_path: /var/www/project/releases
- shared_path: /var/www/shared
- current_path: /var/www/active
-
-# Using your own naming strategy for releases (a version tag in this case):
-- deploy_helper:
- path: /path/to/root
- release: v1.1.1
- state: present
-- deploy_helper:
- path: /path/to/root
- release: '{{ deploy_helper.new_release }}'
- state: finalize
-
-# Using a different unfinished_filename:
-- deploy_helper:
- path: /path/to/root
- unfinished_filename: README.md
- release: '{{ deploy_helper.new_release }}'
- state: finalize
-
-# Postponing the cleanup of older builds:
-- deploy_helper:
- path: /path/to/root
- release: '{{ deploy_helper.new_release }}'
- state: finalize
- clean: False
-- deploy_helper:
- path: /path/to/root
- state: clean
-# Or running the cleanup ahead of the new deploy
-- deploy_helper:
- path: /path/to/root
- state: clean
-- deploy_helper:
- path: /path/to/root
- state: present
-
-# Keeping more old releases:
-- deploy_helper:
- path: /path/to/root
- release: '{{ deploy_helper.new_release }}'
- state: finalize
- keep_releases: 10
-# Or, if you use 'clean=false' on finalize:
-- deploy_helper:
- path: /path/to/root
- state: clean
- keep_releases: 10
-
-# Removing the entire project root folder
-- deploy_helper:
- path: /path/to/root
- state: absent
-
-# Debugging the facts returned by the module
-- deploy_helper:
- path: /path/to/root
-- debug:
- var: deploy_helper
-'''
-import os
-import shutil
-import time
-import traceback
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible.module_utils._text import to_native
-
-
-class DeployHelper(object):
-
- def __init__(self, module):
- self.module = module
- self.file_args = module.load_file_common_arguments(module.params)
-
- self.clean = module.params['clean']
- self.current_path = module.params['current_path']
- self.keep_releases = module.params['keep_releases']
- self.path = module.params['path']
- self.release = module.params['release']
- self.releases_path = module.params['releases_path']
- self.shared_path = module.params['shared_path']
- self.state = module.params['state']
- self.unfinished_filename = module.params['unfinished_filename']
-
- def gather_facts(self):
- current_path = os.path.join(self.path, self.current_path)
- releases_path = os.path.join(self.path, self.releases_path)
- if self.shared_path:
- shared_path = os.path.join(self.path, self.shared_path)
- else:
- shared_path = None
-
- previous_release, previous_release_path = self._get_last_release(current_path)
-
- if not self.release and (self.state == 'query' or self.state == 'present'):
- self.release = time.strftime("%Y%m%d%H%M%S")
-
- if self.release:
- new_release_path = os.path.join(releases_path, self.release)
- else:
- new_release_path = None
-
- return {
- 'project_path': self.path,
- 'current_path': current_path,
- 'releases_path': releases_path,
- 'shared_path': shared_path,
- 'previous_release': previous_release,
- 'previous_release_path': previous_release_path,
- 'new_release': self.release,
- 'new_release_path': new_release_path,
- 'unfinished_filename': self.unfinished_filename
- }
-
- def delete_path(self, path):
- if not os.path.lexists(path):
- return False
-
- if not os.path.isdir(path):
- self.module.fail_json(msg="%s exists but is not a directory" % path)
-
- if not self.module.check_mode:
- try:
- shutil.rmtree(path, ignore_errors=False)
- except Exception as e:
- self.module.fail_json(msg="rmtree failed: %s" % to_native(e), exception=traceback.format_exc())
-
- return True
-
- def create_path(self, path):
- changed = False
-
- if not os.path.lexists(path):
- changed = True
- if not self.module.check_mode:
- os.makedirs(path)
-
- elif not os.path.isdir(path):
- self.module.fail_json(msg="%s exists but is not a directory" % path)
-
- changed += self.module.set_directory_attributes_if_different(self._get_file_args(path), changed)
-
- return changed
-
- def check_link(self, path):
- if os.path.lexists(path):
- if not os.path.islink(path):
- self.module.fail_json(msg="%s exists but is not a symbolic link" % path)
-
- def create_link(self, source, link_name):
- changed = False
-
- if os.path.islink(link_name):
- norm_link = os.path.normpath(os.path.realpath(link_name))
- norm_source = os.path.normpath(os.path.realpath(source))
- if norm_link == norm_source:
- changed = False
- else:
- changed = True
- if not self.module.check_mode:
- if not os.path.lexists(source):
-                        self.module.fail_json(msg="the symlink target %s doesn't exist" % source)
- tmp_link_name = link_name + '.' + self.unfinished_filename
- if os.path.islink(tmp_link_name):
- os.unlink(tmp_link_name)
- os.symlink(source, tmp_link_name)
- os.rename(tmp_link_name, link_name)
- else:
- changed = True
- if not self.module.check_mode:
- os.symlink(source, link_name)
-
- return changed
-
- def remove_unfinished_file(self, new_release_path):
- changed = False
- unfinished_file_path = os.path.join(new_release_path, self.unfinished_filename)
- if os.path.lexists(unfinished_file_path):
- changed = True
- if not self.module.check_mode:
- os.remove(unfinished_file_path)
-
- return changed
-
- def remove_unfinished_builds(self, releases_path):
- changes = 0
-
- for release in os.listdir(releases_path):
- if os.path.isfile(os.path.join(releases_path, release, self.unfinished_filename)):
- if self.module.check_mode:
- changes += 1
- else:
- changes += self.delete_path(os.path.join(releases_path, release))
-
- return changes
-
- def remove_unfinished_link(self, path):
- changed = False
-
- tmp_link_name = os.path.join(path, self.release + '.' + self.unfinished_filename)
- if not self.module.check_mode and os.path.exists(tmp_link_name):
- changed = True
- os.remove(tmp_link_name)
-
- return changed
-
- def cleanup(self, releases_path, reserve_version):
- changes = 0
-
- if os.path.lexists(releases_path):
- releases = [f for f in os.listdir(releases_path) if os.path.isdir(os.path.join(releases_path, f))]
- try:
- releases.remove(reserve_version)
- except ValueError:
- pass
-
- if not self.module.check_mode:
- releases.sort(key=lambda x: os.path.getctime(os.path.join(releases_path, x)), reverse=True)
- for release in releases[self.keep_releases:]:
- changes += self.delete_path(os.path.join(releases_path, release))
- elif len(releases) > self.keep_releases:
- changes += (len(releases) - self.keep_releases)
-
- return changes
-
- def _get_file_args(self, path):
- file_args = self.file_args.copy()
- file_args['path'] = path
- return file_args
-
- def _get_last_release(self, current_path):
- previous_release = None
- previous_release_path = None
-
- if os.path.lexists(current_path):
- previous_release_path = os.path.realpath(current_path)
- previous_release = os.path.basename(previous_release_path)
-
- return previous_release, previous_release_path
-
-
-def main():
-
- module = AnsibleModule(
- argument_spec=dict(
- path=dict(aliases=['dest'], required=True, type='path'),
- release=dict(required=False, type='str', default=None),
- releases_path=dict(required=False, type='str', default='releases'),
- shared_path=dict(required=False, type='path', default='shared'),
- current_path=dict(required=False, type='path', default='current'),
- keep_releases=dict(required=False, type='int', default=5),
- clean=dict(required=False, type='bool', default=True),
- unfinished_filename=dict(required=False, type='str', default='DEPLOY_UNFINISHED'),
- state=dict(required=False, choices=['present', 'absent', 'clean', 'finalize', 'query'], default='present')
- ),
- add_file_common_args=True,
- supports_check_mode=True
- )
-
- deploy_helper = DeployHelper(module)
- facts = deploy_helper.gather_facts()
-
- result = {
- 'state': deploy_helper.state
- }
-
- changes = 0
-
- if deploy_helper.state == 'query':
- result['ansible_facts'] = {'deploy_helper': facts}
-
- elif deploy_helper.state == 'present':
- deploy_helper.check_link(facts['current_path'])
- changes += deploy_helper.create_path(facts['project_path'])
- changes += deploy_helper.create_path(facts['releases_path'])
- if deploy_helper.shared_path:
- changes += deploy_helper.create_path(facts['shared_path'])
-
- result['ansible_facts'] = {'deploy_helper': facts}
-
- elif deploy_helper.state == 'finalize':
- if not deploy_helper.release:
- module.fail_json(msg="'release' is a required parameter for state=finalize (try the 'deploy_helper.new_release' fact)")
- if deploy_helper.keep_releases <= 0:
- module.fail_json(msg="'keep_releases' should be at least 1")
-
- changes += deploy_helper.remove_unfinished_file(facts['new_release_path'])
- changes += deploy_helper.create_link(facts['new_release_path'], facts['current_path'])
- if deploy_helper.clean:
- changes += deploy_helper.remove_unfinished_link(facts['project_path'])
- changes += deploy_helper.remove_unfinished_builds(facts['releases_path'])
- changes += deploy_helper.cleanup(facts['releases_path'], facts['new_release'])
-
- elif deploy_helper.state == 'clean':
- changes += deploy_helper.remove_unfinished_link(facts['project_path'])
- changes += deploy_helper.remove_unfinished_builds(facts['releases_path'])
- changes += deploy_helper.cleanup(facts['releases_path'], facts['new_release'])
-
- elif deploy_helper.state == 'absent':
- # destroy the facts
- result['ansible_facts'] = {'deploy_helper': []}
- changes += deploy_helper.delete_path(facts['project_path'])
-
- if changes > 0:
- result['changed'] = True
- else:
- result['changed'] = False
-
- module.exit_json(**result)
-
-
-if __name__ == '__main__':
- main()
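
The create_link() method above switches the 'current' symlink by building a temporary link and renaming it over the old one, so a reader never observes a missing link. A standalone sketch of that pattern, assuming a POSIX filesystem, where os.rename() replaces the destination atomically; the paths are placeholders:

    import os

    def switch_symlink(target, link_name):
        # Build the replacement under a temporary name first...
        tmp_link = link_name + '.new'
        if os.path.islink(tmp_link):
            os.unlink(tmp_link)
        os.symlink(target, tmp_link)
        # ...then atomically rename it over the existing link.
        os.rename(tmp_link, link_name)

    switch_symlink('/path/to/root/releases/20140416082818',
                   '/path/to/root/current')
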
diff --git a/test/support/integration/plugins/modules/ec2_ami_info.py b/test/support/integration/plugins/modules/ec2_ami_info.py
index 53c2374d..26f86946 100644
--- a/test/support/integration/plugins/modules/ec2_ami_info.py
+++ b/test/support/integration/plugins/modules/ec2_ami_info.py
@@ -269,9 +269,6 @@ def main():
)
module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True)
- if module._module._name == 'ec2_ami_facts':
- module._module.deprecate("The 'ec2_ami_facts' module has been renamed to 'ec2_ami_info'",
- version='2.13', collection_name='ansible.builtin')
ec2_client = module.client('ec2')
diff --git a/test/support/integration/plugins/modules/locale_gen.py b/test/support/integration/plugins/modules/locale_gen.py
deleted file mode 100644
index 4968b834..00000000
--- a/test/support/integration/plugins/modules/locale_gen.py
+++ /dev/null
@@ -1,237 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-DOCUMENTATION = '''
----
-module: locale_gen
-short_description: Creates or removes locales
-description:
- - Manages locales by editing /etc/locale.gen and invoking locale-gen.
-version_added: "1.6"
-author:
-- Augustus Kling (@AugustusKling)
-options:
- name:
- description:
- - Name and encoding of the locale, such as "en_GB.UTF-8".
- required: true
- state:
- description:
- - Whether the locale shall be present.
- choices: [ absent, present ]
- default: present
-'''
-
-EXAMPLES = '''
-- name: Ensure a locale exists
- locale_gen:
- name: de_CH.UTF-8
- state: present
-'''
-
-import os
-import re
-from subprocess import Popen, PIPE, call
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible.module_utils._text import to_native
-
-LOCALE_NORMALIZATION = {
- ".utf8": ".UTF-8",
- ".eucjp": ".EUC-JP",
- ".iso885915": ".ISO-8859-15",
- ".cp1251": ".CP1251",
- ".koi8r": ".KOI8-R",
- ".armscii8": ".ARMSCII-8",
- ".euckr": ".EUC-KR",
- ".gbk": ".GBK",
- ".gb18030": ".GB18030",
- ".euctw": ".EUC-TW",
-}
-
-
-# ===========================================
-# location module specific support methods.
-#
-
-def is_available(name, ubuntuMode):
-    """Check if the given locale is available on the system. This is done by
-    checking either:
-    * if the locale is present in /etc/locale.gen
-    * or if the locale is present in /usr/share/i18n/SUPPORTED"""
-    if ubuntuMode:
-        __regexp = r'^(?P<locale>\S+_\S+) (?P<charset>\S+)\s*$'
-        __locales_available = '/usr/share/i18n/SUPPORTED'
-    else:
-        __regexp = r'^#{0,1}\s*(?P<locale>\S+_\S+) (?P<charset>\S+)\s*$'
-        __locales_available = '/etc/locale.gen'
-
-    re_compiled = re.compile(__regexp)
-    # A context manager closes the file even when returning early.
-    with open(__locales_available, 'r') as fd:
-        for line in fd:
-            result = re_compiled.match(line)
-            if result and result.group('locale') == name:
-                return True
-    return False
-
-
-def is_present(name):
- """Checks if the given locale is currently installed."""
- output = Popen(["locale", "-a"], stdout=PIPE).communicate()[0]
- output = to_native(output)
- return any(fix_case(name) == fix_case(line) for line in output.splitlines())
-
-
-def fix_case(name):
- """locale -a might return the encoding in either lower or upper case.
- Passing through this function makes them uniform for comparisons."""
- for s, r in LOCALE_NORMALIZATION.items():
- name = name.replace(s, r)
- return name
-
-
-def replace_line(existing_line, new_line):
-    """Replaces lines in /etc/locale.gen"""
-    with open("/etc/locale.gen", "r") as f:
-        lines = [line.replace(existing_line, new_line) for line in f]
-    with open("/etc/locale.gen", "w") as f:
-        f.write("".join(lines))
-
-
-def set_locale(name, enabled=True):
-    """ Sets the state of the locale. Defaults to enabled. """
-    search_string = r'#{0,1}\s*%s (?P<charset>.+)' % name
-    if enabled:
-        new_string = r'%s \g<charset>' % (name)
-    else:
-        new_string = r'# %s \g<charset>' % (name)
-    with open("/etc/locale.gen", "r") as f:
-        lines = [re.sub(search_string, new_string, line) for line in f]
-    with open("/etc/locale.gen", "w") as f:
-        f.write("".join(lines))
-
-
-def apply_change(targetState, name):
- """Create or remove locale.
-
- Keyword arguments:
- targetState -- Desired state, either present or absent.
- name -- Name including encoding such as de_CH.UTF-8.
- """
- if targetState == "present":
- # Create locale.
- set_locale(name, enabled=True)
- else:
- # Delete locale.
- set_locale(name, enabled=False)
-
- localeGenExitValue = call("locale-gen")
- if localeGenExitValue != 0:
-        raise EnvironmentError(localeGenExitValue, "locale-gen failed to execute, it returned " + str(localeGenExitValue))
-
-
-def apply_change_ubuntu(targetState, name):
- """Create or remove locale.
-
- Keyword arguments:
- targetState -- Desired state, either present or absent.
- name -- Name including encoding such as de_CH.UTF-8.
- """
- if targetState == "present":
- # Create locale.
- # Ubuntu's patched locale-gen automatically adds the new locale to /var/lib/locales/supported.d/local
- localeGenExitValue = call(["locale-gen", name])
- else:
- # Delete locale involves discarding the locale from /var/lib/locales/supported.d/local and regenerating all locales.
-        with open("/var/lib/locales/supported.d/local", "r") as f:
-            content = f.readlines()
-        with open("/var/lib/locales/supported.d/local", "w") as f:
-            for line in content:
-                locale, charset = line.split(' ')
-                if locale != name:
-                    f.write(line)
- # Purge locales and regenerate.
- # Please provide a patch if you know how to avoid regenerating the locales to keep!
- localeGenExitValue = call(["locale-gen", "--purge"])
-
- if localeGenExitValue != 0:
-        raise EnvironmentError(localeGenExitValue, "locale-gen failed to execute, it returned " + str(localeGenExitValue))
-
-
-def main():
- module = AnsibleModule(
- argument_spec=dict(
- name=dict(type='str', required=True),
- state=dict(type='str', default='present', choices=['absent', 'present']),
- ),
- supports_check_mode=True,
- )
-
- name = module.params['name']
- state = module.params['state']
-
- if not os.path.exists("/etc/locale.gen"):
- if os.path.exists("/var/lib/locales/supported.d/"):
- # Ubuntu created its own system to manage locales.
- ubuntuMode = True
- else:
- module.fail_json(msg="/etc/locale.gen and /var/lib/locales/supported.d/local are missing. Is the package \"locales\" installed?")
- else:
- # We found the common way to manage locales.
- ubuntuMode = False
-
- if not is_available(name, ubuntuMode):
- module.fail_json(msg="The locale you've entered is not available "
- "on your system.")
-
- if is_present(name):
- prev_state = "present"
- else:
- prev_state = "absent"
- changed = (prev_state != state)
-
- if module.check_mode:
- module.exit_json(changed=changed)
- else:
- if changed:
- try:
- if ubuntuMode is False:
- apply_change(state, name)
- else:
- apply_change_ubuntu(state, name)
- except EnvironmentError as e:
- module.fail_json(msg=to_native(e), exitValue=e.errno)
-
- module.exit_json(name=name, changed=changed, msg="OK")
-
-
-if __name__ == '__main__':
- main()
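
set_locale() above enables or disables a locale by rewriting its line in /etc/locale.gen with a regex substitution. A self-contained sketch of that substitution on an in-memory line; re.escape() is added here as a precaution and is not in the module itself:

    import re

    def toggle_locale_line(line, name, enabled=True):
        # Matches both 'de_CH.UTF-8 UTF-8' and its commented-out form.
        search = r'#?\s*%s (?P<charset>.+)' % re.escape(name)
        if enabled:
            repl = r'%s \g<charset>' % name
        else:
            repl = r'# %s \g<charset>' % name
        return re.sub(search, repl, line)

    print(toggle_locale_line('# de_CH.UTF-8 UTF-8', 'de_CH.UTF-8'))
    # -> 'de_CH.UTF-8 UTF-8'
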
diff --git a/test/support/integration/plugins/modules/lvg.py b/test/support/integration/plugins/modules/lvg.py
deleted file mode 100644
index e2035f68..00000000
--- a/test/support/integration/plugins/modules/lvg.py
+++ /dev/null
@@ -1,295 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-
-# Copyright: (c) 2013, Alexander Bulimov <lazywolf0@gmail.com>
-# Based on lvol module by Jeroen Hoekx <jeroen.hoekx@dsquare.be>
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-DOCUMENTATION = r'''
----
-author:
-- Alexander Bulimov (@abulimov)
-module: lvg
-short_description: Configure LVM volume groups
-description:
- - This module creates, removes or resizes volume groups.
-version_added: "1.1"
-options:
- vg:
- description:
- - The name of the volume group.
- type: str
- required: true
- pvs:
- description:
-      - Comma-separated list of devices to use as physical devices in this volume group.
- - Required when creating or resizing volume group.
- - The module will take care of running pvcreate if needed.
- type: list
- pesize:
- description:
- - "The size of the physical extent. I(pesize) must be a power of 2 of at least 1 sector
- (where the sector size is the largest sector size of the PVs currently used in the VG),
- or at least 128KiB."
-      - Since Ansible 2.6, pesize can be optionally suffixed by a UNIT (k/K/m/M/g/G); the default unit is megabytes.
- type: str
- default: "4"
- pv_options:
- description:
- - Additional options to pass to C(pvcreate) when creating the volume group.
- type: str
- version_added: "2.4"
- vg_options:
- description:
- - Additional options to pass to C(vgcreate) when creating the volume group.
- type: str
- version_added: "1.6"
- state:
- description:
- - Control if the volume group exists.
- type: str
- choices: [ absent, present ]
- default: present
- force:
- description:
-      - If C(yes), allows removing a volume group that contains logical volumes.
- type: bool
- default: no
-seealso:
-- module: filesystem
-- module: lvol
-- module: parted
-notes:
-  - This module does not modify the PE size of an already present volume group.
-'''
-
-EXAMPLES = r'''
-- name: Create a volume group on top of /dev/sda1 with physical extent size = 32MB
- lvg:
- vg: vg.services
- pvs: /dev/sda1
- pesize: 32
-
-- name: Create a volume group on top of /dev/sdb with physical extent size = 128KiB
- lvg:
- vg: vg.services
- pvs: /dev/sdb
- pesize: 128K
-
-# If, for example, we already have VG vg.services on top of /dev/sdb1,
-# this VG will be extended by /dev/sdc5. Or if vg.services was created on
-# top of /dev/sda5, we first extend it with /dev/sdb1 and /dev/sdc5,
-# and then reduce by /dev/sda5.
-- name: Create or resize a volume group on top of /dev/sdb1 and /dev/sdc5.
- lvg:
- vg: vg.services
- pvs: /dev/sdb1,/dev/sdc5
-
-- name: Remove a volume group with name vg.services
- lvg:
- vg: vg.services
- state: absent
-'''
-
-import itertools
-import os
-
-from ansible.module_utils.basic import AnsibleModule
-
-
-def parse_vgs(data):
- vgs = []
- for line in data.splitlines():
- parts = line.strip().split(';')
- vgs.append({
- 'name': parts[0],
- 'pv_count': int(parts[1]),
- 'lv_count': int(parts[2]),
- })
- return vgs
-
-
-def find_mapper_device_name(module, dm_device):
- dmsetup_cmd = module.get_bin_path('dmsetup', True)
- mapper_prefix = '/dev/mapper/'
- rc, dm_name, err = module.run_command("%s info -C --noheadings -o name %s" % (dmsetup_cmd, dm_device))
- if rc != 0:
- module.fail_json(msg="Failed executing dmsetup command.", rc=rc, err=err)
- mapper_device = mapper_prefix + dm_name.rstrip()
- return mapper_device
-
-
-def parse_pvs(module, data):
- pvs = []
- dm_prefix = '/dev/dm-'
- for line in data.splitlines():
- parts = line.strip().split(';')
- if parts[0].startswith(dm_prefix):
- parts[0] = find_mapper_device_name(module, parts[0])
- pvs.append({
- 'name': parts[0],
- 'vg_name': parts[1],
- })
- return pvs
-
-
-def main():
- module = AnsibleModule(
- argument_spec=dict(
- vg=dict(type='str', required=True),
- pvs=dict(type='list'),
- pesize=dict(type='str', default='4'),
- pv_options=dict(type='str', default=''),
- vg_options=dict(type='str', default=''),
- state=dict(type='str', default='present', choices=['absent', 'present']),
- force=dict(type='bool', default=False),
- ),
- supports_check_mode=True,
- )
-
- vg = module.params['vg']
- state = module.params['state']
- force = module.boolean(module.params['force'])
- pesize = module.params['pesize']
- pvoptions = module.params['pv_options'].split()
- vgoptions = module.params['vg_options'].split()
-
- dev_list = []
- if module.params['pvs']:
- dev_list = list(module.params['pvs'])
- elif state == 'present':
- module.fail_json(msg="No physical volumes given.")
-
- # LVM always uses real paths not symlinks so replace symlinks with actual path
- for idx, dev in enumerate(dev_list):
- dev_list[idx] = os.path.realpath(dev)
-
- if state == 'present':
- # check given devices
- for test_dev in dev_list:
- if not os.path.exists(test_dev):
- module.fail_json(msg="Device %s not found." % test_dev)
-
- # get pv list
- pvs_cmd = module.get_bin_path('pvs', True)
- if dev_list:
- pvs_filter_pv_name = ' || '.join(
- 'pv_name = {0}'.format(x)
- for x in itertools.chain(dev_list, module.params['pvs'])
- )
- pvs_filter_vg_name = 'vg_name = {0}'.format(vg)
- pvs_filter = "--select '{0} || {1}' ".format(pvs_filter_pv_name, pvs_filter_vg_name)
- else:
- pvs_filter = ''
- rc, current_pvs, err = module.run_command("%s --noheadings -o pv_name,vg_name --separator ';' %s" % (pvs_cmd, pvs_filter))
- if rc != 0:
- module.fail_json(msg="Failed executing pvs command.", rc=rc, err=err)
-
- # check pv for devices
- pvs = parse_pvs(module, current_pvs)
- used_pvs = [pv for pv in pvs if pv['name'] in dev_list and pv['vg_name'] and pv['vg_name'] != vg]
- if used_pvs:
- module.fail_json(msg="Device %s is already in %s volume group." % (used_pvs[0]['name'], used_pvs[0]['vg_name']))
-
- vgs_cmd = module.get_bin_path('vgs', True)
- rc, current_vgs, err = module.run_command("%s --noheadings -o vg_name,pv_count,lv_count --separator ';'" % vgs_cmd)
-
- if rc != 0:
- module.fail_json(msg="Failed executing vgs command.", rc=rc, err=err)
-
- changed = False
-
- vgs = parse_vgs(current_vgs)
-
- for test_vg in vgs:
- if test_vg['name'] == vg:
- this_vg = test_vg
- break
- else:
- this_vg = None
-
- if this_vg is None:
- if state == 'present':
- # create VG
- if module.check_mode:
- changed = True
- else:
- # create PV
- pvcreate_cmd = module.get_bin_path('pvcreate', True)
- for current_dev in dev_list:
- rc, _, err = module.run_command([pvcreate_cmd] + pvoptions + ['-f', str(current_dev)])
- if rc == 0:
- changed = True
- else:
- module.fail_json(msg="Creating physical volume '%s' failed" % current_dev, rc=rc, err=err)
-                vgcreate_cmd = module.get_bin_path('vgcreate', True)
- rc, _, err = module.run_command([vgcreate_cmd] + vgoptions + ['-s', pesize, vg] + dev_list)
- if rc == 0:
- changed = True
- else:
- module.fail_json(msg="Creating volume group '%s' failed" % vg, rc=rc, err=err)
- else:
- if state == 'absent':
- if module.check_mode:
- module.exit_json(changed=True)
- else:
- if this_vg['lv_count'] == 0 or force:
- # remove VG
- vgremove_cmd = module.get_bin_path('vgremove', True)
- rc, _, err = module.run_command("%s --force %s" % (vgremove_cmd, vg))
- if rc == 0:
- module.exit_json(changed=True)
- else:
- module.fail_json(msg="Failed to remove volume group %s" % (vg), rc=rc, err=err)
- else:
-                    module.fail_json(msg="Refusing to remove non-empty volume group %s without force=yes" % (vg))
-
- # resize VG
- current_devs = [os.path.realpath(pv['name']) for pv in pvs if pv['vg_name'] == vg]
- devs_to_remove = list(set(current_devs) - set(dev_list))
- devs_to_add = list(set(dev_list) - set(current_devs))
-
- if devs_to_add or devs_to_remove:
- if module.check_mode:
- changed = True
- else:
- if devs_to_add:
- devs_to_add_string = ' '.join(devs_to_add)
- # create PV
- pvcreate_cmd = module.get_bin_path('pvcreate', True)
- for current_dev in devs_to_add:
- rc, _, err = module.run_command([pvcreate_cmd] + pvoptions + ['-f', str(current_dev)])
- if rc == 0:
- changed = True
- else:
- module.fail_json(msg="Creating physical volume '%s' failed" % current_dev, rc=rc, err=err)
- # add PV to our VG
- vgextend_cmd = module.get_bin_path('vgextend', True)
- rc, _, err = module.run_command("%s %s %s" % (vgextend_cmd, vg, devs_to_add_string))
- if rc == 0:
- changed = True
- else:
- module.fail_json(msg="Unable to extend %s by %s." % (vg, devs_to_add_string), rc=rc, err=err)
-
- # remove some PV from our VG
- if devs_to_remove:
- devs_to_remove_string = ' '.join(devs_to_remove)
- vgreduce_cmd = module.get_bin_path('vgreduce', True)
- rc, _, err = module.run_command("%s --force %s %s" % (vgreduce_cmd, vg, devs_to_remove_string))
- if rc == 0:
- changed = True
- else:
- module.fail_json(msg="Unable to reduce %s by %s." % (vg, devs_to_remove_string), rc=rc, err=err)
-
- module.exit_json(changed=changed)
-
-
-if __name__ == '__main__':
- main()
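
The resize branch above decides which physical volumes to add or drop with plain set arithmetic over resolved device paths. A tiny sketch mirroring the docstring example, where a VG on /dev/sdb1 and /dev/sda5 is re-declared with /dev/sdb1 and /dev/sdc5:

    current_devs = ['/dev/sdb1', '/dev/sda5']   # PVs currently in the VG
    dev_list = ['/dev/sdb1', '/dev/sdc5']       # PVs requested via 'pvs'

    devs_to_add = sorted(set(dev_list) - set(current_devs))
    devs_to_remove = sorted(set(current_devs) - set(dev_list))

    print(devs_to_add)     # ['/dev/sdc5'] -> pvcreate, then vgextend
    print(devs_to_remove)  # ['/dev/sda5'] -> vgreduce
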
diff --git a/test/support/integration/plugins/modules/mongodb_parameter.py b/test/support/integration/plugins/modules/mongodb_parameter.py
deleted file mode 100644
index 05de42b2..00000000
--- a/test/support/integration/plugins/modules/mongodb_parameter.py
+++ /dev/null
@@ -1,223 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-
-# (c) 2016, Loic Blot <loic.blot@unix-experience.fr>
-# Sponsored by Infopro Digital. http://www.infopro-digital.com/
-# Sponsored by E.T.A.I. http://www.etai.fr/
-#
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = r'''
----
-module: mongodb_parameter
-short_description: Change an administrative parameter on a MongoDB server
-description:
- - Change an administrative parameter on a MongoDB server.
-version_added: "2.1"
-options:
- login_user:
- description:
- - The MongoDB username used to authenticate with.
- type: str
- login_password:
- description:
- - The login user's password used to authenticate with.
- type: str
- login_host:
- description:
- - The host running the database.
- type: str
- default: localhost
- login_port:
- description:
- - The MongoDB port to connect to.
- default: 27017
- type: int
- login_database:
- description:
- - The database where login credentials are stored.
- type: str
- replica_set:
- description:
- - Replica set to connect to (automatically connects to primary for writes).
- type: str
- ssl:
- description:
- - Whether to use an SSL connection when connecting to the database.
- type: bool
- default: no
- param:
- description:
- - MongoDB administrative parameter to modify.
- type: str
- required: true
- value:
- description:
- - MongoDB administrative parameter value to set.
- type: str
- required: true
- param_type:
- description:
- - Define the type of parameter value.
- default: str
- type: str
- choices: [int, str]
-
-notes:
- - Requires the pymongo Python package on the remote host, version 2.4.2+.
- - This can be installed using pip or the OS package manager.
- - See also U(http://api.mongodb.org/python/current/installation.html)
-requirements: [ "pymongo" ]
-author: "Loic Blot (@nerzhul)"
-'''
-
-EXAMPLES = r'''
-- name: Set MongoDB syncdelay to 60 (this is an int)
- mongodb_parameter:
- param: syncdelay
- value: 60
- param_type: int
-'''
-
-RETURN = r'''
-before:
- description: value before modification
- returned: success
- type: str
-after:
- description: value after modification
- returned: success
- type: str
-'''
-
-import os
-import traceback
-
-try:
- from pymongo.errors import ConnectionFailure
- from pymongo.errors import OperationFailure
- from pymongo import version as PyMongoVersion
- from pymongo import MongoClient
-except ImportError:
- try: # for older PyMongo 2.2
- from pymongo import Connection as MongoClient
- except ImportError:
- pymongo_found = False
- else:
- pymongo_found = True
-else:
- pymongo_found = True
-
-from ansible.module_utils.basic import AnsibleModule, missing_required_lib
-from ansible.module_utils.six.moves import configparser
-from ansible.module_utils._text import to_native
-
-
-# =========================================
-# MongoDB module specific support methods.
-#
-
-def load_mongocnf():
- config = configparser.RawConfigParser()
- mongocnf = os.path.expanduser('~/.mongodb.cnf')
-
- try:
- config.readfp(open(mongocnf))
- creds = dict(
- user=config.get('client', 'user'),
- password=config.get('client', 'pass')
- )
- except (configparser.NoOptionError, IOError):
- return False
-
- return creds
-
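-# A minimal sketch of the ~/.mongodb.cnf file parsed above (the user/pass
-# values are placeholders, not defaults):
-#
-#   [client]
-#   user = admin
-#   pass = secret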
-
-# =========================================
-# Module execution.
-#
-
-def main():
- module = AnsibleModule(
- argument_spec=dict(
- login_user=dict(default=None),
- login_password=dict(default=None, no_log=True),
- login_host=dict(default='localhost'),
- login_port=dict(default=27017, type='int'),
- login_database=dict(default=None),
- replica_set=dict(default=None),
- param=dict(required=True),
- value=dict(required=True),
- param_type=dict(default="str", choices=['str', 'int']),
- ssl=dict(default=False, type='bool'),
- )
- )
-
- if not pymongo_found:
- module.fail_json(msg=missing_required_lib('pymongo'))
-
- login_user = module.params['login_user']
- login_password = module.params['login_password']
- login_host = module.params['login_host']
- login_port = module.params['login_port']
- login_database = module.params['login_database']
-
- replica_set = module.params['replica_set']
- ssl = module.params['ssl']
-
- param = module.params['param']
- param_type = module.params['param_type']
- value = module.params['value']
-
- # Verify parameter is coherent with specified type
- try:
- if param_type == 'int':
- value = int(value)
- except ValueError:
- module.fail_json(msg="value '%s' is not %s" % (value, param_type))
-
- try:
- if replica_set:
- client = MongoClient(login_host, int(login_port), replicaset=replica_set, ssl=ssl)
- else:
- client = MongoClient(login_host, int(login_port), ssl=ssl)
-
- if login_user is None and login_password is None:
- mongocnf_creds = load_mongocnf()
- if mongocnf_creds is not False:
- login_user = mongocnf_creds['user']
- login_password = mongocnf_creds['password']
- elif login_password is None or login_user is None:
- module.fail_json(msg='when supplying login arguments, both login_user and login_password must be provided')
-
- if login_user is not None and login_password is not None:
- client.admin.authenticate(login_user, login_password, source=login_database)
-
- except ConnectionFailure as e:
- module.fail_json(msg='unable to connect to database: %s' % to_native(e), exception=traceback.format_exc())
-
- db = client.admin
-
- try:
- after_value = db.command("setParameter", **{param: value})
- except OperationFailure as e:
- module.fail_json(msg="unable to change parameter: %s" % to_native(e), exception=traceback.format_exc())
-
- if "was" not in after_value:
-        module.exit_json(changed=True, msg="Unable to determine old value, assuming it changed.")
- else:
- module.exit_json(changed=(value != after_value["was"]), before=after_value["was"],
- after=value)
-
-
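-# A minimal sketch of the underlying call this module makes, assuming a local
-# mongod and pymongo installed ("syncdelay" is only an example parameter):
-#
-#   from pymongo import MongoClient
-#   client = MongoClient('localhost', 27017)
-#   result = client.admin.command('setParameter', syncdelay=60)
-#   print(result.get('was'))  # the previous value, which the module uses for change detection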
-if __name__ == '__main__':
- main()
diff --git a/test/support/integration/plugins/modules/mongodb_user.py b/test/support/integration/plugins/modules/mongodb_user.py
deleted file mode 100644
index 7a18b159..00000000
--- a/test/support/integration/plugins/modules/mongodb_user.py
+++ /dev/null
@@ -1,474 +0,0 @@
-#!/usr/bin/python
-
-# (c) 2012, Elliott Foster <elliott@fourkitchens.com>
-# Sponsored by Four Kitchens http://fourkitchens.com.
-# (c) 2014, Epic Games, Inc.
-#
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: mongodb_user
-short_description: Adds or removes a user from a MongoDB database
-description:
- - Adds or removes a user from a MongoDB database.
-version_added: "1.1"
-options:
- login_user:
- description:
- - The MongoDB username used to authenticate with.
- type: str
- login_password:
- description:
- - The login user's password used to authenticate with.
- type: str
- login_host:
- description:
- - The host running the database.
- default: localhost
- type: str
- login_port:
- description:
- - The MongoDB port to connect to.
- default: '27017'
- type: str
- login_database:
- version_added: "2.0"
- description:
- - The database where login credentials are stored.
- type: str
- replica_set:
- version_added: "1.6"
- description:
- - Replica set to connect to (automatically connects to primary for writes).
- type: str
- database:
- description:
- - The name of the database to add/remove the user from.
- required: true
- type: str
- aliases: [db]
- name:
- description:
- - The name of the user to add or remove.
- required: true
- aliases: [user]
- type: str
- password:
- description:
- - The password to use for the user.
- type: str
- aliases: [pass]
- ssl:
- version_added: "1.8"
- description:
- - Whether to use an SSL connection when connecting to the database.
- type: bool
- ssl_cert_reqs:
- version_added: "2.2"
- description:
- - Specifies whether a certificate is required from the other side of the connection,
- and whether it will be validated if provided.
- default: CERT_REQUIRED
- choices: [CERT_NONE, CERT_OPTIONAL, CERT_REQUIRED]
- type: str
- roles:
- version_added: "1.3"
- type: list
- elements: raw
- description:
- - >
-          The database user roles. Valid values can be one or more of the following strings:
- 'read', 'readWrite', 'dbAdmin', 'userAdmin', 'clusterAdmin', 'readAnyDatabase', 'readWriteAnyDatabase', 'userAdminAnyDatabase',
- 'dbAdminAnyDatabase'
- - "Or the following dictionary '{ db: DATABASE_NAME, role: ROLE_NAME }'."
- - "This param requires pymongo 2.5+. If it is a string, mongodb 2.4+ is also required. If it is a dictionary, mongo 2.6+ is required."
- state:
- description:
- - The database user state.
- default: present
- choices: [absent, present]
- type: str
- update_password:
- default: always
- choices: [always, on_create]
- version_added: "2.1"
- description:
- - C(always) will update passwords if they differ.
- - C(on_create) will only set the password for newly created users.
- type: str
-
-notes:
- - Requires the pymongo Python package on the remote host, version 2.4.2+. This
-      can be installed using pip or the OS package manager. See U(http://api.mongodb.org/python/current/installation.html)
-requirements: [ "pymongo" ]
-author:
- - "Elliott Foster (@elliotttf)"
- - "Julien Thebault (@Lujeni)"
-'''
-
-EXAMPLES = '''
-- name: Create 'burgers' database user with name 'bob' and password '12345'.
- mongodb_user:
- database: burgers
- name: bob
- password: 12345
- state: present
-
-- name: Create a database user via SSL (MongoDB must be compiled with the SSL option and configured properly)
- mongodb_user:
- database: burgers
- name: bob
- password: 12345
- state: present
- ssl: True
-
-- name: Delete 'burgers' database user with name 'bob'.
- mongodb_user:
- database: burgers
- name: bob
- state: absent
-
-- name: Define more users with various specific roles (if not defined, no roles are assigned, and the user is added in the pre-MongoDB 2.2 style)
- mongodb_user:
- database: burgers
- name: ben
- password: 12345
- roles: read
- state: present
-
-- name: Define roles
- mongodb_user:
- database: burgers
- name: jim
- password: 12345
- roles: readWrite,dbAdmin,userAdmin
- state: present
-
-- name: Define roles
- mongodb_user:
- database: burgers
- name: joe
- password: 12345
- roles: readWriteAnyDatabase
- state: present
-
-- name: Add a user to database in a replica set, the primary server is automatically discovered and written to
- mongodb_user:
- database: burgers
- name: bob
- replica_set: belcher
- password: 12345
- roles: readWriteAnyDatabase
- state: present
-
-# Add a user 'oplog_reader' with read-only access to the 'local' database on the replica_set 'belcher'. This is useful for oplog access (MONGO_OPLOG_URL).
-# Note that the credentials must be added to the 'admin' database, because the 'local' database is not synchronized and cannot receive user credentials.
-# To log in with such a user, the connection string should be MONGO_OPLOG_URL="mongodb://oplog_reader:oplog_reader_password@server1,server2/local?authSource=admin"
-# This syntax requires mongodb 2.6+ and pymongo 2.5+
-- name: Roles as a dictionary
- mongodb_user:
- login_user: root
- login_password: root_password
- database: admin
- user: oplog_reader
- password: oplog_reader_password
- state: present
- replica_set: belcher
- roles:
- - db: local
- role: read
-
-'''
-
-RETURN = '''
-user:
- description: The name of the user to add or remove.
- returned: success
- type: str
-'''
-
-import os
-import ssl as ssl_lib
-import traceback
-from ansible.module_utils.compat.version import LooseVersion
-from operator import itemgetter
-
-try:
- from pymongo.errors import ConnectionFailure
- from pymongo.errors import OperationFailure
- from pymongo import version as PyMongoVersion
- from pymongo import MongoClient
-except ImportError:
- try: # for older PyMongo 2.2
- from pymongo import Connection as MongoClient
- except ImportError:
- pymongo_found = False
- else:
- pymongo_found = True
-else:
- pymongo_found = True
-
-from ansible.module_utils.basic import AnsibleModule, missing_required_lib
-from ansible.module_utils.six import binary_type, text_type
-from ansible.module_utils.six.moves import configparser
-from ansible.module_utils._text import to_native
-
-
-# =========================================
-# MongoDB module specific support methods.
-#
-
-def check_compatibility(module, client):
- """Check the compatibility between the driver and the database.
-
- See: https://docs.mongodb.com/ecosystem/drivers/driver-compatibility-reference/#python-driver-compatibility
-
- Args:
- module: Ansible module.
- client (cursor): Mongodb cursor on admin database.
- """
- loose_srv_version = LooseVersion(client.server_info()['version'])
- loose_driver_version = LooseVersion(PyMongoVersion)
-
- if loose_srv_version >= LooseVersion('3.2') and loose_driver_version < LooseVersion('3.2'):
- module.fail_json(msg=' (Note: you must use pymongo 3.2+ with MongoDB >= 3.2)')
-
- elif loose_srv_version >= LooseVersion('3.0') and loose_driver_version <= LooseVersion('2.8'):
- module.fail_json(msg=' (Note: you must use pymongo 2.8+ with MongoDB 3.0)')
-
- elif loose_srv_version >= LooseVersion('2.6') and loose_driver_version <= LooseVersion('2.7'):
- module.fail_json(msg=' (Note: you must use pymongo 2.7+ with MongoDB 2.6)')
-
- elif LooseVersion(PyMongoVersion) <= LooseVersion('2.5'):
- module.fail_json(msg=' (Note: you must be on mongodb 2.4+ and pymongo 2.5+ to use the roles param)')
-
-
-def user_find(client, user, db_name):
- """Check if the user exists.
-
- Args:
- client (cursor): Mongodb cursor on admin database.
- user (str): User to check.
- db_name (str): User's database.
-
- Returns:
-        dict: the user document when the user exists, False otherwise.
- """
- for mongo_user in client["admin"].system.users.find():
- if mongo_user['user'] == user:
- # NOTE: there is no 'db' field in mongo 2.4.
- if 'db' not in mongo_user:
- return mongo_user
-
- if mongo_user["db"] == db_name:
- return mongo_user
- return False
-
-
-def user_add(module, client, db_name, user, password, roles):
-    # pymongo's add_user is a _create_or_update_user, so we cannot tell whether the user was created
-    # or updated without reproducing a lot of the logic from pymongo's database.py
- db = client[db_name]
-
- if roles is None:
- db.add_user(user, password, False)
- else:
- db.add_user(user, password, None, roles=roles)
-
-
-def user_remove(module, client, db_name, user):
- exists = user_find(client, user, db_name)
- if exists:
- if module.check_mode:
- module.exit_json(changed=True, user=user)
- db = client[db_name]
- db.remove_user(user)
- else:
- module.exit_json(changed=False, user=user)
-
-
-def load_mongocnf():
- config = configparser.RawConfigParser()
- mongocnf = os.path.expanduser('~/.mongodb.cnf')
-
- try:
- config.readfp(open(mongocnf))
- creds = dict(
- user=config.get('client', 'user'),
- password=config.get('client', 'pass')
- )
- except (configparser.NoOptionError, IOError):
- return False
-
- return creds
-
-
-def check_if_roles_changed(uinfo, roles, db_name):
-    # We must be aware of users that can read the oplog on a replica set.
-    # Such users must have access to the local DB, but since that DB does not store user credentials
-    # and is not synchronized among replica sets, the user must be stored in the admin DB.
-    # Their structure is therefore the following:
- # {
- # "_id" : "admin.oplog_reader",
- # "user" : "oplog_reader",
- # "db" : "admin", # <-- admin DB
- # "roles" : [
- # {
- # "role" : "read",
- # "db" : "local" # <-- local DB
- # }
- # ]
- # }
-
- def make_sure_roles_are_a_list_of_dict(roles, db_name):
- output = list()
- for role in roles:
- if isinstance(role, (binary_type, text_type)):
- new_role = {"role": role, "db": db_name}
- output.append(new_role)
- else:
- output.append(role)
- return output
-
- roles_as_list_of_dict = make_sure_roles_are_a_list_of_dict(roles, db_name)
- uinfo_roles = uinfo.get('roles', [])
-
- if sorted(roles_as_list_of_dict, key=itemgetter('db')) == sorted(uinfo_roles, key=itemgetter('db')):
- return False
- return True
-
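-# A short illustration of the normalization above (hypothetical values): with
-# db_name='burgers', roles=['read', {'role': 'readWrite', 'db': 'local'}] becomes
-# [{'role': 'read', 'db': 'burgers'}, {'role': 'readWrite', 'db': 'local'}],
-# which is then compared against the 'roles' list stored in system.users.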
-
-# =========================================
-# Module execution.
-#
-
-def main():
- module = AnsibleModule(
- argument_spec=dict(
- login_user=dict(default=None),
- login_password=dict(default=None, no_log=True),
- login_host=dict(default='localhost'),
- login_port=dict(default='27017'),
- login_database=dict(default=None),
- replica_set=dict(default=None),
- database=dict(required=True, aliases=['db']),
- name=dict(required=True, aliases=['user']),
- password=dict(aliases=['pass'], no_log=True),
- ssl=dict(default=False, type='bool'),
- roles=dict(default=None, type='list', elements='raw'),
- state=dict(default='present', choices=['absent', 'present']),
- update_password=dict(default="always", choices=["always", "on_create"]),
- ssl_cert_reqs=dict(default='CERT_REQUIRED', choices=['CERT_NONE', 'CERT_OPTIONAL', 'CERT_REQUIRED']),
- ),
- supports_check_mode=True
- )
-
- if not pymongo_found:
- module.fail_json(msg=missing_required_lib('pymongo'))
-
- login_user = module.params['login_user']
- login_password = module.params['login_password']
- login_host = module.params['login_host']
- login_port = module.params['login_port']
- login_database = module.params['login_database']
-
- replica_set = module.params['replica_set']
- db_name = module.params['database']
- user = module.params['name']
- password = module.params['password']
- ssl = module.params['ssl']
- roles = module.params['roles'] or []
- state = module.params['state']
- update_password = module.params['update_password']
-
- try:
- connection_params = {
- "host": login_host,
- "port": int(login_port),
- }
-
- if replica_set:
- connection_params["replicaset"] = replica_set
-
- if ssl:
- connection_params["ssl"] = ssl
- connection_params["ssl_cert_reqs"] = getattr(ssl_lib, module.params['ssl_cert_reqs'])
-
- client = MongoClient(**connection_params)
-
- # NOTE: this check must be done ASAP.
-        # We don't need to be authenticated (this ability was lost in PyMongo 3.6)
- if LooseVersion(PyMongoVersion) <= LooseVersion('3.5'):
- check_compatibility(module, client)
-
- if login_user is None and login_password is None:
- mongocnf_creds = load_mongocnf()
- if mongocnf_creds is not False:
- login_user = mongocnf_creds['user']
- login_password = mongocnf_creds['password']
- elif login_password is None or login_user is None:
- module.fail_json(msg='when supplying login arguments, both login_user and login_password must be provided')
-
- if login_user is not None and login_password is not None:
- client.admin.authenticate(login_user, login_password, source=login_database)
- elif LooseVersion(PyMongoVersion) >= LooseVersion('3.0'):
- if db_name != "admin":
- module.fail_json(msg='The localhost login exception only allows the first admin account to be created')
- # else: this has to be the first admin user added
-
- except Exception as e:
- module.fail_json(msg='unable to connect to database: %s' % to_native(e), exception=traceback.format_exc())
-
- if state == 'present':
- if password is None and update_password == 'always':
- module.fail_json(msg='password parameter required when adding a user unless update_password is set to on_create')
-
- try:
- if update_password != 'always':
- uinfo = user_find(client, user, db_name)
- if uinfo:
- password = None
- if not check_if_roles_changed(uinfo, roles, db_name):
- module.exit_json(changed=False, user=user)
-
- if module.check_mode:
- module.exit_json(changed=True, user=user)
-
- user_add(module, client, db_name, user, password, roles)
- except Exception as e:
- module.fail_json(msg='Unable to add or update user: %s' % to_native(e), exception=traceback.format_exc())
- finally:
- try:
- client.close()
- except Exception:
- pass
-        # Here we could check for a password change if mongo provided a query for that: https://jira.mongodb.org/browse/SERVER-22848
- # newuinfo = user_find(client, user, db_name)
- # if uinfo['role'] == newuinfo['role'] and CheckPasswordHere:
- # module.exit_json(changed=False, user=user)
-
- elif state == 'absent':
- try:
- user_remove(module, client, db_name, user)
- except Exception as e:
- module.fail_json(msg='Unable to remove user: %s' % to_native(e), exception=traceback.format_exc())
- finally:
- try:
- client.close()
- except Exception:
- pass
- module.exit_json(changed=True, user=user)
-
-
-if __name__ == '__main__':
- main()
diff --git a/test/support/integration/plugins/modules/pids.py b/test/support/integration/plugins/modules/pids.py
deleted file mode 100644
index 4cbf45a9..00000000
--- a/test/support/integration/plugins/modules/pids.py
+++ /dev/null
@@ -1,89 +0,0 @@
-#!/usr/bin/python
-# Copyright: (c) 2019, Saranya Sridharan
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-DOCUMENTATION = '''
-module: pids
-version_added: 2.8
-description: "Retrieves a list of PIDs of a given process name on Ansible controller/controlled machines. Returns an empty list if no process with that name exists."
-short_description: "Retrieves a list of process IDs if the process is running, otherwise returns an empty list"
-author:
- - Saranya Sridharan (@saranyasridharan)
-requirements:
-    - psutil (Python module)
-options:
- name:
-      description: The name of the process you want to get the PID for.
- required: true
- type: str
-'''
-
-EXAMPLES = '''
-# Pass the process name
-- name: Getting process IDs of the process
- pids:
- name: python
- register: pids_of_python
-
-- name: Printing the process IDs obtained
- debug:
- msg: "PIDS of python:{{pids_of_python.pids|join(',')}}"
-'''
-
-RETURN = '''
-pids:
- description: Process IDs of the given process
- returned: list of none, one, or more process IDs
- type: list
- sample: [100,200]
-'''
-
-from ansible.module_utils.basic import AnsibleModule
-try:
- import psutil
- HAS_PSUTIL = True
-except ImportError:
- HAS_PSUTIL = False
-
-
-def compare_lower(a, b):
- if a is None or b is None:
- # this could just be "return False" but would lead to surprising behavior if both a and b are None
- return a == b
-
- return a.lower() == b.lower()
-
-
-def get_pid(name):
- pids = []
-
- for proc in psutil.process_iter(attrs=['name', 'cmdline']):
- if compare_lower(proc.info['name'], name) or \
- proc.info['cmdline'] and compare_lower(proc.info['cmdline'][0], name):
- pids.append(proc.pid)
-
- return pids
-
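-# A minimal sketch of the matching rule implemented above, assuming psutil is
-# installed: a process matches if its name, or the first element of its cmdline,
-# equals the requested name case-insensitively.
-#
-#   import psutil
-#   pids = [p.pid for p in psutil.process_iter(attrs=['name', 'cmdline'])
-#           if (p.info['name'] or '').lower() == 'python'
-#           or (p.info['cmdline'] and p.info['cmdline'][0].lower() == 'python')]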
-
-def main():
- module = AnsibleModule(
- argument_spec=dict(
- name=dict(required=True, type="str"),
- ),
- supports_check_mode=True,
- )
- if not HAS_PSUTIL:
- module.fail_json(msg="Missing required 'psutil' python module. Try installing it with: pip install psutil")
- name = module.params["name"]
- response = dict(pids=get_pid(name))
- module.exit_json(**response)
-
-
-if __name__ == '__main__':
- main()
diff --git a/test/support/integration/plugins/modules/postgresql_db.py b/test/support/integration/plugins/modules/postgresql_db.py
deleted file mode 100644
index 40858d99..00000000
--- a/test/support/integration/plugins/modules/postgresql_db.py
+++ /dev/null
@@ -1,657 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-
-# Copyright: Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['stableinterface'],
- 'supported_by': 'community'}
-
-DOCUMENTATION = r'''
----
-module: postgresql_db
-short_description: Add or remove PostgreSQL databases from a remote host.
-description:
- - Add or remove PostgreSQL databases from a remote host.
-version_added: '0.6'
-options:
- name:
- description:
- - Name of the database to add or remove
- type: str
- required: true
- aliases: [ db ]
- port:
- description:
- - Database port to connect (if needed)
- type: int
- default: 5432
- aliases:
- - login_port
- owner:
- description:
- - Name of the role to set as owner of the database
- type: str
- template:
- description:
- - Template used to create the database
- type: str
- encoding:
- description:
- - Encoding of the database
- type: str
- lc_collate:
- description:
- - Collation order (LC_COLLATE) to use in the database. Must match collation order of template database unless C(template0) is used as template.
- type: str
- lc_ctype:
- description:
- - Character classification (LC_CTYPE) to use in the database (e.g. lower, upper, ...) Must match LC_CTYPE of template database unless C(template0)
- is used as template.
- type: str
- session_role:
- description:
- - Switch to session_role after connecting. The specified session_role must be a role that the current login_user is a member of.
- - Permissions checking for SQL commands is carried out as though the session_role were the one that had logged in originally.
- type: str
- version_added: '2.8'
- state:
- description:
- - The database state.
- - C(present) implies that the database should be created if necessary.
- - C(absent) implies that the database should be removed if present.
- - C(dump) requires a target definition to which the database will be backed up. (Added in Ansible 2.4)
-      Note that some versions of pg_dump (an embedded PostgreSQL utility used by the module)
-      return rc 0 even when errors occur (e.g. the connection is forbidden by pg_hba.conf, etc.),
-      so the module reports changed=True although the dump has not actually been done. Please make sure
-      that your version of pg_dump returns rc 1 in this case.
- - C(restore) also requires a target definition from which the database will be restored. (Added in Ansible 2.4)
- - The format of the backup will be detected based on the target name.
- - Supported compression formats for dump and restore include C(.pgc), C(.bz2), C(.gz) and C(.xz)
- - Supported formats for dump and restore include C(.sql) and C(.tar)
- type: str
- choices: [ absent, dump, present, restore ]
- default: present
- target:
- description:
- - File to back up or restore from.
- - Used when I(state) is C(dump) or C(restore).
- type: path
- version_added: '2.4'
- target_opts:
- description:
- - Further arguments for pg_dump or pg_restore.
- - Used when I(state) is C(dump) or C(restore).
- type: str
- version_added: '2.4'
- maintenance_db:
- description:
-      - The value specifies the initial database (also called the maintenance DB) that Ansible connects to.
- type: str
- default: postgres
- version_added: '2.5'
- conn_limit:
- description:
- - Specifies the database connection limit.
- type: str
- version_added: '2.8'
- tablespace:
- description:
- - The tablespace to set for the database
- U(https://www.postgresql.org/docs/current/sql-alterdatabase.html).
- - If you want to move the database back to the default tablespace,
- explicitly set this to pg_default.
- type: path
- version_added: '2.9'
- dump_extra_args:
- description:
- - Provides additional arguments when I(state) is C(dump).
- - Cannot be used with dump-file-format-related arguments like ``--format=d``.
- type: str
- version_added: '2.10'
-seealso:
-- name: CREATE DATABASE reference
- description: Complete reference of the CREATE DATABASE command documentation.
- link: https://www.postgresql.org/docs/current/sql-createdatabase.html
-- name: DROP DATABASE reference
- description: Complete reference of the DROP DATABASE command documentation.
- link: https://www.postgresql.org/docs/current/sql-dropdatabase.html
-- name: pg_dump reference
- description: Complete reference of pg_dump documentation.
- link: https://www.postgresql.org/docs/current/app-pgdump.html
-- name: pg_restore reference
- description: Complete reference of pg_restore documentation.
- link: https://www.postgresql.org/docs/current/app-pgrestore.html
-- module: postgresql_tablespace
-- module: postgresql_info
-- module: postgresql_ping
-notes:
-- State C(dump) and C(restore) don't require I(psycopg2) since version 2.8.
-author: "Ansible Core Team"
-extends_documentation_fragment:
-- postgres
-'''
-
-EXAMPLES = r'''
-- name: Create a new database with name "acme"
- postgresql_db:
- name: acme
-
-# Note: If a template different from "template0" is specified, encoding and locale settings must match those of the template.
-- name: Create a new database with name "acme" and specific encoding and locale settings
- postgresql_db:
- name: acme
- encoding: UTF-8
- lc_collate: de_DE.UTF-8
- lc_ctype: de_DE.UTF-8
- template: template0
-
-# Note: Default limit for the number of concurrent connections to a specific database is "-1", which means "unlimited"
-- name: Create a new database with name "acme" which has a limit of 100 concurrent connections
- postgresql_db:
- name: acme
- conn_limit: "100"
-
-- name: Dump an existing database to a file
- postgresql_db:
- name: acme
- state: dump
- target: /tmp/acme.sql
-
-- name: Dump an existing database to a file excluding the test table
- postgresql_db:
- name: acme
- state: dump
- target: /tmp/acme.sql
- dump_extra_args: --exclude-table=test
-
-- name: Dump an existing database to a file (with compression)
- postgresql_db:
- name: acme
- state: dump
- target: /tmp/acme.sql.gz
-
-- name: Dump a single schema for an existing database
- postgresql_db:
- name: acme
- state: dump
- target: /tmp/acme.sql
- target_opts: "-n public"
-
-# Note: In the example below, if database foo exists and has another tablespace
-# the tablespace will be changed to foo. Access to the database will be locked
-# until the copying of database files is finished.
-- name: Create a new database called foo in tablespace bar
- postgresql_db:
- name: foo
- tablespace: bar
-'''
-
-RETURN = r'''
-executed_commands:
- description: List of commands which tried to run.
- returned: always
- type: list
- sample: ["CREATE DATABASE acme"]
- version_added: '2.10'
-'''
-
-
-import os
-import subprocess
-import traceback
-
-try:
- import psycopg2
- import psycopg2.extras
-except ImportError:
- HAS_PSYCOPG2 = False
-else:
- HAS_PSYCOPG2 = True
-
-import ansible.module_utils.postgres as pgutils
-from ansible.module_utils.basic import AnsibleModule
-from ansible.module_utils.database import SQLParseError, pg_quote_identifier
-from ansible.module_utils.six import iteritems
-from ansible.module_utils.six.moves import shlex_quote
-from ansible.module_utils._text import to_native
-
-executed_commands = []
-
-
-class NotSupportedError(Exception):
- pass
-
-# ===========================================
-# PostgreSQL module specific support methods.
-#
-
-
-def set_owner(cursor, db, owner):
- query = 'ALTER DATABASE %s OWNER TO "%s"' % (
- pg_quote_identifier(db, 'database'),
- owner)
- executed_commands.append(query)
- cursor.execute(query)
- return True
-
-
-def set_conn_limit(cursor, db, conn_limit):
- query = "ALTER DATABASE %s CONNECTION LIMIT %s" % (
- pg_quote_identifier(db, 'database'),
- conn_limit)
- executed_commands.append(query)
- cursor.execute(query)
- return True
-
-
-def get_encoding_id(cursor, encoding):
- query = "SELECT pg_char_to_encoding(%(encoding)s) AS encoding_id;"
- cursor.execute(query, {'encoding': encoding})
- return cursor.fetchone()['encoding_id']
-
-
-def get_db_info(cursor, db):
- query = """
- SELECT rolname AS owner,
- pg_encoding_to_char(encoding) AS encoding, encoding AS encoding_id,
- datcollate AS lc_collate, datctype AS lc_ctype, pg_database.datconnlimit AS conn_limit,
- spcname AS tablespace
- FROM pg_database
- JOIN pg_roles ON pg_roles.oid = pg_database.datdba
- JOIN pg_tablespace ON pg_tablespace.oid = pg_database.dattablespace
- WHERE datname = %(db)s
- """
- cursor.execute(query, {'db': db})
- return cursor.fetchone()
-
-
-def db_exists(cursor, db):
- query = "SELECT * FROM pg_database WHERE datname=%(db)s"
- cursor.execute(query, {'db': db})
- return cursor.rowcount == 1
-
-
-def db_delete(cursor, db):
- if db_exists(cursor, db):
- query = "DROP DATABASE %s" % pg_quote_identifier(db, 'database')
- executed_commands.append(query)
- cursor.execute(query)
- return True
- else:
- return False
-
-
-def db_create(cursor, db, owner, template, encoding, lc_collate, lc_ctype, conn_limit, tablespace):
- params = dict(enc=encoding, collate=lc_collate, ctype=lc_ctype, conn_limit=conn_limit, tablespace=tablespace)
- if not db_exists(cursor, db):
- query_fragments = ['CREATE DATABASE %s' % pg_quote_identifier(db, 'database')]
- if owner:
- query_fragments.append('OWNER "%s"' % owner)
- if template:
- query_fragments.append('TEMPLATE %s' % pg_quote_identifier(template, 'database'))
- if encoding:
- query_fragments.append('ENCODING %(enc)s')
- if lc_collate:
- query_fragments.append('LC_COLLATE %(collate)s')
- if lc_ctype:
- query_fragments.append('LC_CTYPE %(ctype)s')
- if tablespace:
- query_fragments.append('TABLESPACE %s' % pg_quote_identifier(tablespace, 'tablespace'))
- if conn_limit:
- query_fragments.append("CONNECTION LIMIT %(conn_limit)s" % {"conn_limit": conn_limit})
- query = ' '.join(query_fragments)
- executed_commands.append(cursor.mogrify(query, params))
- cursor.execute(query, params)
- return True
- else:
- db_info = get_db_info(cursor, db)
- if (encoding and get_encoding_id(cursor, encoding) != db_info['encoding_id']):
- raise NotSupportedError(
- 'Changing database encoding is not supported. '
- 'Current encoding: %s' % db_info['encoding']
- )
- elif lc_collate and lc_collate != db_info['lc_collate']:
- raise NotSupportedError(
- 'Changing LC_COLLATE is not supported. '
- 'Current LC_COLLATE: %s' % db_info['lc_collate']
- )
- elif lc_ctype and lc_ctype != db_info['lc_ctype']:
- raise NotSupportedError(
-                'Changing LC_CTYPE is not supported. '
- 'Current LC_CTYPE: %s' % db_info['lc_ctype']
- )
- else:
- changed = False
-
- if owner and owner != db_info['owner']:
- changed = set_owner(cursor, db, owner)
-
- if conn_limit and conn_limit != str(db_info['conn_limit']):
- changed = set_conn_limit(cursor, db, conn_limit)
-
- if tablespace and tablespace != db_info['tablespace']:
- changed = set_tablespace(cursor, db, tablespace)
-
- return changed
-
-
-def db_matches(cursor, db, owner, template, encoding, lc_collate, lc_ctype, conn_limit, tablespace):
- if not db_exists(cursor, db):
- return False
- else:
- db_info = get_db_info(cursor, db)
- if (encoding and get_encoding_id(cursor, encoding) != db_info['encoding_id']):
- return False
- elif lc_collate and lc_collate != db_info['lc_collate']:
- return False
- elif lc_ctype and lc_ctype != db_info['lc_ctype']:
- return False
- elif owner and owner != db_info['owner']:
- return False
- elif conn_limit and conn_limit != str(db_info['conn_limit']):
- return False
- elif tablespace and tablespace != db_info['tablespace']:
- return False
- else:
- return True
-
-
-def db_dump(module, target, target_opts="",
- db=None,
- dump_extra_args=None,
- user=None,
- password=None,
- host=None,
- port=None,
- **kw):
-
- flags = login_flags(db, host, port, user, db_prefix=False)
- cmd = module.get_bin_path('pg_dump', True)
- comp_prog_path = None
-
- if os.path.splitext(target)[-1] == '.tar':
- flags.append(' --format=t')
- elif os.path.splitext(target)[-1] == '.pgc':
- flags.append(' --format=c')
- if os.path.splitext(target)[-1] == '.gz':
- if module.get_bin_path('pigz'):
- comp_prog_path = module.get_bin_path('pigz', True)
- else:
- comp_prog_path = module.get_bin_path('gzip', True)
- elif os.path.splitext(target)[-1] == '.bz2':
- comp_prog_path = module.get_bin_path('bzip2', True)
- elif os.path.splitext(target)[-1] == '.xz':
- comp_prog_path = module.get_bin_path('xz', True)
-
- cmd += "".join(flags)
-
- if dump_extra_args:
- cmd += " {0} ".format(dump_extra_args)
-
- if target_opts:
- cmd += " {0} ".format(target_opts)
-
- if comp_prog_path:
- # Use a fifo to be notified of an error in pg_dump
- # Using shell pipe has no way to return the code of the first command
- # in a portable way.
- fifo = os.path.join(module.tmpdir, 'pg_fifo')
- os.mkfifo(fifo)
- cmd = '{1} <{3} > {2} & {0} >{3}'.format(cmd, comp_prog_path, shlex_quote(target), fifo)
- else:
- cmd = '{0} > {1}'.format(cmd, shlex_quote(target))
-
- return do_with_password(module, cmd, password)
-
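-# For a compressed dump, the command assembled above has this shape (a sketch
-# with a hypothetical target /tmp/acme.sql.gz; <tmpdir> stands for module.tmpdir):
-#
-#   gzip < <tmpdir>/pg_fifo > /tmp/acme.sql.gz & pg_dump ... > <tmpdir>/pg_fifo
-#
-# The compressor is backgrounded reading from the fifo while pg_dump runs in the
-# foreground, so the rc seen by run_command is pg_dump's exit status.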
-
-def db_restore(module, target, target_opts="",
- db=None,
- user=None,
- password=None,
- host=None,
- port=None,
- **kw):
-
- flags = login_flags(db, host, port, user)
- comp_prog_path = None
- cmd = module.get_bin_path('psql', True)
-
- if os.path.splitext(target)[-1] == '.sql':
- flags.append(' --file={0}'.format(target))
-
- elif os.path.splitext(target)[-1] == '.tar':
- flags.append(' --format=Tar')
- cmd = module.get_bin_path('pg_restore', True)
-
- elif os.path.splitext(target)[-1] == '.pgc':
- flags.append(' --format=Custom')
- cmd = module.get_bin_path('pg_restore', True)
-
- elif os.path.splitext(target)[-1] == '.gz':
- comp_prog_path = module.get_bin_path('zcat', True)
-
- elif os.path.splitext(target)[-1] == '.bz2':
- comp_prog_path = module.get_bin_path('bzcat', True)
-
- elif os.path.splitext(target)[-1] == '.xz':
- comp_prog_path = module.get_bin_path('xzcat', True)
-
- cmd += "".join(flags)
- if target_opts:
- cmd += " {0} ".format(target_opts)
-
- if comp_prog_path:
- env = os.environ.copy()
- if password:
-            env["PGPASSWORD"] = password  # extend the copied environment instead of replacing it
- p1 = subprocess.Popen([comp_prog_path, target], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
- p2 = subprocess.Popen(cmd, stdin=p1.stdout, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True, env=env)
- (stdout2, stderr2) = p2.communicate()
- p1.stdout.close()
- p1.wait()
- if p1.returncode != 0:
- stderr1 = p1.stderr.read()
- return p1.returncode, '', stderr1, 'cmd: ****'
- else:
- return p2.returncode, '', stderr2, 'cmd: ****'
- else:
- cmd = '{0} < {1}'.format(cmd, shlex_quote(target))
-
- return do_with_password(module, cmd, password)
-
-
-def login_flags(db, host, port, user, db_prefix=True):
- """
- returns a list of connection argument strings each prefixed
- with a space and quoted where necessary to later be combined
- in a single shell string with `"".join(rv)`
-
- db_prefix determines if "--dbname" is prefixed to the db argument,
- since the argument was introduced in 9.3.
- """
- flags = []
- if db:
- if db_prefix:
- flags.append(' --dbname={0}'.format(shlex_quote(db)))
- else:
- flags.append(' {0}'.format(shlex_quote(db)))
- if host:
- flags.append(' --host={0}'.format(host))
- if port:
- flags.append(' --port={0}'.format(port))
- if user:
- flags.append(' --username={0}'.format(user))
- return flags
-
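-# Usage sketch (hypothetical values):
-#
-#   login_flags('acme', 'db.example.com', 5432, 'admin')
-#   # -> [' --dbname=acme', ' --host=db.example.com', ' --port=5432', ' --username=admin']
-#   login_flags('acme', None, None, 'admin', db_prefix=False)
-#   # -> [' acme', ' --username=admin']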
-
-def do_with_password(module, cmd, password):
- env = {}
- if password:
- env = {"PGPASSWORD": password}
- executed_commands.append(cmd)
-    # module.run_command returns (rc, stdout, stderr)
-    rc, stdout, stderr = module.run_command(cmd, use_unsafe_shell=True, environ_update=env)
-    return rc, stdout, stderr, cmd
-
-
-def set_tablespace(cursor, db, tablespace):
- query = "ALTER DATABASE %s SET TABLESPACE %s" % (
- pg_quote_identifier(db, 'database'),
- pg_quote_identifier(tablespace, 'tablespace'))
- executed_commands.append(query)
- cursor.execute(query)
- return True
-
-# ===========================================
-# Module execution.
-#
-
-
-def main():
- argument_spec = pgutils.postgres_common_argument_spec()
- argument_spec.update(
- db=dict(type='str', required=True, aliases=['name']),
- owner=dict(type='str', default=''),
- template=dict(type='str', default=''),
- encoding=dict(type='str', default=''),
- lc_collate=dict(type='str', default=''),
- lc_ctype=dict(type='str', default=''),
- state=dict(type='str', default='present', choices=['absent', 'dump', 'present', 'restore']),
- target=dict(type='path', default=''),
- target_opts=dict(type='str', default=''),
- maintenance_db=dict(type='str', default="postgres"),
- session_role=dict(type='str'),
- conn_limit=dict(type='str', default=''),
- tablespace=dict(type='path', default=''),
- dump_extra_args=dict(type='str', default=None),
- )
-
- module = AnsibleModule(
- argument_spec=argument_spec,
- supports_check_mode=True
- )
-
- db = module.params["db"]
- owner = module.params["owner"]
- template = module.params["template"]
- encoding = module.params["encoding"]
- lc_collate = module.params["lc_collate"]
- lc_ctype = module.params["lc_ctype"]
- target = module.params["target"]
- target_opts = module.params["target_opts"]
- state = module.params["state"]
- changed = False
- maintenance_db = module.params['maintenance_db']
- session_role = module.params["session_role"]
- conn_limit = module.params['conn_limit']
- tablespace = module.params['tablespace']
- dump_extra_args = module.params['dump_extra_args']
-
- raw_connection = state in ("dump", "restore")
-
- if not raw_connection:
- pgutils.ensure_required_libs(module)
-
-    # To use default values, keyword arguments must be absent, so
- # check which values are empty and don't include in the **kw
- # dictionary
- params_map = {
- "login_host": "host",
- "login_user": "user",
- "login_password": "password",
- "port": "port",
- "ssl_mode": "sslmode",
- "ca_cert": "sslrootcert"
- }
- kw = dict((params_map[k], v) for (k, v) in iteritems(module.params)
- if k in params_map and v != '' and v is not None)
-
- # If a login_unix_socket is specified, incorporate it here.
- is_localhost = "host" not in kw or kw["host"] == "" or kw["host"] == "localhost"
-
- if is_localhost and module.params["login_unix_socket"] != "":
- kw["host"] = module.params["login_unix_socket"]
-
- if target == "":
- target = "{0}/{1}.sql".format(os.getcwd(), db)
- target = os.path.expanduser(target)
-
- if not raw_connection:
- try:
- db_connection = psycopg2.connect(database=maintenance_db, **kw)
-
- # Enable autocommit so we can create databases
- if psycopg2.__version__ >= '2.4.2':
- db_connection.autocommit = True
- else:
- db_connection.set_isolation_level(psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT)
- cursor = db_connection.cursor(cursor_factory=psycopg2.extras.DictCursor)
-
- except TypeError as e:
- if 'sslrootcert' in e.args[0]:
- module.fail_json(msg='Postgresql server must be at least version 8.4 to support sslrootcert. Exception: {0}'.format(to_native(e)),
- exception=traceback.format_exc())
- module.fail_json(msg="unable to connect to database: %s" % to_native(e), exception=traceback.format_exc())
-
- except Exception as e:
- module.fail_json(msg="unable to connect to database: %s" % to_native(e), exception=traceback.format_exc())
-
- if session_role:
- try:
- cursor.execute('SET ROLE "%s"' % session_role)
- except Exception as e:
- module.fail_json(msg="Could not switch role: %s" % to_native(e), exception=traceback.format_exc())
-
- try:
- if module.check_mode:
- if state == "absent":
- changed = db_exists(cursor, db)
- elif state == "present":
- changed = not db_matches(cursor, db, owner, template, encoding, lc_collate, lc_ctype, conn_limit, tablespace)
- module.exit_json(changed=changed, db=db, executed_commands=executed_commands)
-
- if state == "absent":
- try:
- changed = db_delete(cursor, db)
- except SQLParseError as e:
- module.fail_json(msg=to_native(e), exception=traceback.format_exc())
-
- elif state == "present":
- try:
- changed = db_create(cursor, db, owner, template, encoding, lc_collate, lc_ctype, conn_limit, tablespace)
- except SQLParseError as e:
- module.fail_json(msg=to_native(e), exception=traceback.format_exc())
-
- elif state in ("dump", "restore"):
-            method = db_dump if state == "dump" else db_restore
- try:
- if state == 'dump':
- rc, stdout, stderr, cmd = method(module, target, target_opts, db, dump_extra_args, **kw)
- else:
- rc, stdout, stderr, cmd = method(module, target, target_opts, db, **kw)
-
- if rc != 0:
- module.fail_json(msg=stderr, stdout=stdout, rc=rc, cmd=cmd)
- else:
- module.exit_json(changed=True, msg=stdout, stderr=stderr, rc=rc, cmd=cmd,
- executed_commands=executed_commands)
- except SQLParseError as e:
- module.fail_json(msg=to_native(e), exception=traceback.format_exc())
-
- except NotSupportedError as e:
- module.fail_json(msg=to_native(e), exception=traceback.format_exc())
- except SystemExit:
- # Avoid catching this on Python 2.4
- raise
- except Exception as e:
- module.fail_json(msg="Database query failed: %s" % to_native(e), exception=traceback.format_exc())
-
- module.exit_json(changed=changed, db=db, executed_commands=executed_commands)
-
-
-if __name__ == '__main__':
- main()
diff --git a/test/support/integration/plugins/modules/postgresql_privs.py b/test/support/integration/plugins/modules/postgresql_privs.py
deleted file mode 100644
index ba8324dd..00000000
--- a/test/support/integration/plugins/modules/postgresql_privs.py
+++ /dev/null
@@ -1,1097 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-
-# Copyright: Ansible Project
-# Copyright: (c) 2019, Tobias Birkefeld (@tcraxs) <t@craxs.de>
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['stableinterface'],
- 'supported_by': 'community'}
-
-DOCUMENTATION = r'''
----
-module: postgresql_privs
-version_added: '1.2'
-short_description: Grant or revoke privileges on PostgreSQL database objects
-description:
-- Grant or revoke privileges on PostgreSQL database objects.
-- This module is basically a wrapper around most of the functionality of
- PostgreSQL's GRANT and REVOKE statements with detection of changes
- (GRANT/REVOKE I(privs) ON I(type) I(objs) TO/FROM I(roles)).
-options:
- database:
- description:
- - Name of database to connect to.
- required: yes
- type: str
- aliases:
- - db
- - login_db
- state:
- description:
- - If C(present), the specified privileges are granted, if C(absent) they are revoked.
- type: str
- default: present
- choices: [ absent, present ]
- privs:
- description:
- - Comma separated list of privileges to grant/revoke.
- type: str
- aliases:
- - priv
- type:
- description:
- - Type of database object to set privileges on.
- - The C(default_privs) choice is available starting at version 2.7.
- - The C(foreign_data_wrapper) and C(foreign_server) object types are available from Ansible version '2.8'.
- - The C(type) choice is available from Ansible version '2.10'.
- type: str
- default: table
- choices: [ database, default_privs, foreign_data_wrapper, foreign_server, function,
- group, language, table, tablespace, schema, sequence, type ]
- objs:
- description:
- - Comma separated list of database objects to set privileges on.
- - If I(type) is C(table), C(partition table), C(sequence) or C(function),
-      the special value C(ALL_IN_SCHEMA) can be provided instead to specify all
- database objects of type I(type) in the schema specified via I(schema).
- (This also works with PostgreSQL < 9.0.) (C(ALL_IN_SCHEMA) is available
- for C(function) and C(partition table) from version 2.8)
- - If I(type) is C(database), this parameter can be omitted, in which case
- privileges are set for the database specified via I(database).
- - 'If I(type) is I(function), colons (":") in object names will be
- replaced with commas (needed to specify function signatures, see examples)'
- type: str
- aliases:
- - obj
- schema:
- description:
- - Schema that contains the database objects specified via I(objs).
- - May only be provided if I(type) is C(table), C(sequence), C(function), C(type),
- or C(default_privs). Defaults to C(public) in these cases.
- - Pay attention, for embedded types when I(type=type)
- I(schema) can be C(pg_catalog) or C(information_schema) respectively.
- type: str
- roles:
- description:
- - Comma separated list of role (user/group) names to set permissions for.
- - The special value C(PUBLIC) can be provided instead to set permissions
- for the implicitly defined PUBLIC group.
- type: str
- required: yes
- aliases:
- - role
- fail_on_role:
- version_added: '2.8'
- description:
- - If C(yes), fail when target role (for whom privs need to be granted) does not exist.
- Otherwise just warn and continue.
- default: yes
- type: bool
- session_role:
- version_added: '2.8'
- description:
- - Switch to session_role after connecting.
- - The specified session_role must be a role that the current login_user is a member of.
- - Permissions checking for SQL commands is carried out as though the session_role were the one that had logged in originally.
- type: str
- target_roles:
- description:
- - A list of existing role (user/group) names to set as the
- default permissions for database objects subsequently created by them.
- - Parameter I(target_roles) is only available with C(type=default_privs).
- type: str
- version_added: '2.8'
- grant_option:
- description:
- - Whether C(role) may grant/revoke the specified privileges/group memberships to others.
- - Set to C(no) to revoke GRANT OPTION, leave unspecified to make no changes.
- - I(grant_option) only has an effect if I(state) is C(present).
- type: bool
- aliases:
- - admin_option
- host:
- description:
- - Database host address. If unspecified, connect via Unix socket.
- type: str
- aliases:
- - login_host
- port:
- description:
- - Database port to connect to.
- type: int
- default: 5432
- aliases:
- - login_port
- unix_socket:
- description:
- - Path to a Unix domain socket for local connections.
- type: str
- aliases:
- - login_unix_socket
- login:
- description:
- - The username to authenticate with.
- type: str
- default: postgres
- aliases:
- - login_user
- password:
- description:
- - The password to authenticate with.
- type: str
- aliases:
- - login_password
- ssl_mode:
- description:
- - Determines whether or with what priority a secure SSL TCP/IP connection will be negotiated with the server.
- - See https://www.postgresql.org/docs/current/static/libpq-ssl.html for more information on the modes.
- - Default of C(prefer) matches libpq default.
- type: str
- default: prefer
- choices: [ allow, disable, prefer, require, verify-ca, verify-full ]
- version_added: '2.3'
- ca_cert:
- description:
- - Specifies the name of a file containing SSL certificate authority (CA) certificate(s).
- - If the file exists, the server's certificate will be verified to be signed by one of these authorities.
- version_added: '2.3'
- type: str
- aliases:
- - ssl_rootcert
-
-notes:
-- Parameters that accept comma separated lists (I(privs), I(objs), I(roles))
- have singular alias names (I(priv), I(obj), I(role)).
-- To revoke only C(GRANT OPTION) for a specific object, set I(state) to
- C(present) and I(grant_option) to C(no) (see examples).
-- Note that when revoking privileges from a role R, this role may still have
-  access via privileges granted to any role R is a member of, including C(PUBLIC).
-- Note that when revoking privileges from a role R, you do so as the user
-  specified via I(login). If R has also been granted the same privileges by
-  another user, R can still access database objects via those privileges.
-- When revoking privileges, C(RESTRICT) is assumed (see PostgreSQL docs).
-
-seealso:
-- module: postgresql_user
-- module: postgresql_owner
-- module: postgresql_membership
-- name: PostgreSQL privileges
- description: General information about PostgreSQL privileges.
- link: https://www.postgresql.org/docs/current/ddl-priv.html
-- name: PostgreSQL GRANT command reference
- description: Complete reference of the PostgreSQL GRANT command documentation.
- link: https://www.postgresql.org/docs/current/sql-grant.html
-- name: PostgreSQL REVOKE command reference
- description: Complete reference of the PostgreSQL REVOKE command documentation.
- link: https://www.postgresql.org/docs/current/sql-revoke.html
-
-extends_documentation_fragment:
-- postgres
-
-author:
-- Bernhard Weitzhofer (@b6d)
-- Tobias Birkefeld (@tcraxs)
-'''
-
-EXAMPLES = r'''
-# On database "library":
-# GRANT SELECT, INSERT, UPDATE ON TABLE public.books, public.authors
-# TO librarian, reader WITH GRANT OPTION
-- name: Grant privs to librarian and reader on database library
- postgresql_privs:
- database: library
- state: present
- privs: SELECT,INSERT,UPDATE
- type: table
- objs: books,authors
- schema: public
- roles: librarian,reader
- grant_option: yes
-
-- name: Same as above leveraging default values
- postgresql_privs:
- db: library
- privs: SELECT,INSERT,UPDATE
- objs: books,authors
- roles: librarian,reader
- grant_option: yes
-
-# REVOKE GRANT OPTION FOR INSERT ON TABLE books FROM reader
-# Note that role "reader" will be *granted* INSERT privilege itself if this
-# isn't already the case (since state: present).
-- name: Revoke privs from reader
- postgresql_privs:
- db: library
- state: present
- priv: INSERT
- obj: books
- role: reader
- grant_option: no
-
-# "public" is the default schema. This also works for PostgreSQL 8.x.
-- name: REVOKE INSERT, UPDATE ON ALL TABLES IN SCHEMA public FROM reader
- postgresql_privs:
- db: library
- state: absent
- privs: INSERT,UPDATE
- objs: ALL_IN_SCHEMA
- role: reader
-
-- name: GRANT ALL PRIVILEGES ON SCHEMA public, math TO librarian
- postgresql_privs:
- db: library
- privs: ALL
- type: schema
- objs: public,math
- role: librarian
-
-# Note the separation of arguments with colons.
-- name: GRANT ALL PRIVILEGES ON FUNCTION math.add(int, int) TO librarian, reader
- postgresql_privs:
- db: library
- privs: ALL
- type: function
- obj: add(int:int)
- schema: math
- roles: librarian,reader
-
-# Note that group role memberships apply cluster-wide and therefore are not
-# restricted to database "library" here.
-- name: GRANT librarian, reader TO alice, bob WITH ADMIN OPTION
- postgresql_privs:
- db: library
- type: group
- objs: librarian,reader
- roles: alice,bob
- admin_option: yes
-
-# Note that here "db: postgres" specifies the database to connect to, not the
-# database to grant privileges on (which is specified via the "objs" param)
-- name: GRANT ALL PRIVILEGES ON DATABASE library TO librarian
- postgresql_privs:
- db: postgres
- privs: ALL
- type: database
- obj: library
- role: librarian
-
-# If objs is omitted for type "database", it defaults to the database
-# to which the connection is established
-- name: GRANT ALL PRIVILEGES ON DATABASE library TO librarian
- postgresql_privs:
- db: library
- privs: ALL
- type: database
- role: librarian
-
-# Available since version 2.7
-# Objs must be set; use ALL_DEFAULT for TABLES/SEQUENCES/TYPES/FUNCTIONS.
-# ALL_DEFAULT works only with privs=ALL.
-# For specific privileges and object classes, see the examples further below.
-- name: ALTER DEFAULT PRIVILEGES ON DATABASE library TO librarian
- postgresql_privs:
- db: library
- objs: ALL_DEFAULT
- privs: ALL
- type: default_privs
- role: librarian
- grant_option: yes
-
-# Available since version 2.7
-# Objs must be set; use ALL_DEFAULT for TABLES/SEQUENCES/TYPES/FUNCTIONS.
-# ALL_DEFAULT works only with privs=ALL.
-# For specific privileges, name the object classes explicitly, as in the two steps below.
-- name: ALTER DEFAULT PRIVILEGES ON DATABASE library TO reader, step 1
- postgresql_privs:
- db: library
- objs: TABLES,SEQUENCES
- privs: SELECT
- type: default_privs
- role: reader
-
-- name: ALTER DEFAULT PRIVILEGES ON DATABASE library TO reader, step 2
- postgresql_privs:
- db: library
- objs: TYPES
- privs: USAGE
- type: default_privs
- role: reader
-
-# Available since version 2.8
-- name: GRANT ALL PRIVILEGES ON FOREIGN DATA WRAPPER fdw TO reader
- postgresql_privs:
- db: test
- objs: fdw
- privs: ALL
- type: foreign_data_wrapper
- role: reader
-
-# Available since version 2.10
-- name: GRANT ALL PRIVILEGES ON TYPE customtype TO reader
- postgresql_privs:
- db: test
- objs: customtype
- privs: ALL
- type: type
- role: reader
-
-# Available since version 2.8
-- name: GRANT ALL PRIVILEGES ON FOREIGN SERVER fdw_server TO reader
- postgresql_privs:
- db: test
- objs: fdw_server
- privs: ALL
- type: foreign_server
- role: reader
-
-# Available since version 2.8
-# Grant 'execute' permissions on all functions in schema 'common' to role 'caller'
-- name: GRANT EXECUTE ON ALL FUNCTIONS IN SCHEMA common TO caller
- postgresql_privs:
- type: function
- state: present
- privs: EXECUTE
- roles: caller
- objs: ALL_IN_SCHEMA
- schema: common
-
-# Available since version 2.8
-# ALTER DEFAULT PRIVILEGES FOR ROLE librarian IN SCHEMA library GRANT SELECT ON TABLES TO reader
-# Grant SELECT privileges by default on new TABLES objects created by librarian
-# to the role reader.
-# For a specific creating role, set target_roles as below.
-- name: ALTER privs
- postgresql_privs:
- db: library
- schema: library
- objs: TABLES
- privs: SELECT
- type: default_privs
- role: reader
- target_roles: librarian
-
-# Available since version 2.8
-# ALTER DEFAULT PRIVILEGES FOR ROLE librarian IN SCHEMA library REVOKE SELECT ON TABLES FROM reader
-# Revoke the default SELECT privileges on new TABLES objects created by librarian
-# from the role reader.
-# For a specific creating role, set target_roles as below.
-- name: ALTER privs
- postgresql_privs:
- db: library
- state: absent
- schema: library
- objs: TABLES
- privs: SELECT
- type: default_privs
- role: reader
- target_roles: librarian
-
-# Available since version 2.10
-- name: Grant type privileges for pg_catalog.numeric type to alice
- postgresql_privs:
- type: type
- roles: alice
- privs: ALL
- objs: numeric
- schema: pg_catalog
- db: acme
-'''
-
-RETURN = r'''
-queries:
- description: List of executed queries.
- returned: always
- type: list
- sample: ['REVOKE GRANT OPTION FOR INSERT ON TABLE "books" FROM "reader";']
- version_added: '2.8'
-'''
-
-import traceback
-
-PSYCOPG2_IMP_ERR = None
-try:
- import psycopg2
- import psycopg2.extensions
-except ImportError:
- PSYCOPG2_IMP_ERR = traceback.format_exc()
- psycopg2 = None
-
-# import module snippets
-from ansible.module_utils.basic import AnsibleModule, missing_required_lib
-from ansible.module_utils.database import pg_quote_identifier
-from ansible.module_utils.postgres import postgres_common_argument_spec
-from ansible.module_utils._text import to_native
-
-VALID_PRIVS = frozenset(('SELECT', 'INSERT', 'UPDATE', 'DELETE', 'TRUNCATE',
- 'REFERENCES', 'TRIGGER', 'CREATE', 'CONNECT',
-                         'TEMPORARY', 'TEMP', 'EXECUTE', 'USAGE', 'ALL'))
-VALID_DEFAULT_OBJS = {'TABLES': ('ALL', 'SELECT', 'INSERT', 'UPDATE', 'DELETE', 'TRUNCATE', 'REFERENCES', 'TRIGGER'),
- 'SEQUENCES': ('ALL', 'SELECT', 'UPDATE', 'USAGE'),
- 'FUNCTIONS': ('ALL', 'EXECUTE'),
- 'TYPES': ('ALL', 'USAGE')}
-
-executed_queries = []
-
-
-class Error(Exception):
- pass
-
-
-def role_exists(module, cursor, rolname):
-    """Check whether the role exists."""
- query = "SELECT 1 FROM pg_roles WHERE rolname = '%s'" % rolname
- try:
- cursor.execute(query)
- return cursor.rowcount > 0
-
- except Exception as e:
- module.fail_json(msg="Cannot execute SQL '%s': %s" % (query, to_native(e)))
-
- return False
-
-
-# We don't have functools.partial in Python < 2.5
-def partial(f, *args, **kwargs):
- """Partial function application"""
-
- def g(*g_args, **g_kwargs):
- new_kwargs = kwargs.copy()
- new_kwargs.update(g_kwargs)
- # Pass the merged keyword mapping so keywords bound at partial() time
- # are not silently dropped
- return f(*(args + g_args), **new_kwargs)
-
- g.f = f
- g.args = args
- g.kwargs = kwargs
- return g
-
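-# A minimal usage sketch (illustrative only, not part of the module): this
-# helper behaves like functools.partial, e.g.:
-#
-#   add = partial(lambda a, b: a + b, 1)
-#   add(2)  # -> 3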
-
-class Connection(object):
- """Wrapper around a psycopg2 connection with some convenience methods"""
-
- def __init__(self, params, module):
- self.database = params.database
- self.module = module
- # To use default values, keyword arguments must be absent, so
- # check which values are empty and don't include them in the **kw
- # dictionary
- params_map = {
- "host": "host",
- "login": "user",
- "password": "password",
- "port": "port",
- "database": "database",
- "ssl_mode": "sslmode",
- "ca_cert": "sslrootcert"
- }
-
- kw = dict((params_map[k], getattr(params, k)) for k in params_map
- if getattr(params, k) != '' and getattr(params, k) is not None)
-
- # If a unix_socket is specified, incorporate it here.
- is_localhost = "host" not in kw or kw["host"] == "" or kw["host"] == "localhost"
- if is_localhost and params.unix_socket != "":
- kw["host"] = params.unix_socket
-
- sslrootcert = params.ca_cert
- if psycopg2.__version__ < '2.4.3' and sslrootcert is not None:
- raise ValueError('psycopg2 must be at least 2.4.3 in order to use the ca_cert parameter')
-
- self.connection = psycopg2.connect(**kw)
- self.cursor = self.connection.cursor()
-
- def commit(self):
- self.connection.commit()
-
- def rollback(self):
- self.connection.rollback()
-
- @property
- def encoding(self):
- """Connection encoding in Python-compatible form"""
- return psycopg2.extensions.encodings[self.connection.encoding]
-
- # Methods for querying database objects
-
- # PostgreSQL < 9.0 doesn't support "ALL TABLES IN SCHEMA schema"-like
- # phrases in GRANT or REVOKE statements, therefore alternative methods are
- # provided here.
-
- def schema_exists(self, schema):
- query = """SELECT count(*)
- FROM pg_catalog.pg_namespace WHERE nspname = %s"""
- self.cursor.execute(query, (schema,))
- return self.cursor.fetchone()[0] > 0
-
- def get_all_tables_in_schema(self, schema):
- if not self.schema_exists(schema):
- raise Error('Schema "%s" does not exist.' % schema)
- query = """SELECT relname
- FROM pg_catalog.pg_class c
- JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace
- WHERE nspname = %s AND relkind in ('r', 'v', 'm', 'p')"""
- self.cursor.execute(query, (schema,))
- return [t[0] for t in self.cursor.fetchall()]
-
- def get_all_sequences_in_schema(self, schema):
- if not self.schema_exists(schema):
- raise Error('Schema "%s" does not exist.' % schema)
- query = """SELECT relname
- FROM pg_catalog.pg_class c
- JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace
- WHERE nspname = %s AND relkind = 'S'"""
- self.cursor.execute(query, (schema,))
- return [t[0] for t in self.cursor.fetchall()]
-
- def get_all_functions_in_schema(self, schema):
- if not self.schema_exists(schema):
- raise Error('Schema "%s" does not exist.' % schema)
- query = """SELECT p.proname, oidvectortypes(p.proargtypes)
- FROM pg_catalog.pg_proc p
- JOIN pg_namespace n ON n.oid = p.pronamespace
- WHERE nspname = %s"""
- self.cursor.execute(query, (schema,))
- return ["%s(%s)" % (t[0], t[1]) for t in self.cursor.fetchall()]
-
- # Methods for getting access control lists and group membership info
-
- # To determine whether anything has changed after granting/revoking
- # privileges, we compare the access control lists of the specified database
- # objects before and afterwards. Python's list/string comparison should
- # suffice for change detection, we should not actually have to parse ACLs.
- # The same should apply to group membership information.
-
- def get_table_acls(self, schema, tables):
- query = """SELECT relacl
- FROM pg_catalog.pg_class c
- JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace
- WHERE nspname = %s AND relkind in ('r','p','v','m') AND relname = ANY (%s)
- ORDER BY relname"""
- self.cursor.execute(query, (schema, tables))
- return [t[0] for t in self.cursor.fetchall()]
-
- def get_sequence_acls(self, schema, sequences):
- query = """SELECT relacl
- FROM pg_catalog.pg_class c
- JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace
- WHERE nspname = %s AND relkind = 'S' AND relname = ANY (%s)
- ORDER BY relname"""
- self.cursor.execute(query, (schema, sequences))
- return [t[0] for t in self.cursor.fetchall()]
-
- def get_function_acls(self, schema, function_signatures):
- funcnames = [f.split('(', 1)[0] for f in function_signatures]
- query = """SELECT proacl
- FROM pg_catalog.pg_proc p
- JOIN pg_catalog.pg_namespace n ON n.oid = p.pronamespace
- WHERE nspname = %s AND proname = ANY (%s)
- ORDER BY proname, proargtypes"""
- self.cursor.execute(query, (schema, funcnames))
- return [t[0] for t in self.cursor.fetchall()]
-
- def get_schema_acls(self, schemas):
- query = """SELECT nspacl FROM pg_catalog.pg_namespace
- WHERE nspname = ANY (%s) ORDER BY nspname"""
- self.cursor.execute(query, (schemas,))
- return [t[0] for t in self.cursor.fetchall()]
-
- def get_language_acls(self, languages):
- query = """SELECT lanacl FROM pg_catalog.pg_language
- WHERE lanname = ANY (%s) ORDER BY lanname"""
- self.cursor.execute(query, (languages,))
- return [t[0] for t in self.cursor.fetchall()]
-
- def get_tablespace_acls(self, tablespaces):
- query = """SELECT spcacl FROM pg_catalog.pg_tablespace
- WHERE spcname = ANY (%s) ORDER BY spcname"""
- self.cursor.execute(query, (tablespaces,))
- return [t[0] for t in self.cursor.fetchall()]
-
- def get_database_acls(self, databases):
- query = """SELECT datacl FROM pg_catalog.pg_database
- WHERE datname = ANY (%s) ORDER BY datname"""
- self.cursor.execute(query, (databases,))
- return [t[0] for t in self.cursor.fetchall()]
-
- def get_group_memberships(self, groups):
- query = """SELECT roleid, grantor, member, admin_option
- FROM pg_catalog.pg_auth_members am
- JOIN pg_catalog.pg_roles r ON r.oid = am.roleid
- WHERE r.rolname = ANY(%s)
- ORDER BY roleid, grantor, member"""
- self.cursor.execute(query, (groups,))
- return self.cursor.fetchall()
-
- def get_default_privs(self, schema, *args):
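- # *args swallows the objs argument passed by get_status(objs) in
- # manipulate_privs(); default ACLs are looked up per schema only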
- query = """SELECT defaclacl
- FROM pg_default_acl a
- JOIN pg_namespace b ON a.defaclnamespace=b.oid
- WHERE b.nspname = %s;"""
- self.cursor.execute(query, (schema,))
- return [t[0] for t in self.cursor.fetchall()]
-
- def get_foreign_data_wrapper_acls(self, fdws):
- query = """SELECT fdwacl FROM pg_catalog.pg_foreign_data_wrapper
- WHERE fdwname = ANY (%s) ORDER BY fdwname"""
- self.cursor.execute(query, (fdws,))
- return [t[0] for t in self.cursor.fetchall()]
-
- def get_foreign_server_acls(self, fs):
- query = """SELECT srvacl FROM pg_catalog.pg_foreign_server
- WHERE srvname = ANY (%s) ORDER BY srvname"""
- self.cursor.execute(query, (fs,))
- return [t[0] for t in self.cursor.fetchall()]
-
- def get_type_acls(self, schema, types):
- query = """SELECT t.typacl FROM pg_catalog.pg_type t
- JOIN pg_catalog.pg_namespace n ON n.oid = t.typnamespace
- WHERE n.nspname = %s AND t.typname = ANY (%s) ORDER BY typname"""
- self.cursor.execute(query, (schema, types))
- return [t[0] for t in self.cursor.fetchall()]
-
- # Manipulating privileges
-
- def manipulate_privs(self, obj_type, privs, objs, roles, target_roles,
- state, grant_option, schema_qualifier=None, fail_on_role=True):
- """Manipulate database object privileges.
-
- :param obj_type: Type of database object to grant/revoke
- privileges for.
- :param privs: Either a list of privileges to grant/revoke
- or None if type is "group".
- :param objs: List of database objects to grant/revoke
- privileges for.
- :param roles: Either a list of role names or "PUBLIC"
- for the implicitly defined "PUBLIC" group
- :param target_roles: List of role names to grant/revoke
- default privileges as.
- :param state: "present" to grant privileges, "absent" to revoke.
- :param grant_option: Only for state "present": If True, set
- grant/admin option. If False, revoke it.
- If None, don't change grant option.
- :param schema_qualifier: Some object types ("TABLE", "SEQUENCE",
- "FUNCTION") must be qualified by schema.
- Ignored for other types.
- """
- # get_status: function to get current status
- if obj_type == 'table':
- get_status = partial(self.get_table_acls, schema_qualifier)
- elif obj_type == 'sequence':
- get_status = partial(self.get_sequence_acls, schema_qualifier)
- elif obj_type == 'function':
- get_status = partial(self.get_function_acls, schema_qualifier)
- elif obj_type == 'schema':
- get_status = self.get_schema_acls
- elif obj_type == 'language':
- get_status = self.get_language_acls
- elif obj_type == 'tablespace':
- get_status = self.get_tablespace_acls
- elif obj_type == 'database':
- get_status = self.get_database_acls
- elif obj_type == 'group':
- get_status = self.get_group_memberships
- elif obj_type == 'default_privs':
- get_status = partial(self.get_default_privs, schema_qualifier)
- elif obj_type == 'foreign_data_wrapper':
- get_status = self.get_foreign_data_wrapper_acls
- elif obj_type == 'foreign_server':
- get_status = self.get_foreign_server_acls
- elif obj_type == 'type':
- get_status = partial(self.get_type_acls, schema_qualifier)
- else:
- raise Error('Unsupported database object type "%s".' % obj_type)
-
- # Return False (nothing has changed) if there are no objs to work on.
- if not objs:
- return False
-
- # obj_ids: quoted db object identifiers (sometimes schema-qualified)
- if obj_type == 'function':
- obj_ids = []
- for obj in objs:
- try:
- f, args = obj.split('(', 1)
- except Exception:
- raise Error('Illegal function signature: "%s".' % obj)
- obj_ids.append('"%s"."%s"(%s' % (schema_qualifier, f, args))
- elif obj_type in ['table', 'sequence', 'type']:
- obj_ids = ['"%s"."%s"' % (schema_qualifier, o) for o in objs]
- else:
- obj_ids = ['"%s"' % o for o in objs]
-
- # set_what: SQL-fragment specifying what to set for the target roles:
- # Either group membership or privileges on objects of a certain type
- if obj_type == 'group':
- set_what = ','.join('"%s"' % i for i in obj_ids)
- elif obj_type == 'default_privs':
- # We don't want privs to be quoted here
- set_what = ','.join(privs)
- else:
- # function types are already quoted above
- if obj_type != 'function':
- obj_ids = [pg_quote_identifier(i, 'table') for i in obj_ids]
- # Note: obj_type has been checked against a set of string literals
- # and privs was escaped when it was parsed
- # Note: Underscores are replaced with spaces to support multi-word obj_type
- set_what = '%s ON %s %s' % (','.join(privs), obj_type.replace('_', ' '),
- ','.join(obj_ids))
-
- # for_whom: SQL-fragment specifying for whom to set the above
- if roles == 'PUBLIC':
- for_whom = 'PUBLIC'
- else:
- for_whom = []
- for r in roles:
- if not role_exists(self.module, self.cursor, r):
- if fail_on_role:
- self.module.fail_json(msg="Role '%s' does not exist" % r.strip())
-
- else:
- self.module.warn("Role '%s' does not exist, pass it" % r.strip())
- else:
- for_whom.append('"%s"' % r)
-
- if not for_whom:
- return False
-
- for_whom = ','.join(for_whom)
-
- # as_who:
- as_who = None
- if target_roles:
- as_who = ','.join('"%s"' % r for r in target_roles)
-
- status_before = get_status(objs)
-
- query = QueryBuilder(state) \
- .for_objtype(obj_type) \
- .with_grant_option(grant_option) \
- .for_whom(for_whom) \
- .as_who(as_who) \
- .for_schema(schema_qualifier) \
- .set_what(set_what) \
- .for_objs(objs) \
- .build()
-
- executed_queries.append(query)
- self.cursor.execute(query)
- status_after = get_status(objs)
-
- def nonesorted(e):
- # Under Python 3, sorting can fail when trying
- # to compare NoneType elements.
- if e is None:
- return ''
- return e
-
- status_before.sort(key=nonesorted)
- status_after.sort(key=nonesorted)
- return status_before != status_after
-
-
-class QueryBuilder(object):
- def __init__(self, state):
- self._grant_option = None
- self._for_whom = None
- self._as_who = None
- self._set_what = None
- self._obj_type = None
- self._state = state
- self._schema = None
- self._objs = None
- self.query = []
-
- def for_objs(self, objs):
- self._objs = objs
- return self
-
- def for_schema(self, schema):
- self._schema = schema
- return self
-
- def with_grant_option(self, option):
- self._grant_option = option
- return self
-
- def for_whom(self, who):
- self._for_whom = who
- return self
-
- def as_who(self, target_roles):
- self._as_who = target_roles
- return self
-
- def set_what(self, what):
- self._set_what = what
- return self
-
- def for_objtype(self, objtype):
- self._obj_type = objtype
- return self
-
- def build(self):
- if self._state == 'present':
- self.build_present()
- else:
- # state is validated by the module; anything else is 'absent'
- self.build_absent()
- return '\n'.join(self.query)
-
- def add_default_revoke(self):
- for obj in self._objs:
- if self._as_who:
- self.query.append(
- 'ALTER DEFAULT PRIVILEGES FOR ROLE {0} IN SCHEMA {1} REVOKE ALL ON {2} FROM {3};'.format(self._as_who,
- self._schema, obj,
- self._for_whom))
- else:
- self.query.append(
- 'ALTER DEFAULT PRIVILEGES IN SCHEMA {0} REVOKE ALL ON {1} FROM {2};'.format(self._schema, obj,
- self._for_whom))
-
- def add_grant_option(self):
- if self._grant_option:
- if self._obj_type == 'group':
- self.query[-1] += ' WITH ADMIN OPTION;'
- else:
- self.query[-1] += ' WITH GRANT OPTION;'
- else:
- self.query[-1] += ';'
- if self._obj_type == 'group':
- self.query.append('REVOKE ADMIN OPTION FOR {0} FROM {1};'.format(self._set_what, self._for_whom))
- elif self._obj_type != 'default_privs':
- self.query.append('REVOKE GRANT OPTION FOR {0} FROM {1};'.format(self._set_what, self._for_whom))
-
- def add_default_priv(self):
- for obj in self._objs:
- if self._as_who:
- self.query.append(
- 'ALTER DEFAULT PRIVILEGES FOR ROLE {0} IN SCHEMA {1} GRANT {2} ON {3} TO {4}'.format(self._as_who,
- self._schema,
- self._set_what,
- obj,
- self._for_whom))
- else:
- self.query.append(
- 'ALTER DEFAULT PRIVILEGES IN SCHEMA {0} GRANT {1} ON {2} TO {3}'.format(self._schema,
- self._set_what,
- obj,
- self._for_whom))
- self.add_grant_option()
- if self._as_who:
- self.query.append(
- 'ALTER DEFAULT PRIVILEGES FOR ROLE {0} IN SCHEMA {1} GRANT USAGE ON TYPES TO {2}'.format(self._as_who,
- self._schema,
- self._for_whom))
- else:
- self.query.append(
- 'ALTER DEFAULT PRIVILEGES IN SCHEMA {0} GRANT USAGE ON TYPES TO {1}'.format(self._schema, self._for_whom))
- self.add_grant_option()
-
- def build_present(self):
- if self._obj_type == 'default_privs':
- self.add_default_revoke()
- self.add_default_priv()
- else:
- self.query.append('GRANT {0} TO {1}'.format(self._set_what, self._for_whom))
- self.add_grant_option()
-
- def build_absent(self):
- if self._obj_type == 'default_privs':
- self.query = []
- for obj in ['TABLES', 'SEQUENCES', 'TYPES']:
- if self._as_who:
- self.query.append(
- 'ALTER DEFAULT PRIVILEGES FOR ROLE {0} IN SCHEMA {1} REVOKE ALL ON {2} FROM {3};'.format(self._as_who,
- self._schema, obj,
- self._for_whom))
- else:
- self.query.append(
- 'ALTER DEFAULT PRIVILEGES IN SCHEMA {0} REVOKE ALL ON {1} FROM {2};'.format(self._schema, obj,
- self._for_whom))
- else:
- self.query.append('REVOKE {0} FROM {1};'.format(self._set_what, self._for_whom))
-
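-# A minimal sketch of how manipulate_privs() above drives QueryBuilder
-# (illustrative only; the set_what/for_whom fragments are assumed to be
-# pre-quoted as in that method):
-#
-#   query = QueryBuilder('present') \
-#       .for_objtype('table') \
-#       .with_grant_option(True) \
-#       .for_whom('"reader"') \
-#       .set_what('SELECT ON table "public"."books"') \
-#       .build()
-#   # -> 'GRANT SELECT ON table "public"."books" TO "reader" WITH GRANT OPTION;'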
-
-def main():
- argument_spec = postgres_common_argument_spec()
- argument_spec.update(
- database=dict(required=True, aliases=['db', 'login_db']),
- state=dict(default='present', choices=['present', 'absent']),
- privs=dict(required=False, aliases=['priv']),
- type=dict(default='table',
- choices=['table',
- 'sequence',
- 'function',
- 'database',
- 'schema',
- 'language',
- 'tablespace',
- 'group',
- 'default_privs',
- 'foreign_data_wrapper',
- 'foreign_server',
- 'type', ]),
- objs=dict(required=False, aliases=['obj']),
- schema=dict(required=False),
- roles=dict(required=True, aliases=['role']),
- session_role=dict(required=False),
- target_roles=dict(required=False),
- grant_option=dict(required=False, type='bool',
- aliases=['admin_option']),
- host=dict(default='', aliases=['login_host']),
- unix_socket=dict(default='', aliases=['login_unix_socket']),
- login=dict(default='postgres', aliases=['login_user']),
- password=dict(default='', aliases=['login_password'], no_log=True),
- fail_on_role=dict(type='bool', default=True),
- )
-
- module = AnsibleModule(
- argument_spec=argument_spec,
- supports_check_mode=True,
- )
-
- fail_on_role = module.params['fail_on_role']
-
- # Create type object as namespace for module params
- p = type('Params', (), module.params)
- # param "schema": default, allowed depends on param "type"
- if p.type in ['table', 'sequence', 'function', 'type', 'default_privs']:
- p.schema = p.schema or 'public'
- elif p.schema:
- module.fail_json(msg='Argument "schema" is not allowed '
- 'for type "%s".' % p.type)
-
- # param "objs": default, required depends on param "type"
- if p.type == 'database':
- p.objs = p.objs or p.database
- elif not p.objs:
- module.fail_json(msg='Argument "objs" is required '
- 'for type "%s".' % p.type)
-
- # param "privs": allowed, required depends on param "type"
- if p.type == 'group':
- if p.privs:
- module.fail_json(msg='Argument "privs" is not allowed '
- 'for type "group".')
- elif not p.privs:
- module.fail_json(msg='Argument "privs" is required '
- 'for type "%s".' % p.type)
-
- # Connect to Database
- if not psycopg2:
- module.fail_json(msg=missing_required_lib('psycopg2'), exception=PSYCOPG2_IMP_ERR)
- try:
- conn = Connection(p, module)
- except psycopg2.Error as e:
- module.fail_json(msg='Could not connect to database: %s' % to_native(e), exception=traceback.format_exc())
- except TypeError as e:
- if 'sslrootcert' in e.args[0]:
- module.fail_json(msg='Postgresql server must be at least version 8.4 to support sslrootcert')
- module.fail_json(msg="unable to connect to database: %s" % to_native(e), exception=traceback.format_exc())
- except ValueError as e:
- # We raise this when the psycopg library is too old
- module.fail_json(msg=to_native(e))
-
- if p.session_role:
- try:
- conn.cursor.execute('SET ROLE "%s"' % p.session_role)
- except Exception as e:
- module.fail_json(msg="Could not switch to role %s: %s" % (p.session_role, to_native(e)), exception=traceback.format_exc())
-
- try:
- # privs
- if p.privs:
- privs = frozenset(pr.upper() for pr in p.privs.split(','))
- if not privs.issubset(VALID_PRIVS):
- module.fail_json(msg='Invalid privileges specified: %s' % privs.difference(VALID_PRIVS))
- else:
- privs = None
- # objs:
- if p.type == 'table' and p.objs == 'ALL_IN_SCHEMA':
- objs = conn.get_all_tables_in_schema(p.schema)
- elif p.type == 'sequence' and p.objs == 'ALL_IN_SCHEMA':
- objs = conn.get_all_sequences_in_schema(p.schema)
- elif p.type == 'function' and p.objs == 'ALL_IN_SCHEMA':
- objs = conn.get_all_functions_in_schema(p.schema)
- elif p.type == 'default_privs':
- if p.objs == 'ALL_DEFAULT':
- objs = frozenset(VALID_DEFAULT_OBJS.keys())
- else:
- objs = frozenset(obj.upper() for obj in p.objs.split(','))
- if not objs.issubset(VALID_DEFAULT_OBJS):
- module.fail_json(
- msg='Invalid Object set specified: %s' % objs.difference(VALID_DEFAULT_OBJS.keys()))
- # Again, do we have valid privs specified for object type:
- valid_objects_for_priv = frozenset(obj for obj in objs if privs.issubset(VALID_DEFAULT_OBJS[obj]))
- if not valid_objects_for_priv == objs:
- module.fail_json(
- msg='Invalid priv specified. Valid object for priv: {0}. Objects: {1}'.format(
- valid_objects_for_priv, objs))
- else:
- objs = p.objs.split(',')
-
- # function signatures are encoded using ':' to separate args
- if p.type == 'function':
- objs = [obj.replace(':', ',') for obj in objs]
-
- # roles
- if p.roles == 'PUBLIC':
- roles = 'PUBLIC'
- else:
- roles = p.roles.split(',')
-
- if len(roles) == 1 and not role_exists(module, conn.cursor, roles[0]):
- if fail_on_role:
- module.fail_json(msg="Role '%s' does not exist" % roles[0].strip())
-
- else:
- module.warn("Role '%s' does not exist, nothing to do" % roles[0].strip())
- module.exit_json(changed=False)
-
- # check if target_roles is set with type: default_privs
- if p.target_roles and not p.type == 'default_privs':
- module.warn('"target_roles" will be ignored '
- 'Argument "type: default_privs" is required for usage of "target_roles".')
-
- # target roles
- if p.target_roles:
- target_roles = p.target_roles.split(',')
- else:
- target_roles = None
-
- changed = conn.manipulate_privs(
- obj_type=p.type,
- privs=privs,
- objs=objs,
- roles=roles,
- target_roles=target_roles,
- state=p.state,
- grant_option=p.grant_option,
- schema_qualifier=p.schema,
- fail_on_role=fail_on_role,
- )
-
- except Error as e:
- conn.rollback()
- # Exception.message is not available on Python 3; use to_native(e)
- module.fail_json(msg=to_native(e), exception=traceback.format_exc())
-
- except psycopg2.Error as e:
- conn.rollback()
- module.fail_json(msg=to_native(e))
-
- if module.check_mode:
- conn.rollback()
- else:
- conn.commit()
- module.exit_json(changed=changed, queries=executed_queries)
-
-
-if __name__ == '__main__':
- main()
diff --git a/test/support/integration/plugins/modules/postgresql_query.py b/test/support/integration/plugins/modules/postgresql_query.py
deleted file mode 100644
index 18d63e33..00000000
--- a/test/support/integration/plugins/modules/postgresql_query.py
+++ /dev/null
@@ -1,364 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-
-# Copyright: (c) 2017, Felix Archambault
-# Copyright: (c) 2019, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-ANSIBLE_METADATA = {
- 'metadata_version': '1.1',
- 'supported_by': 'community',
- 'status': ['preview']
-}
-
-DOCUMENTATION = r'''
----
-module: postgresql_query
-short_description: Run PostgreSQL queries
-description:
-- Runs arbitrary PostgreSQL queries.
-- Can run queries from SQL script files.
-- Does not run against backup files. Use M(postgresql_db) with I(state=restore)
- to run queries on files made by pg_dump/pg_dumpall utilities.
-version_added: '2.8'
-options:
- query:
- description:
- - SQL query to run. Query parameters can be passed using psycopg2 placeholder syntax
- U(http://initd.org/psycopg/docs/usage.html).
- type: str
- positional_args:
- description:
- - List of values to be passed as positional arguments to the query.
- When the value is a list, it will be converted to PostgreSQL array.
- - Mutually exclusive with I(named_args).
- type: list
- elements: raw
- named_args:
- description:
- - Dictionary of key-value arguments to pass to the query.
- When the value is a list, it will be converted to PostgreSQL array.
- - Mutually exclusive with I(positional_args).
- type: dict
- path_to_script:
- description:
- - Path to SQL script on the remote host.
- - Returns result of the last query in the script.
- - Mutually exclusive with I(query).
- type: path
- session_role:
- description:
- - Switch to session_role after connecting. The specified session_role must
- be a role that the current login_user is a member of.
- - Permissions checking for SQL commands is carried out as though
- the session_role were the one that had logged in originally.
- type: str
- db:
- description:
- - Name of database to connect to and run queries against.
- type: str
- aliases:
- - login_db
- autocommit:
- description:
- - Execute in autocommit mode when the query can't be run inside a transaction block
- (e.g., VACUUM).
- - Mutually exclusive with I(check_mode).
- type: bool
- default: no
- version_added: '2.9'
- encoding:
- description:
- - Set the client encoding for the current session (e.g. C(UTF-8)).
- - The default is the encoding defined by the database.
- type: str
- version_added: '2.10'
-seealso:
-- module: postgresql_db
-author:
-- Felix Archambault (@archf)
-- Andrew Klychkov (@Andersson007)
-- Will Rouesnel (@wrouesnel)
-extends_documentation_fragment: postgres
-'''
-
-EXAMPLES = r'''
-- name: Simple select query to acme db
- postgresql_query:
- db: acme
- query: SELECT version()
-
-- name: Select query to db acme with positional arguments and non-default credentials
- postgresql_query:
- db: acme
- login_user: django
- login_password: mysecretpass
- query: SELECT * FROM acme WHERE id = %s AND story = %s
- positional_args:
- - 1
- - test
-
-- name: Select query to test_db with named_args
- postgresql_query:
- db: test_db
- query: SELECT * FROM test WHERE id = %(id_val)s AND story = %(story_val)s
- named_args:
- id_val: 1
- story_val: test
-
-- name: Insert query to test_table in db test_db
- postgresql_query:
- db: test_db
- query: INSERT INTO test_table (id, story) VALUES (2, 'my_long_story')
-
-- name: Run queries from SQL script using UTF-8 client encoding for session
- postgresql_query:
- db: test_db
- path_to_script: /var/lib/pgsql/test.sql
- positional_args:
- - 1
- encoding: UTF-8
-
-- name: Example of using autocommit parameter
- postgresql_query:
- db: test_db
- query: VACUUM
- autocommit: yes
-
-- name: >
- Insert data into a column of array type using positional_args.
- Note that we use quotes here, the same as for passing JSON, etc.
- postgresql_query:
- query: INSERT INTO test_table (array_column) VALUES (%s)
- positional_args:
- - '{1,2,3}'
-
-# Pass list and string vars as positional_args
-- name: Set vars
- set_fact:
- my_list:
- - 1
- - 2
- - 3
- my_arr: '{1, 2, 3}'
-
-- name: Select from test table by passing positional_args as arrays
- postgresql_query:
- query: SELECT * FROM test_array_table WHERE arr_col1 = %s AND arr_col2 = %s
- positional_args:
- - '{{ my_list }}'
- - '{{ my_arr|string }}'
-'''
-
-RETURN = r'''
-query:
- description: Query that the module tried to execute.
- returned: always
- type: str
- sample: 'SELECT * FROM bar'
-statusmessage:
- description: Attribute containing the message returned by the command.
- returned: always
- type: str
- sample: 'INSERT 0 1'
-query_result:
- description:
- - List of dictionaries in column:value form representing returned rows.
- returned: changed
- type: list
- sample: [{"Column": "Value1"},{"Column": "Value2"}]
-rowcount:
- description: Number of affected rows.
- returned: changed
- type: int
- sample: 5
-'''
-
-try:
- from psycopg2 import ProgrammingError as Psycopg2ProgrammingError
- from psycopg2.extras import DictCursor
-except ImportError:
- # This import is needed for checking 'no results to fetch' in main();
- # psycopg2 availability itself is checked by connect_to_db() in
- # ansible.module_utils.postgres
- pass
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible.module_utils.postgres import (
- connect_to_db,
- get_conn_params,
- postgres_common_argument_spec,
-)
-from ansible.module_utils._text import to_native
-from ansible.module_utils.six import iteritems
-
-
-# ===========================================
-# Module execution.
-#
-
-def list_to_pg_array(elem):
- """Convert the passed list to PostgreSQL array
- represented as a string.
-
- Args:
- elem (list): List that needs to be converted.
-
- Returns:
- elem (str): String representation of PostgreSQL array.
- """
- elem = str(elem).strip('[]')
- elem = '{' + elem + '}'
- return elem
-
-
-def convert_elements_to_pg_arrays(obj):
- """Convert list elements of the passed object
- to PostgreSQL arrays represented as strings.
-
- Args:
- obj (dict or list): Object whose elements need to be converted.
-
- Returns:
- obj (dict or list): Object with converted elements.
- """
- if isinstance(obj, dict):
- for (key, elem) in iteritems(obj):
- if isinstance(elem, list):
- obj[key] = list_to_pg_array(elem)
-
- elif isinstance(obj, list):
- for i, elem in enumerate(obj):
- if isinstance(elem, list):
- obj[i] = list_to_pg_array(elem)
-
- return obj
-
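-# Illustrative examples (not part of the original module), assuming plain
-# Python lists as input:
-#
-#   list_to_pg_array([1, 2, 3])                      # -> '{1, 2, 3}'
-#   convert_elements_to_pg_arrays({'ids': [1, 2]})   # -> {'ids': '{1, 2}'}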
-
-def main():
- argument_spec = postgres_common_argument_spec()
- argument_spec.update(
- query=dict(type='str'),
- db=dict(type='str', aliases=['login_db']),
- positional_args=dict(type='list', elements='raw'),
- named_args=dict(type='dict'),
- session_role=dict(type='str'),
- path_to_script=dict(type='path'),
- autocommit=dict(type='bool', default=False),
- encoding=dict(type='str'),
- )
-
- module = AnsibleModule(
- argument_spec=argument_spec,
- mutually_exclusive=(('positional_args', 'named_args'),),
- supports_check_mode=True,
- )
-
- query = module.params["query"]
- positional_args = module.params["positional_args"]
- named_args = module.params["named_args"]
- path_to_script = module.params["path_to_script"]
- autocommit = module.params["autocommit"]
- encoding = module.params["encoding"]
-
- if autocommit and module.check_mode:
- module.fail_json(msg="Using autocommit is mutually exclusive with check_mode")
-
- if path_to_script and query:
- module.fail_json(msg="path_to_script is mutually exclusive with query")
-
- if positional_args:
- positional_args = convert_elements_to_pg_arrays(positional_args)
-
- elif named_args:
- named_args = convert_elements_to_pg_arrays(named_args)
-
- if path_to_script:
- try:
- with open(path_to_script, 'rb') as f:
- query = to_native(f.read())
- except Exception as e:
- module.fail_json(msg="Cannot read file '%s' : %s" % (path_to_script, to_native(e)))
-
- conn_params = get_conn_params(module, module.params)
- db_connection = connect_to_db(module, conn_params, autocommit=autocommit)
- if encoding is not None:
- db_connection.set_client_encoding(encoding)
- cursor = db_connection.cursor(cursor_factory=DictCursor)
-
- # Prepare args:
- if module.params.get("positional_args"):
- arguments = module.params["positional_args"]
- elif module.params.get("named_args"):
- arguments = module.params["named_args"]
- else:
- arguments = None
-
- # Set defaults:
- changed = False
-
- # Execute query:
- try:
- cursor.execute(query, arguments)
- except Exception as e:
- if not autocommit:
- db_connection.rollback()
-
- cursor.close()
- db_connection.close()
- module.fail_json(msg="Cannot execute SQL '%s' %s: %s" % (query, arguments, to_native(e)))
-
- statusmessage = cursor.statusmessage
- rowcount = cursor.rowcount
-
- try:
- query_result = [dict(row) for row in cursor.fetchall()]
- except Psycopg2ProgrammingError as e:
- if to_native(e) == 'no results to fetch':
- # Keep the documented list type when no rows are returned
- query_result = []
-
- except Exception as e:
- module.fail_json(msg="Cannot fetch rows from cursor: %s" % to_native(e))
-
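- # statusmessage is the PostgreSQL command tag, e.g. 'INSERT 0 1'
- # (three tokens: command, OID, row count) or 'UPDATE 5' (two tokens:
- # command, row count); a non-zero trailing count means rows changed.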
- if 'SELECT' not in statusmessage:
- if 'UPDATE' in statusmessage or 'INSERT' in statusmessage or 'DELETE' in statusmessage:
- s = statusmessage.split()
- if len(s) == 3:
- if statusmessage.split()[2] != '0':
- changed = True
-
- elif len(s) == 2:
- if statusmessage.split()[1] != '0':
- changed = True
-
- else:
- changed = True
-
- else:
- changed = True
-
- if module.check_mode:
- db_connection.rollback()
- else:
- if not autocommit:
- db_connection.commit()
-
- kw = dict(
- changed=changed,
- query=cursor.query,
- statusmessage=statusmessage,
- query_result=query_result,
- rowcount=rowcount if rowcount >= 0 else 0,
- )
-
- cursor.close()
- db_connection.close()
-
- module.exit_json(**kw)
-
-
-if __name__ == '__main__':
- main()
diff --git a/test/support/integration/plugins/modules/postgresql_set.py b/test/support/integration/plugins/modules/postgresql_set.py
deleted file mode 100644
index cfbdae64..00000000
--- a/test/support/integration/plugins/modules/postgresql_set.py
+++ /dev/null
@@ -1,434 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-
-# Copyright: (c) 2018, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-ANSIBLE_METADATA = {
- 'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'
-}
-
-DOCUMENTATION = r'''
----
-module: postgresql_set
-short_description: Change a PostgreSQL server configuration parameter
-description:
- - Allows changing a PostgreSQL server configuration parameter.
- - The module uses the ALTER SYSTEM command and applies changes by reloading the server configuration.
- - ALTER SYSTEM is used for changing server configuration parameters across the entire database cluster.
- - It can be more convenient and safer than the traditional method of manually editing the postgresql.conf file.
- - ALTER SYSTEM writes the given parameter setting to the $PGDATA/postgresql.auto.conf file,
- which is read in addition to postgresql.conf.
- - The module allows resetting a parameter to its boot_val (the cluster's initial value) with I(reset=yes), or removing the parameter
- string from postgresql.auto.conf and reloading the configuration with I(value=default) (for settings with postmaster context a restart is required).
- - After a change you can inspect the previous and the new parameter values in the Ansible output
- using the returned values and the M(debug) module.
-version_added: '2.8'
-options:
- name:
- description:
- - Name of PostgreSQL server parameter.
- type: str
- required: true
- value:
- description:
- - Parameter value to set.
- - To remove a parameter string from postgresql.auto.conf and
- reload the server configuration you must pass I(value=default).
- With I(value=default) the playbook always returns C(changed=true).
- type: str
- reset:
- description:
- - Restore parameter to initial state (boot_val). Mutually exclusive with I(value).
- type: bool
- default: false
- session_role:
- description:
- - Switch to session_role after connecting. The specified session_role must
- be a role that the current login_user is a member of.
- - Permissions checking for SQL commands is carried out as though
- the session_role were the one that had logged in originally.
- type: str
- db:
- description:
- - Name of database to connect to.
- type: str
- aliases:
- - login_db
-notes:
-- Supported versions of PostgreSQL are 9.4 and later.
-- Note that changing a setting with 'postmaster' context can return C(changed=true)
- when nothing has actually changed, because the same value may be presented in
- several different forms, for example 1024MB, 1GB, etc. In the pg_settings
- system view, however, it can be stored as 131072 pages of 8kB.
- The final check of the parameter value cannot compare them because the server has
- not been restarted and the value in pg_settings is not updated yet.
-- For some parameters a restart of the PostgreSQL server is required.
- See official documentation U(https://www.postgresql.org/docs/current/view-pg-settings.html).
-seealso:
-- module: postgresql_info
-- name: PostgreSQL server configuration
- description: General information about PostgreSQL server configuration.
- link: https://www.postgresql.org/docs/current/runtime-config.html
-- name: PostgreSQL view pg_settings reference
- description: Complete reference of the pg_settings view documentation.
- link: https://www.postgresql.org/docs/current/view-pg-settings.html
-- name: PostgreSQL ALTER SYSTEM command reference
- description: Complete reference of the ALTER SYSTEM command documentation.
- link: https://www.postgresql.org/docs/current/sql-altersystem.html
-author:
-- Andrew Klychkov (@Andersson007)
-extends_documentation_fragment: postgres
-'''
-
-EXAMPLES = r'''
-- name: Restore wal_keep_segments parameter to initial state
- postgresql_set:
- name: wal_keep_segments
- reset: yes
-
-# Set work_mem parameter to 32MB, show what has been changed, and whether a restart is required
-# (output example: "msg": "work_mem 4MB >> 64MB restart_req: False")
-- name: Set work mem parameter
- postgresql_set:
- name: work_mem
- value: 32mb
- register: set
-
-- debug:
- msg: "{{ set.name }} {{ set.prev_val_pretty }} >> {{ set.value_pretty }} restart_req: {{ set.restart_required }}"
- when: set.changed
-# Note that some parameters require a restart of the PostgreSQL server to take effect.
-# In this situation you see the same value in prev_val_pretty and value_pretty, but 'changed=True'
-# (if you passed a value different from the current server setting).
-
-- name: Set log_min_duration_statement parameter to 1 second
- postgresql_set:
- name: log_min_duration_statement
- value: 1s
-
-- name: Set wal_log_hints parameter to default value (remove parameter from postgresql.auto.conf)
- postgresql_set:
- name: wal_log_hints
- value: default
-'''
-
-RETURN = r'''
-name:
- description: Name of PostgreSQL server parameter.
- returned: always
- type: str
- sample: 'shared_buffers'
-restart_required:
- description: Whether a restart of the PostgreSQL server is required to apply the change.
- returned: always
- type: bool
- sample: true
-prev_val_pretty:
- description: Information about previous state of the parameter.
- returned: always
- type: str
- sample: '4MB'
-value_pretty:
- description: Information about current state of the parameter.
- returned: always
- type: str
- sample: '64MB'
-value:
- description:
- - Dictionary that contains the current parameter value (at the time the playbook finished).
- - Note that for some parameters a restart of the PostgreSQL server is required for the change to take effect.
- - Returns the current value in check mode.
- returned: always
- type: dict
- sample: { "value": 67108864, "unit": "b" }
-context:
- description:
- - PostgreSQL setting context.
- returned: always
- type: str
- sample: user
-'''
-
-try:
- from psycopg2.extras import DictCursor
-except Exception:
- # psycopg2 is checked by connect_to_db()
- # from ansible.module_utils.postgres
- pass
-
-from copy import deepcopy
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible.module_utils.postgres import (
- connect_to_db,
- get_conn_params,
- postgres_common_argument_spec,
-)
-from ansible.module_utils._text import to_native
-
-PG_REQ_VER = 90400
-
-# Allow setting values like 1mb instead of 1MB, etc.:
-POSSIBLE_SIZE_UNITS = ("mb", "gb", "tb")
-
-# ===========================================
-# PostgreSQL module specific support methods.
-#
-
-
-def param_get(cursor, module, name):
- query = ("SELECT name, setting, unit, context, boot_val "
- "FROM pg_settings WHERE name = %(name)s")
- try:
- cursor.execute(query, {'name': name})
- info = cursor.fetchall()
- cursor.execute("SHOW %s" % name)
- val = cursor.fetchone()
-
- except Exception as e:
- module.fail_json(msg="Unable to get %s value due to : %s" % (name, to_native(e)))
-
- raw_val = info[0][1]
- unit = info[0][2]
- context = info[0][3]
- boot_val = info[0][4]
-
- if val[0] == 'True':
- val[0] = 'on'
- elif val[0] == 'False':
- val[0] = 'off'
-
- if unit == 'kB':
- if int(raw_val) > 0:
- raw_val = int(raw_val) * 1024
- if int(boot_val) > 0:
- boot_val = int(boot_val) * 1024
-
- unit = 'b'
-
- elif unit == 'MB':
- if int(raw_val) > 0:
- raw_val = int(raw_val) * 1024 * 1024
- if int(boot_val) > 0:
- boot_val = int(boot_val) * 1024 * 1024
-
- unit = 'b'
-
- return (val[0], raw_val, unit, boot_val, context)
-
-
-def pretty_to_bytes(pretty_val):
- # The function returns a value in bytes
- # if the value contains 'B', 'kB', 'MB', 'GB', 'TB'.
- # Otherwise it returns the passed argument.
-
- val_in_bytes = None
-
- if 'kB' in pretty_val:
- num_part = int(''.join(d for d in pretty_val if d.isdigit()))
- val_in_bytes = num_part * 1024
-
- elif 'MB' in pretty_val.upper():
- num_part = int(''.join(d for d in pretty_val if d.isdigit()))
- val_in_bytes = num_part * 1024 * 1024
-
- elif 'GB' in pretty_val.upper():
- num_part = int(''.join(d for d in pretty_val if d.isdigit()))
- val_in_bytes = num_part * 1024 * 1024 * 1024
-
- elif 'TB' in pretty_val.upper():
- num_part = int(''.join(d for d in pretty_val if d.isdigit()))
- val_in_bytes = num_part * 1024 * 1024 * 1024 * 1024
-
- elif 'B' in pretty_val.upper():
- num_part = int(''.join(d for d in pretty_val if d.isdigit()))
- val_in_bytes = num_part
-
- else:
- return pretty_val
-
- return val_in_bytes
-
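-# Illustrative conversions (not part of the original module). Note that only
-# the 'kB' check above is case-sensitive; the other units are matched
-# case-insensitively via upper():
-#
-#   pretty_to_bytes('4MB')    # -> 4194304
-#   pretty_to_bytes('512kB')  # -> 524288
-#   pretty_to_bytes('100')    # -> '100' (no unit, returned unchanged)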
-
-def param_set(cursor, module, name, value, context):
- try:
- if str(value).lower() == 'default':
- query = "ALTER SYSTEM SET %s = DEFAULT" % name
- else:
- query = "ALTER SYSTEM SET %s = '%s'" % (name, value)
- cursor.execute(query)
-
- if context != 'postmaster':
- cursor.execute("SELECT pg_reload_conf()")
-
- except Exception as e:
- module.fail_json(msg="Unable to get %s value due to : %s" % (name, to_native(e)))
-
- return True
-
-
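-# For illustration (not part of the original module), for a non-postmaster
-# parameter param_set() above issues SQL equivalent to:
-#
-#   ALTER SYSTEM SET work_mem = '32MB';
-#   SELECT pg_reload_conf();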
-# ===========================================
-# Module execution.
-#
-
-
-def main():
- argument_spec = postgres_common_argument_spec()
- argument_spec.update(
- name=dict(type='str', required=True),
- db=dict(type='str', aliases=['login_db']),
- value=dict(type='str'),
- reset=dict(type='bool'),
- session_role=dict(type='str'),
- )
- module = AnsibleModule(
- argument_spec=argument_spec,
- supports_check_mode=True,
- )
-
- name = module.params["name"]
- value = module.params["value"]
- reset = module.params["reset"]
-
- # Allow passing values like 1mb instead of 1MB, etc.:
- if value:
- for unit in POSSIBLE_SIZE_UNITS:
- if value[:-2].isdigit() and unit in value[-2:]:
- value = value.upper()
-
- if value and reset:
- module.fail_json(msg="%s: value and reset params are mutually exclusive" % name)
-
- if not value and not reset:
- module.fail_json(msg="%s: at least one of value or reset param must be specified" % name)
-
- conn_params = get_conn_params(module, module.params, warn_db_default=False)
- db_connection = connect_to_db(module, conn_params, autocommit=True)
- cursor = db_connection.cursor(cursor_factory=DictCursor)
-
- kw = {}
- # Check server version (needs 9.4 or later):
- ver = db_connection.server_version
- if ver < PG_REQ_VER:
- module.warn("PostgreSQL is %s version but %s or later is required" % (ver, PG_REQ_VER))
- kw = dict(
- changed=False,
- restart_required=False,
- value_pretty="",
- prev_val_pretty="",
- value={"value": "", "unit": ""},
- )
- kw['name'] = name
- db_connection.close()
- module.exit_json(**kw)
-
- # Set default returned values:
- restart_required = False
- changed = False
- kw['name'] = name
- kw['restart_required'] = False
-
- # Get info about param state:
- res = param_get(cursor, module, name)
- current_value = res[0]
- raw_val = res[1]
- unit = res[2]
- boot_val = res[3]
- context = res[4]
-
- if value == 'True':
- value = 'on'
- elif value == 'False':
- value = 'off'
-
- kw['prev_val_pretty'] = current_value
- kw['value_pretty'] = deepcopy(kw['prev_val_pretty'])
- kw['context'] = context
-
- # Do job
- if context == "internal":
- module.fail_json(msg="%s: cannot be changed (internal context). See "
- "https://www.postgresql.org/docs/current/runtime-config-preset.html" % name)
-
- if context == "postmaster":
- restart_required = True
-
- # If check_mode, just compare and exit:
- if module.check_mode:
- if pretty_to_bytes(value) == pretty_to_bytes(current_value):
- kw['changed'] = False
-
- else:
- kw['value_pretty'] = value
- kw['changed'] = True
-
- # Return the current raw value in check_mode anyway:
- kw['value'] = dict(
- value=raw_val,
- unit=unit,
- )
- kw['restart_required'] = restart_required
- module.exit_json(**kw)
-
- # Set param:
- if value and value != current_value:
- changed = param_set(cursor, module, name, value, context)
-
- kw['value_pretty'] = value
-
- # Reset param:
- elif reset:
- if raw_val == boot_val:
- # nothing to change, exit:
- kw['value'] = dict(
- value=raw_val,
- unit=unit,
- )
- module.exit_json(**kw)
-
- changed = param_set(cursor, module, name, boot_val, context)
-
- if restart_required:
- module.warn("Restart of PostgreSQL is required for setting %s" % name)
-
- cursor.close()
- db_connection.close()
-
- # Reconnect and recheck current value:
- if context in ('sighup', 'superuser-backend', 'backend', 'superuser', 'user'):
- db_connection = connect_to_db(module, conn_params, autocommit=True)
- cursor = db_connection.cursor(cursor_factory=DictCursor)
-
- res = param_get(cursor, module, name)
- # f_ means 'final'
- f_value = res[0]
- f_raw_val = res[1]
-
- if raw_val == f_raw_val:
- changed = False
-
- else:
- changed = True
-
- kw['value_pretty'] = f_value
- kw['value'] = dict(
- value=f_raw_val,
- unit=unit,
- )
-
- cursor.close()
- db_connection.close()
-
- kw['changed'] = changed
- kw['restart_required'] = restart_required
- module.exit_json(**kw)
-
-
-if __name__ == '__main__':
- main()
diff --git a/test/support/integration/plugins/modules/postgresql_table.py b/test/support/integration/plugins/modules/postgresql_table.py
deleted file mode 100644
index 3bef03b0..00000000
--- a/test/support/integration/plugins/modules/postgresql_table.py
+++ /dev/null
@@ -1,601 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-
-# Copyright: (c) 2019, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru>
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-ANSIBLE_METADATA = {
- 'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'
-}
-
-DOCUMENTATION = r'''
----
-module: postgresql_table
-short_description: Create, drop, or modify a PostgreSQL table
-description:
-- Allows creating, dropping, renaming, or truncating a table and changing some table attributes.
-version_added: '2.8'
-options:
- table:
- description:
- - Table name.
- required: true
- aliases:
- - name
- type: str
- state:
- description:
- - The table state. I(state=absent) is mutually exclusive with I(tablespace), I(owner), I(unlogged),
- I(like), I(including), I(columns), I(truncate), I(storage_params), and I(rename).
- type: str
- default: present
- choices: [ absent, present ]
- tablespace:
- description:
- - Set a tablespace for the table.
- required: false
- type: str
- owner:
- description:
- - Set a table owner.
- type: str
- unlogged:
- description:
- - Create an unlogged table.
- type: bool
- default: no
- like:
- description:
- - Create a table like another table (with similar DDL).
- Mutually exclusive with I(columns), I(rename), and I(truncate).
- type: str
- including:
- description:
- - Keywords that are used with the like parameter; may be DEFAULTS, CONSTRAINTS, INDEXES, STORAGE, COMMENTS or ALL.
- Needs I(like) specified. Mutually exclusive with I(columns), I(rename), and I(truncate).
- type: str
- columns:
- description:
- - List of column definitions (for example, C(id bigserial primary key)).
- type: list
- elements: str
- rename:
- description:
- - New table name. Mutually exclusive with I(tablespace), I(owner),
- I(unlogged), I(like), I(including), I(columns), I(truncate), and I(storage_params).
- type: str
- truncate:
- description:
- - Truncate a table. Mutually exclusive with I(tablespace), I(owner), I(unlogged),
- I(like), I(including), I(columns), I(rename), and I(storage_params).
- type: bool
- default: no
- storage_params:
- description:
- - Storage parameters like fillfactor, autovacuum_vacuum_threshold, etc.
- Mutually exclusive with I(rename) and I(truncate).
- type: list
- elements: str
- db:
- description:
- - Name of database to connect to and where the table will be created.
- type: str
- aliases:
- - login_db
- session_role:
- description:
- - Switch to session_role after connecting.
- The specified session_role must be a role that the current login_user is a member of.
- - Permissions checking for SQL commands is carried out as though
- the session_role were the one that had logged in originally.
- type: str
- cascade:
- description:
- - Automatically drop objects that depend on the table (such as views).
- Used with I(state=absent) only.
- type: bool
- default: no
- version_added: '2.9'
-notes:
-- If you do not pass db parameter, tables will be created in the database
- named postgres.
-- PostgreSQL allows creating columnless tables, so the columns param is optional.
-- Unlogged tables are available from PostgreSQL server version 9.1.
-seealso:
-- module: postgresql_sequence
-- module: postgresql_idx
-- module: postgresql_info
-- module: postgresql_tablespace
-- module: postgresql_owner
-- module: postgresql_privs
-- module: postgresql_copy
-- name: CREATE TABLE reference
- description: Complete reference of the CREATE TABLE command documentation.
- link: https://www.postgresql.org/docs/current/sql-createtable.html
-- name: ALTER TABLE reference
- description: Complete reference of the ALTER TABLE command documentation.
- link: https://www.postgresql.org/docs/current/sql-altertable.html
-- name: DROP TABLE reference
- description: Complete reference of the DROP TABLE command documentation.
- link: https://www.postgresql.org/docs/current/sql-droptable.html
-- name: PostgreSQL data types
- description: Complete reference of the PostgreSQL data types documentation.
- link: https://www.postgresql.org/docs/current/datatype.html
-author:
-- Andrei Klychkov (@Andersson007)
-extends_documentation_fragment: postgres
-'''
-
-EXAMPLES = r'''
-- name: Create tbl2 in the acme database with the DDL like tbl1 with testuser as an owner
- postgresql_table:
- db: acme
- name: tbl2
- like: tbl1
- owner: testuser
-
-- name: Create tbl2 in the acme database and tablespace ssd with the DDL like tbl1 including comments and indexes
- postgresql_table:
- db: acme
- table: tbl2
- like: tbl1
- including: comments, indexes
- tablespace: ssd
-
-- name: Create test_table with several columns in ssd tablespace with fillfactor=10 and autovacuum_analyze_threshold=1
- postgresql_table:
- name: test_table
- columns:
- - id bigserial primary key
- - num bigint
- - stories text
- tablespace: ssd
- storage_params:
- - fillfactor=10
- - autovacuum_analyze_threshold=1
-
-- name: Create an unlogged table in schema acme
- postgresql_table:
- name: acme.useless_data
- columns: waste_id int
- unlogged: true
-
-- name: Rename table foo to bar
- postgresql_table:
- table: foo
- rename: bar
-
-- name: Rename table foo from schema acme to bar
- postgresql_table:
- name: acme.foo
- rename: bar
-
-- name: Set owner to someuser
- postgresql_table:
- name: foo
- owner: someuser
-
-- name: Change tablespace of foo table to new_tablespace and set owner to new_user
- postgresql_table:
- name: foo
- tablespace: new_tablespace
- owner: new_user
-
-- name: Truncate table foo
- postgresql_table:
- name: foo
- truncate: yes
-
-- name: Drop table foo from schema acme
- postgresql_table:
- name: acme.foo
- state: absent
-
-- name: Drop table bar cascade
- postgresql_table:
- name: bar
- state: absent
- cascade: yes
-'''
-
-RETURN = r'''
-table:
- description: Name of a table.
- returned: always
- type: str
- sample: 'foo'
-state:
- description: Table state.
- returned: always
- type: str
- sample: 'present'
-owner:
- description: Table owner.
- returned: always
- type: str
- sample: 'postgres'
-tablespace:
- description: Tablespace.
- returned: always
- type: str
- sample: 'ssd_tablespace'
-queries:
- description: List of executed queries.
- returned: always
- type: list
- sample: [ 'CREATE TABLE "test_table" (id bigint)' ]
-storage_params:
- description: Storage parameters.
- returned: always
- type: list
- sample: [ "fillfactor=100", "autovacuum_analyze_threshold=1" ]
-'''
-
-try:
- from psycopg2.extras import DictCursor
-except ImportError:
- # psycopg2 is checked by connect_to_db()
- # from ansible.module_utils.postgres
- pass
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible.module_utils.database import pg_quote_identifier
-from ansible.module_utils.postgres import (
- connect_to_db,
- exec_sql,
- get_conn_params,
- postgres_common_argument_spec,
-)
-
-
-# ===========================================
-# PostgreSQL module specific support methods.
-#
-
-class Table(object):
- def __init__(self, name, module, cursor):
- self.name = name
- self.module = module
- self.cursor = cursor
- self.info = {
- 'owner': '',
- 'tblspace': '',
- 'storage_params': [],
- }
- self.exists = False
- self.__exists_in_db()
- self.executed_queries = []
-
- def get_info(self):
- """Getter to refresh and get table info"""
- self.__exists_in_db()
-
- def __exists_in_db(self):
- """Check table exists and refresh info"""
- if "." in self.name:
- schema = self.name.split('.')[-2]
- tblname = self.name.split('.')[-1]
- else:
- schema = 'public'
- tblname = self.name
-
- query = ("SELECT t.tableowner, t.tablespace, c.reloptions "
- "FROM pg_tables AS t "
- "INNER JOIN pg_class AS c ON c.relname = t.tablename "
- "INNER JOIN pg_namespace AS n ON c.relnamespace = n.oid "
- "WHERE t.tablename = %(tblname)s "
- "AND n.nspname = %(schema)s")
- res = exec_sql(self, query, query_params={'tblname': tblname, 'schema': schema},
- add_to_executed=False)
- if res:
- self.exists = True
- self.info = dict(
- owner=res[0][0],
- tblspace=res[0][1] if res[0][1] else '',
- storage_params=res[0][2] if res[0][2] else [],
- )
-
- return True
- else:
- self.exists = False
- return False
-
- def create(self, columns='', params='', tblspace='',
- unlogged=False, owner=''):
- """
- Create table.
- If table exists, check passed args (params, tblspace, owner) and,
- if they're different from current, change them.
- Arguments:
- params - storage params (passed by "WITH (...)" in SQL),
- comma separated.
- tblspace - tablespace.
- owner - table owner.
- unlogged - create unlogged table.
- columns - column string (comma separated).
- """
- name = pg_quote_identifier(self.name, 'table')
-
- changed = False
-
- if self.exists:
- if tblspace == 'pg_default' and not self.info['tblspace']:
- pass # Because an empty tblspace and pg_default have the same meaning
- elif tblspace and self.info['tblspace'] != tblspace:
- self.set_tblspace(tblspace)
- changed = True
-
- if owner and self.info['owner'] != owner:
- self.set_owner(owner)
- changed = True
-
- if params:
- param_list = [p.strip(' ') for p in params.split(',')]
-
- new_param = False
- for p in param_list:
- if p not in self.info['storage_params']:
- new_param = True
-
- if new_param:
- self.set_stor_params(params)
- changed = True
-
- if changed:
- return True
- return False
-
- query = "CREATE"
- if unlogged:
- query += " UNLOGGED TABLE %s" % name
- else:
- query += " TABLE %s" % name
-
- if columns:
- query += " (%s)" % columns
- else:
- query += " ()"
-
- if params:
- query += " WITH (%s)" % params
-
- if tblspace:
- query += " TABLESPACE %s" % pg_quote_identifier(tblspace, 'database')
-
- if exec_sql(self, query, ddl=True):
- changed = True
-
- if owner:
- changed = self.set_owner(owner)
-
- return changed
-
- def create_like(self, src_table, including='', tblspace='',
- unlogged=False, params='', owner=''):
- """
- Create table like another table (with similar DDL).
- Arguments:
- src_table - source table.
- including - corresponds to optional INCLUDING expression
- in CREATE TABLE ... LIKE statement.
- params - storage params (passed by "WITH (...)" in SQL),
- comma separated.
- tblspace - tablespace.
- owner - table owner.
- unlogged - create unlogged table.
- """
- changed = False
-
- name = pg_quote_identifier(self.name, 'table')
-
- query = "CREATE"
- if unlogged:
- query += " UNLOGGED TABLE %s" % name
- else:
- query += " TABLE %s" % name
-
- query += " (LIKE %s" % pg_quote_identifier(src_table, 'table')
-
- if including:
- including = including.split(',')
- for i in including:
- query += " INCLUDING %s" % i
-
- query += ')'
-
- if params:
- query += " WITH (%s)" % params
-
- if tblspace:
- query += " TABLESPACE %s" % pg_quote_identifier(tblspace, 'database')
-
- if exec_sql(self, query, ddl=True):
- changed = True
-
- if owner:
- changed = self.set_owner(owner)
-
- return changed
-
- def truncate(self):
- query = "TRUNCATE TABLE %s" % pg_quote_identifier(self.name, 'table')
- return exec_sql(self, query, ddl=True)
-
- def rename(self, newname):
- query = "ALTER TABLE %s RENAME TO %s" % (pg_quote_identifier(self.name, 'table'),
- pg_quote_identifier(newname, 'table'))
- return exec_sql(self, query, ddl=True)
-
- def set_owner(self, username):
- query = "ALTER TABLE %s OWNER TO %s" % (pg_quote_identifier(self.name, 'table'),
- pg_quote_identifier(username, 'role'))
- return exec_sql(self, query, ddl=True)
-
- def drop(self, cascade=False):
- if not self.exists:
- return False
-
- query = "DROP TABLE %s" % pg_quote_identifier(self.name, 'table')
- if cascade:
- query += " CASCADE"
- return exec_sql(self, query, ddl=True)
-
- def set_tblspace(self, tblspace):
- query = "ALTER TABLE %s SET TABLESPACE %s" % (pg_quote_identifier(self.name, 'table'),
- pg_quote_identifier(tblspace, 'database'))
- return exec_sql(self, query, ddl=True)
-
- def set_stor_params(self, params):
- query = "ALTER TABLE %s SET (%s)" % (pg_quote_identifier(self.name, 'table'), params)
- return exec_sql(self, query, ddl=True)
-
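-# A minimal usage sketch (illustrative only, mirroring main() below): a Table
-# object wraps a single table and issues DDL through exec_sql(), e.g.:
-#
-#   table_obj = Table('acme.test_table', module, cursor)
-#   if not table_obj.exists:
-#       table_obj.create(columns='id bigserial primary key, num bigint',
-#                        tblspace='ssd', owner='testuser')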
-
-# ===========================================
-# Module execution.
-#
-
-
-def main():
- argument_spec = postgres_common_argument_spec()
- argument_spec.update(
- table=dict(type='str', required=True, aliases=['name']),
- state=dict(type='str', default="present", choices=["absent", "present"]),
- db=dict(type='str', default='', aliases=['login_db']),
- tablespace=dict(type='str'),
- owner=dict(type='str'),
- unlogged=dict(type='bool', default=False),
- like=dict(type='str'),
- including=dict(type='str'),
- rename=dict(type='str'),
- truncate=dict(type='bool', default=False),
- columns=dict(type='list', elements='str'),
- storage_params=dict(type='list', elements='str'),
- session_role=dict(type='str'),
- cascade=dict(type='bool', default=False),
- )
- module = AnsibleModule(
- argument_spec=argument_spec,
- supports_check_mode=True,
- )
-
- table = module.params["table"]
- state = module.params["state"]
- tablespace = module.params["tablespace"]
- owner = module.params["owner"]
- unlogged = module.params["unlogged"]
- like = module.params["like"]
- including = module.params["including"]
- newname = module.params["rename"]
- storage_params = module.params["storage_params"]
- truncate = module.params["truncate"]
- columns = module.params["columns"]
- cascade = module.params["cascade"]
-
- if state == 'present' and cascade:
- module.warn("cascade=true is ignored when state=present")
-
- # Check mutual exclusive parameters:
- if state == 'absent' and (truncate or newname or columns or tablespace or like or storage_params or unlogged or owner or including):
- module.fail_json(msg="%s: state=absent is mutually exclusive with: "
- "truncate, rename, columns, tablespace, "
- "including, like, storage_params, unlogged, owner" % table)
-
- if truncate and (newname or columns or like or unlogged or storage_params or owner or tablespace or including):
- module.fail_json(msg="%s: truncate is mutually exclusive with: "
- "rename, columns, like, unlogged, including, "
- "storage_params, owner, tablespace" % table)
-
- if newname and (columns or like or unlogged or storage_params or owner or tablespace or including):
- module.fail_json(msg="%s: rename is mutually exclusive with: "
- "columns, like, unlogged, including, "
- "storage_params, owner, tablespace" % table)
-
- if like and columns:
- module.fail_json(msg="%s: like and columns params are mutually exclusive" % table)
- if including and not like:
- module.fail_json(msg="%s: including param needs like param specified" % table)
-
- conn_params = get_conn_params(module, module.params)
- db_connection = connect_to_db(module, conn_params, autocommit=False)
- cursor = db_connection.cursor(cursor_factory=DictCursor)
-
- if storage_params:
- storage_params = ','.join(storage_params)
-
- if columns:
- columns = ','.join(columns)
-
- ##############
- # Do main job:
- table_obj = Table(table, module, cursor)
-
- # Set default returned values:
- changed = False
- kw = {}
- kw['table'] = table
- kw['state'] = ''
- if table_obj.exists:
- kw = dict(
- table=table,
- state='present',
- owner=table_obj.info['owner'],
- tablespace=table_obj.info['tblspace'],
- storage_params=table_obj.info['storage_params'],
- )
-
- if state == 'absent':
- changed = table_obj.drop(cascade=cascade)
-
- elif truncate:
- changed = table_obj.truncate()
-
- elif newname:
- changed = table_obj.rename(newname)
- q = table_obj.executed_queries
- table_obj = Table(newname, module, cursor)
- table_obj.executed_queries = q
-
- elif state == 'present' and not like:
- changed = table_obj.create(columns, storage_params,
- tablespace, unlogged, owner)
-
- elif state == 'present' and like:
- changed = table_obj.create_like(like, including, tablespace,
- unlogged, storage_params)
-
- if changed:
- if module.check_mode:
- db_connection.rollback()
- else:
- db_connection.commit()
-
- # Refresh table info for RETURN.
- # Note, if table has been renamed, it gets info by newname:
- table_obj.get_info()
- db_connection.commit()
- if table_obj.exists:
- kw = dict(
- table=table,
- state='present',
- owner=table_obj.info['owner'],
- tablespace=table_obj.info['tblspace'],
- storage_params=table_obj.info['storage_params'],
- )
- else:
- # We just change the table state here
- # to keep other information about the dropped table:
- kw['state'] = 'absent'
-
- kw['queries'] = table_obj.executed_queries
- kw['changed'] = changed
- db_connection.close()
- module.exit_json(**kw)
-
-
-if __name__ == '__main__':
- main()
diff --git a/test/support/integration/plugins/modules/postgresql_user.py b/test/support/integration/plugins/modules/postgresql_user.py
deleted file mode 100644
index 10afd0a0..00000000
--- a/test/support/integration/plugins/modules/postgresql_user.py
+++ /dev/null
@@ -1,927 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-
-# Copyright: Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-ANSIBLE_METADATA = {
- 'metadata_version': '1.1',
- 'status': ['stableinterface'],
- 'supported_by': 'community'
-}
-
-DOCUMENTATION = r'''
----
-module: postgresql_user
-short_description: Add or remove a user (role) from a PostgreSQL server instance
-description:
-- Adds or removes a user (role) from a PostgreSQL server instance
- ("cluster" in PostgreSQL terminology) and, optionally,
- grants the user access to an existing database or tables.
-- A user is a role with login privilege.
-- The fundamental function of the module is to create, or delete, users from
- a PostgreSQL instance. Privilege assignment, or removal, is an optional
- step, which works on one database at a time. This allows the module to
- be called several times in the same playbook to modify the permissions on
- different databases, or to grant permissions to already existing users.
-- A user cannot be removed until all the privileges have been stripped from
- the user. In such a situation, if the module tries to remove the user it
- will fail. To prevent this, the fail_on_user option signals the module to
- try to remove the user but, if that is not possible, keep going; the
- module will report whether changes happened and, separately, whether the
- user was removed or not.
-version_added: '0.6'
-options:
- name:
- description:
- - Name of the user (role) to add or remove.
- type: str
- required: true
- aliases:
- - user
- password:
- description:
- - Set the user's password (before Ansible 1.4 this was required).
- - Password can be passed unhashed or hashed (MD5-hashed).
- - An unhashed password will automatically be hashed when saved into the
- database if the C(encrypted) parameter is set, otherwise it will be saved in
- plain text format.
- - When passing a hashed password it must be generated with the format
- C('str["md5"] + md5[ password + username ]'), resulting in a total of
- 35 characters. An easy way to do this is C(echo "md5$(echo -n
- 'verysecretpasswordJOE' | md5sum | awk '{print $1}')").
- - Note that if the provided password string is already in MD5-hashed
- format, then it is used as-is, regardless of C(encrypted) parameter.
- type: str
- db:
- description:
- - Name of database to connect to and where user's permissions will be granted.
- type: str
- aliases:
- - login_db
- fail_on_user:
- description:
- - If C(yes), fail when user (role) can't be removed. Otherwise just log and continue.
- default: 'yes'
- type: bool
- aliases:
- - fail_on_role
- priv:
- description:
- - "Slash-separated PostgreSQL privileges string: C(priv1/priv2), where
- privileges can be defined for database ( allowed options - 'CREATE',
- 'CONNECT', 'TEMPORARY', 'TEMP', 'ALL'. For example C(CONNECT) ) or
- for table ( allowed options - 'SELECT', 'INSERT', 'UPDATE', 'DELETE',
- 'TRUNCATE', 'REFERENCES', 'TRIGGER', 'ALL'. For example
- C(table:SELECT) ). Mixed example of this string:
- C(CONNECT/CREATE/table1:SELECT/table2:INSERT)."
- type: str
- role_attr_flags:
- description:
- - "PostgreSQL user attributes string in the format: CREATEDB,CREATEROLE,SUPERUSER."
- - Note that '[NO]CREATEUSER' is deprecated.
- - To create a simple role to be used as a group, specify the C(NOLOGIN) flag.
- type: str
- choices: [ '[NO]SUPERUSER', '[NO]CREATEROLE', '[NO]CREATEDB',
- '[NO]INHERIT', '[NO]LOGIN', '[NO]REPLICATION', '[NO]BYPASSRLS' ]
- session_role:
- version_added: '2.8'
- description:
- - Switch to session_role after connecting.
- - The specified session_role must be a role that the current login_user is a member of.
- - Permissions checking for SQL commands is carried out as though the session_role were the one that had logged in originally.
- type: str
- state:
- description:
- - The user (role) state.
- type: str
- default: present
- choices: [ absent, present ]
- encrypted:
- description:
- - Whether the password is stored hashed in the database.
- - Passwords can be passed already hashed or unhashed, and PostgreSQL
- ensures the stored password is hashed when C(encrypted) is set.
- - "Note: PostgreSQL 10 and newer doesn't support unhashed passwords."
- - Prior to Ansible 2.6, this was C(no) by default.
- default: 'yes'
- type: bool
- version_added: '1.4'
- expires:
- description:
- - The date at which the user's password is to expire.
- - If set to C('infinity'), the user's password never expires.
- - Note that this value should be a valid SQL date and time type.
- type: str
- version_added: '1.4'
- no_password_changes:
- description:
- - If C(yes), don't inspect database for password changes. Effective when
- C(pg_authid) is not accessible (such as AWS RDS). Otherwise, make
- password changes as necessary.
- default: 'no'
- type: bool
- version_added: '2.0'
- conn_limit:
- description:
- - Specifies the user (role) connection limit.
- type: int
- version_added: '2.4'
- ssl_mode:
- description:
- - Determines whether or with what priority a secure SSL TCP/IP connection will be negotiated with the server.
- - See https://www.postgresql.org/docs/current/static/libpq-ssl.html for more information on the modes.
- - Default of C(prefer) matches libpq default.
- type: str
- default: prefer
- choices: [ allow, disable, prefer, require, verify-ca, verify-full ]
- version_added: '2.3'
- ca_cert:
- description:
- - Specifies the name of a file containing SSL certificate authority (CA) certificate(s).
- - If the file exists, the server's certificate will be verified to be signed by one of these authorities.
- type: str
- aliases: [ ssl_rootcert ]
- version_added: '2.3'
- groups:
- description:
- - The list of groups (roles) that need to be granted to the user.
- type: list
- elements: str
- version_added: '2.9'
- comment:
- description:
- - Add a comment on the user (equal to the COMMENT ON ROLE statement result).
- type: str
- version_added: '2.10'
-notes:
-- The module creates a user (role) with login privilege by default.
- Use NOLOGIN role_attr_flags to change this behaviour.
-- If you specify PUBLIC as the user (role), then the privilege changes will apply to all users (roles).
- You may not specify password or role_attr_flags when the PUBLIC user is specified.
-seealso:
-- module: postgresql_privs
-- module: postgresql_membership
-- module: postgresql_owner
-- name: PostgreSQL database roles
- description: Complete reference of the PostgreSQL database roles documentation.
- link: https://www.postgresql.org/docs/current/user-manag.html
-author:
-- Ansible Core Team
-extends_documentation_fragment: postgres
-'''
-
-EXAMPLES = r'''
-- name: Connect to acme database, create django user, and grant access to database and products table
- postgresql_user:
- db: acme
- name: django
- password: ceec4eif7ya
- priv: "CONNECT/products:ALL"
- expires: "Jan 31 2020"
-
-- name: Add a comment on django user
- postgresql_user:
- db: acme
- name: django
- comment: This is a test user
-
-# Connect to default database, create rails user, set its password (MD5-hashed),
-# and grant privilege to create other databases and demote rails from super user status if user exists
-- name: Create rails user, set MD5-hashed password, grant privs
- postgresql_user:
- name: rails
- password: md59543f1d82624df2b31672ec0f7050460
- role_attr_flags: CREATEDB,NOSUPERUSER
-
-- name: Connect to acme database and remove test user privileges from there
- postgresql_user:
- db: acme
- name: test
- priv: "ALL/products:ALL"
- state: absent
- fail_on_user: no
-
-- name: Connect to test database, remove test user from cluster
- postgresql_user:
- db: test
- name: test
- priv: ALL
- state: absent
-
-- name: Connect to acme database and set user's password with no expire date
- postgresql_user:
- db: acme
- name: django
- password: mysupersecretword
- priv: "CONNECT/products:ALL"
- expires: infinity
-
-# Example privileges string format
-# INSERT,UPDATE/table:SELECT/anothertable:ALL
-
-- name: Connect to test database and remove an existing user's password
- postgresql_user:
- db: test
- user: test
- password: ""
-
-- name: Create user test and grant group user_ro and user_rw to it
- postgresql_user:
- name: test
- groups:
- - user_ro
- - user_rw
-'''
-
-RETURN = r'''
-queries:
- description: List of executed queries.
- returned: always
- type: list
- sample: ['CREATE USER "alice"', 'GRANT CONNECT ON DATABASE "acme" TO "alice"']
- version_added: '2.8'
-'''
-
-import itertools
-import re
-import traceback
-from hashlib import md5
-
-try:
- import psycopg2
- from psycopg2.extras import DictCursor
-except ImportError:
- # psycopg2 is checked by connect_to_db()
- # from ansible.module_utils.postgres
- pass
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible.module_utils.database import pg_quote_identifier, SQLParseError
-from ansible.module_utils.postgres import (
- connect_to_db,
- get_conn_params,
- PgMembership,
- postgres_common_argument_spec,
-)
-from ansible.module_utils._text import to_bytes, to_native
-from ansible.module_utils.six import iteritems
-
-
-FLAGS = ('SUPERUSER', 'CREATEROLE', 'CREATEDB', 'INHERIT', 'LOGIN', 'REPLICATION')
-FLAGS_BY_VERSION = {'BYPASSRLS': 90500}
-
-VALID_PRIVS = dict(table=frozenset(('SELECT', 'INSERT', 'UPDATE', 'DELETE', 'TRUNCATE', 'REFERENCES', 'TRIGGER', 'ALL')),
- database=frozenset(
- ('CREATE', 'CONNECT', 'TEMPORARY', 'TEMP', 'ALL')),
- )
-
-# map to cope with idiosyncrasies of SUPERUSER and LOGIN
-PRIV_TO_AUTHID_COLUMN = dict(SUPERUSER='rolsuper', CREATEROLE='rolcreaterole',
- CREATEDB='rolcreatedb', INHERIT='rolinherit', LOGIN='rolcanlogin',
- REPLICATION='rolreplication', BYPASSRLS='rolbypassrls')
-
-executed_queries = []
-
-
-class InvalidFlagsError(Exception):
- pass
-
-
-class InvalidPrivsError(Exception):
- pass
-
-# ===========================================
-# PostgreSQL module specific support methods.
-#
-
-
-def user_exists(cursor, user):
- # The PUBLIC user is a special case that is always there
- if user == 'PUBLIC':
- return True
- query = "SELECT rolname FROM pg_roles WHERE rolname=%(user)s"
- cursor.execute(query, {'user': user})
- return cursor.rowcount > 0
-
-
-def user_add(cursor, user, password, role_attr_flags, encrypted, expires, conn_limit):
- """Create a new database user (role)."""
- # Note: role_attr_flags escaped by parse_role_attrs and encrypted is a
- # literal
- query_password_data = dict(password=password, expires=expires)
- query = ['CREATE USER "%(user)s"' %
- {"user": user}]
- if password is not None and password != '':
- query.append("WITH %(crypt)s" % {"crypt": encrypted})
- query.append("PASSWORD %(password)s")
- if expires is not None:
- query.append("VALID UNTIL %(expires)s")
- if conn_limit is not None:
- query.append("CONNECTION LIMIT %(conn_limit)s" % {"conn_limit": conn_limit})
- query.append(role_attr_flags)
- query = ' '.join(query)
- executed_queries.append(query)
- cursor.execute(query, query_password_data)
- return True
-
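-# For illustration only (hypothetical values): with user='alice',
-# encrypted='ENCRYPTED', expires set, conn_limit=10 and role_attr_flags='LOGIN',
-# the query assembled above and passed to cursor.execute() is:
-# CREATE USER "alice" WITH ENCRYPTED PASSWORD %(password)s VALID UNTIL %(expires)s CONNECTION LIMIT 10 LOGIN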
-
-def user_should_we_change_password(current_role_attrs, user, password, encrypted):
- """Check if we should change the user's password.
-
- Compare the proposed password with the existing one, comparing
- hashes if encrypted. If we can't access it assume yes.
- """
-
- if current_role_attrs is None:
- # on some databases, e.g. AWS RDS instances, there is no access to
- # the pg_authid relation to check the pre-existing password, so we
- # just assume the password is different
- return True
-
- # Do we actually need to do anything?
- pwchanging = False
- if password is not None:
- # Empty password means that the role shouldn't have a password, which
- # means we need to check if the current password is None.
- if password == '':
- if current_role_attrs['rolpassword'] is not None:
- pwchanging = True
- # 32: MD5 hashes are represented as a sequence of 32 hexadecimal digits
- # 3: The size of the 'md5' prefix
- # When the provided password looks like an MD5 hash, the value of
- # 'encrypted' is ignored.
- elif (password.startswith('md5') and len(password) == 32 + 3) or encrypted == 'UNENCRYPTED':
- if password != current_role_attrs['rolpassword']:
- pwchanging = True
- elif encrypted == 'ENCRYPTED':
- hashed_password = 'md5{0}'.format(md5(to_bytes(password) + to_bytes(user)).hexdigest())
- if hashed_password != current_role_attrs['rolpassword']:
- pwchanging = True
-
- return pwchanging
-
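-# A minimal sketch (not part of the original module) of the MD5 scheme the
-# comparison above relies on; 'alice' and 's3cret' are hypothetical values:
-# from hashlib import md5
-# hashed = 'md5' + md5(b's3cret' + b'alice').hexdigest()
-# len(hashed) == 35  # the 3-char 'md5' prefix plus 32 hex digits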
-
-def user_alter(db_connection, module, user, password, role_attr_flags, encrypted, expires, no_password_changes, conn_limit):
- """Change user password and/or attributes. Return True if changed, False otherwise."""
- changed = False
-
- cursor = db_connection.cursor(cursor_factory=DictCursor)
- # Note: role_attr_flags escaped by parse_role_attrs and encrypted is a
- # literal
- if user == 'PUBLIC':
- if password is not None:
- module.fail_json(msg="cannot change the password for PUBLIC user")
- elif role_attr_flags != '':
- module.fail_json(msg="cannot change the role_attr_flags for PUBLIC user")
- else:
- return False
-
- # Handle passwords.
- if not no_password_changes and (password is not None or role_attr_flags != '' or expires is not None or conn_limit is not None):
- # Select password and all flag-like columns in order to verify changes.
- try:
- select = "SELECT * FROM pg_authid where rolname=%(user)s"
- cursor.execute(select, {"user": user})
- # Grab current role attributes.
- current_role_attrs = cursor.fetchone()
- except psycopg2.ProgrammingError:
- current_role_attrs = None
- db_connection.rollback()
-
- pwchanging = user_should_we_change_password(current_role_attrs, user, password, encrypted)
-
- if current_role_attrs is None:
- try:
- # AWS RDS instances do not allow users to access pg_authid,
- # so try to get current_role_attrs from the pg_roles table
- select = "SELECT * FROM pg_roles where rolname=%(user)s"
- cursor.execute(select, {"user": user})
- # Grab current role attributes from pg_roles
- current_role_attrs = cursor.fetchone()
- except psycopg2.ProgrammingError as e:
- db_connection.rollback()
- module.fail_json(msg="Failed to get role details for current user %s: %s" % (user, e))
-
- role_attr_flags_changing = False
- if role_attr_flags:
- role_attr_flags_dict = {}
- for r in role_attr_flags.split(' '):
- if r.startswith('NO'):
- role_attr_flags_dict[r.replace('NO', '', 1)] = False
- else:
- role_attr_flags_dict[r] = True
-
- for role_attr_name, role_attr_value in role_attr_flags_dict.items():
- if current_role_attrs[PRIV_TO_AUTHID_COLUMN[role_attr_name]] != role_attr_value:
- role_attr_flags_changing = True
-
- if expires is not None:
- cursor.execute("SELECT %s::timestamptz;", (expires,))
- expires_with_tz = cursor.fetchone()[0]
- expires_changing = expires_with_tz != current_role_attrs.get('rolvaliduntil')
- else:
- expires_changing = False
-
- conn_limit_changing = (conn_limit is not None and conn_limit != current_role_attrs['rolconnlimit'])
-
- if not pwchanging and not role_attr_flags_changing and not expires_changing and not conn_limit_changing:
- return False
-
- alter = ['ALTER USER "%(user)s"' % {"user": user}]
- if pwchanging:
- if password != '':
- alter.append("WITH %(crypt)s" % {"crypt": encrypted})
- alter.append("PASSWORD %(password)s")
- else:
- alter.append("WITH PASSWORD NULL")
- alter.append(role_attr_flags)
- elif role_attr_flags:
- alter.append('WITH %s' % role_attr_flags)
- if expires is not None:
- alter.append("VALID UNTIL %(expires)s")
- if conn_limit is not None:
- alter.append("CONNECTION LIMIT %(conn_limit)s" % {"conn_limit": conn_limit})
-
- query_password_data = dict(password=password, expires=expires)
- try:
- cursor.execute(' '.join(alter), query_password_data)
- changed = True
- except psycopg2.InternalError as e:
- if e.pgcode == '25006':
- # Handle errors due to read-only transactions indicated by pgcode 25006
- # ERROR: cannot execute ALTER ROLE in a read-only transaction
- changed = False
- module.fail_json(msg=e.pgerror, exception=traceback.format_exc())
- return changed
- else:
- raise psycopg2.InternalError(e)
- except psycopg2.NotSupportedError as e:
- module.fail_json(msg=e.pgerror, exception=traceback.format_exc())
-
- elif no_password_changes and role_attr_flags != '':
- # Grab role information from pg_roles instead of pg_authid
- select = "SELECT * FROM pg_roles where rolname=%(user)s"
- cursor.execute(select, {"user": user})
- # Grab current role attributes.
- current_role_attrs = cursor.fetchone()
-
- role_attr_flags_changing = False
-
- if role_attr_flags:
- role_attr_flags_dict = {}
- for r in role_attr_flags.split(' '):
- if r.startswith('NO'):
- role_attr_flags_dict[r.replace('NO', '', 1)] = False
- else:
- role_attr_flags_dict[r] = True
-
- for role_attr_name, role_attr_value in role_attr_flags_dict.items():
- if current_role_attrs[PRIV_TO_AUTHID_COLUMN[role_attr_name]] != role_attr_value:
- role_attr_flags_changing = True
-
- if not role_attr_flags_changing:
- return False
-
- alter = ['ALTER USER "%(user)s"' %
- {"user": user}]
- if role_attr_flags:
- alter.append('WITH %s' % role_attr_flags)
-
- try:
- cursor.execute(' '.join(alter))
- except psycopg2.InternalError as e:
- if e.pgcode == '25006':
- # Handle errors due to read-only transactions indicated by pgcode 25006
- # ERROR: cannot execute ALTER ROLE in a read-only transaction
- changed = False
- module.fail_json(msg=e.pgerror, exception=traceback.format_exc())
- return changed
- else:
- raise psycopg2.InternalError(e)
-
- # Grab new role attributes.
- cursor.execute(select, {"user": user})
- new_role_attrs = cursor.fetchone()
-
- # Detect any differences between current_ and new_role_attrs.
- changed = current_role_attrs != new_role_attrs
-
- return changed
-
-
-def user_delete(cursor, user):
- """Try to remove a user. Returns True if successful otherwise False"""
- cursor.execute("SAVEPOINT ansible_pgsql_user_delete")
- try:
- query = 'DROP USER "%s"' % user
- executed_queries.append(query)
- cursor.execute(query)
- except Exception:
- cursor.execute("ROLLBACK TO SAVEPOINT ansible_pgsql_user_delete")
- cursor.execute("RELEASE SAVEPOINT ansible_pgsql_user_delete")
- return False
-
- cursor.execute("RELEASE SAVEPOINT ansible_pgsql_user_delete")
- return True
-
-
-def has_table_privileges(cursor, user, table, privs):
- """
- Return the difference between the privileges that a user already has and
- the privileges that they desire to have.
-
- :returns: tuple of:
- * privileges that they have and were requested
- * privileges they currently hold but were not requested
- * privileges requested that they do not hold
- """
- cur_privs = get_table_privileges(cursor, user, table)
- have_currently = cur_privs.intersection(privs)
- other_current = cur_privs.difference(privs)
- desired = privs.difference(cur_privs)
- return (have_currently, other_current, desired)
-
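-# Usage sketch (illustrative, not part of the original module): grant only
-# what is missing for a user on a table.
-# have, other, desired = has_table_privileges(cursor, 'alice', 'public.orders',
-#                                             frozenset(('SELECT', 'INSERT')))
-# if desired:
-#     grant_table_privileges(cursor, 'alice', 'public.orders', desired)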
-
-def get_table_privileges(cursor, user, table):
- if '.' in table:
- schema, table = table.split('.', 1)
- else:
- schema = 'public'
- query = ("SELECT privilege_type FROM information_schema.role_table_grants "
- "WHERE grantee=%(user)s AND table_name=%(table)s AND table_schema=%(schema)s")
- cursor.execute(query, {'user': user, 'table': table, 'schema': schema})
- return frozenset([x[0] for x in cursor.fetchall()])
-
-
-def grant_table_privileges(cursor, user, table, privs):
- # Note: priv escaped by parse_privs
- privs = ', '.join(privs)
- query = 'GRANT %s ON TABLE %s TO "%s"' % (
- privs, pg_quote_identifier(table, 'table'), user)
- executed_queries.append(query)
- cursor.execute(query)
-
-
-def revoke_table_privileges(cursor, user, table, privs):
- # Note: priv escaped by parse_privs
- privs = ', '.join(privs)
- query = 'REVOKE %s ON TABLE %s FROM "%s"' % (
- privs, pg_quote_identifier(table, 'table'), user)
- executed_queries.append(query)
- cursor.execute(query)
-
-
-def get_database_privileges(cursor, user, db):
- priv_map = {
- 'C': 'CREATE',
- 'T': 'TEMPORARY',
- 'c': 'CONNECT',
- }
- query = 'SELECT datacl FROM pg_database WHERE datname = %s'
- cursor.execute(query, (db,))
- datacl = cursor.fetchone()[0]
- if datacl is None:
- return set()
- r = re.search(r'%s\\?"?=(C?T?c?)/[^,]+,?' % user, datacl)
- if r is None:
- return set()
- o = set()
- for v in r.group(1):
- o.add(priv_map[v])
- return normalize_privileges(o, 'database')
-
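-# Illustrative datacl value matched by the regular expression above
-# (role and database names are hypothetical):
-# {postgres=CTc/postgres,alice=c/postgres}
-# For user 'alice' the captured group is 'c', which maps to {'CONNECT'}.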
-
-def has_database_privileges(cursor, user, db, privs):
- """
- Return the difference between the privileges that a user already has and
- the privileges that they desire to have.
-
- :returns: tuple of:
- * privileges that they have and were requested
- * privileges they currently hold but were not requested
- * privileges requested that they do not hold
- """
- cur_privs = get_database_privileges(cursor, user, db)
- have_currently = cur_privs.intersection(privs)
- other_current = cur_privs.difference(privs)
- desired = privs.difference(cur_privs)
- return (have_currently, other_current, desired)
-
-
-def grant_database_privileges(cursor, user, db, privs):
- # Note: priv escaped by parse_privs
- privs = ', '.join(privs)
- if user == "PUBLIC":
- query = 'GRANT %s ON DATABASE %s TO PUBLIC' % (
- privs, pg_quote_identifier(db, 'database'))
- else:
- query = 'GRANT %s ON DATABASE %s TO "%s"' % (
- privs, pg_quote_identifier(db, 'database'), user)
-
- executed_queries.append(query)
- cursor.execute(query)
-
-
-def revoke_database_privileges(cursor, user, db, privs):
- # Note: priv escaped by parse_privs
- privs = ', '.join(privs)
- if user == "PUBLIC":
- query = 'REVOKE %s ON DATABASE %s FROM PUBLIC' % (
- privs, pg_quote_identifier(db, 'database'))
- else:
- query = 'REVOKE %s ON DATABASE %s FROM "%s"' % (
- privs, pg_quote_identifier(db, 'database'), user)
-
- executed_queries.append(query)
- cursor.execute(query)
-
-
-def revoke_privileges(cursor, user, privs):
- if privs is None:
- return False
-
- revoke_funcs = dict(table=revoke_table_privileges,
- database=revoke_database_privileges)
- check_funcs = dict(table=has_table_privileges,
- database=has_database_privileges)
-
- changed = False
- for type_ in privs:
- for name, privileges in iteritems(privs[type_]):
- # Check whether any of the privileges requested to be removed are
- # currently granted to the user
- differences = check_funcs[type_](cursor, user, name, privileges)
- if differences[0]:
- revoke_funcs[type_](cursor, user, name, privileges)
- changed = True
- return changed
-
-
-def grant_privileges(cursor, user, privs):
- if privs is None:
- return False
-
- grant_funcs = dict(table=grant_table_privileges,
- database=grant_database_privileges)
- check_funcs = dict(table=has_table_privileges,
- database=has_database_privileges)
-
- changed = False
- for type_ in privs:
- for name, privileges in iteritems(privs[type_]):
- # Check whether any of the privileges requested for the user are
- # currently missing
- differences = check_funcs[type_](cursor, user, name, privileges)
- if differences[2]:
- grant_funcs[type_](cursor, user, name, privileges)
- changed = True
- return changed
-
-
-def parse_role_attrs(cursor, role_attr_flags):
- """
- Parse role attributes string for user creation.
- Format:
-
- attributes[,attributes,...]
-
- Where:
-
- attributes := CREATEDB,CREATEROLE,NOSUPERUSER,...
- [ "[NO]SUPERUSER","[NO]CREATEROLE", "[NO]CREATEDB",
- "[NO]INHERIT", "[NO]LOGIN", "[NO]REPLICATION",
- "[NO]BYPASSRLS" ]
-
- Note: "[NO]BYPASSRLS" role attribute introduced in 9.5
- Note: "[NO]CREATEUSER" role attribute is deprecated.
-
- """
- flags = frozenset(role.upper() for role in role_attr_flags.split(',') if role)
-
- valid_flags = frozenset(itertools.chain(FLAGS, get_valid_flags_by_version(cursor)))
- valid_flags = frozenset(itertools.chain(valid_flags, ('NO%s' % flag for flag in valid_flags)))
-
- if not flags.issubset(valid_flags):
- raise InvalidFlagsError('Invalid role_attr_flags specified: %s' %
- ' '.join(flags.difference(valid_flags)))
-
- return ' '.join(flags)
-
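-# For example (illustrative input/output):
-# parse_role_attrs(cursor, 'CREATEDB,nologin') -> 'CREATEDB NOLOGIN'
-# (order may vary, since the flags are held in a frozenset)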
-
-def normalize_privileges(privs, type_):
- new_privs = set(privs)
- if 'ALL' in new_privs:
- new_privs.update(VALID_PRIVS[type_])
- new_privs.remove('ALL')
- if 'TEMP' in new_privs:
- new_privs.add('TEMPORARY')
- new_privs.remove('TEMP')
-
- return new_privs
-
-
-def parse_privs(privs, db):
- """
- Parse privilege string to determine permissions for database db.
- Format:
-
- privileges[/privileges/...]
-
- Where:
-
- privileges := DATABASE_PRIVILEGES[,DATABASE_PRIVILEGES,...] |
- TABLE_NAME:TABLE_PRIVILEGES[,TABLE_PRIVILEGES,...]
- """
- if privs is None:
- return privs
-
- o_privs = {
- 'database': {},
- 'table': {}
- }
- for token in privs.split('/'):
- if ':' not in token:
- type_ = 'database'
- name = db
- priv_set = frozenset(x.strip().upper()
- for x in token.split(',') if x.strip())
- else:
- type_ = 'table'
- name, privileges = token.split(':', 1)
- priv_set = frozenset(x.strip().upper()
- for x in privileges.split(',') if x.strip())
-
- if not priv_set.issubset(VALID_PRIVS[type_]):
- raise InvalidPrivsError('Invalid privs specified for %s: %s' %
- (type_, ' '.join(priv_set.difference(VALID_PRIVS[type_]))))
-
- priv_set = normalize_privileges(priv_set, type_)
- o_privs[type_][name] = priv_set
-
- return o_privs
-
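-# For example (illustrative): parse_privs('CONNECT/products:SELECT,INSERT', 'acme')
-# returns:
-# {'database': {'acme': {'CONNECT'}},
-#  'table': {'products': {'SELECT', 'INSERT'}}}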
-
-def get_valid_flags_by_version(cursor):
- """
- Some role attributes were only introduced in certain versions. We want to
- compile a list of flags that are valid for the current Postgres version.
- """
- current_version = cursor.connection.server_version
-
- return [
- flag
- for flag, version_introduced in FLAGS_BY_VERSION.items()
- if current_version >= version_introduced
- ]
-
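-# For example (illustrative): on a server where cursor.connection.server_version
-# is 90600 (PostgreSQL 9.6), this returns ['BYPASSRLS'], since 90600 >= 90500.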
-
-def get_comment(cursor, user):
- """Get user's comment."""
- query = ("SELECT pg_catalog.shobj_description(r.oid, 'pg_authid') "
- "FROM pg_catalog.pg_roles r "
- "WHERE r.rolname = %(user)s")
- cursor.execute(query, {'user': user})
- return cursor.fetchone()[0]
-
-
-def add_comment(cursor, user, comment):
- """Add comment on user."""
- if comment != get_comment(cursor, user):
- query = 'COMMENT ON ROLE "%s" IS ' % user
- cursor.execute(query + '%(comment)s', {'comment': comment})
- executed_queries.append(cursor.mogrify(query + '%(comment)s', {'comment': comment}))
- return True
- else:
- return False
-
-
-# ===========================================
-# Module execution.
-#
-
-def main():
- argument_spec = postgres_common_argument_spec()
- argument_spec.update(
- user=dict(type='str', required=True, aliases=['name']),
- password=dict(type='str', default=None, no_log=True),
- state=dict(type='str', default='present', choices=['absent', 'present']),
- priv=dict(type='str', default=None),
- db=dict(type='str', default='', aliases=['login_db']),
- fail_on_user=dict(type='bool', default='yes', aliases=['fail_on_role']),
- role_attr_flags=dict(type='str', default=''),
- encrypted=dict(type='bool', default='yes'),
- no_password_changes=dict(type='bool', default='no'),
- expires=dict(type='str', default=None),
- conn_limit=dict(type='int', default=None),
- session_role=dict(type='str'),
- groups=dict(type='list', elements='str'),
- comment=dict(type='str', default=None),
- )
- module = AnsibleModule(
- argument_spec=argument_spec,
- supports_check_mode=True
- )
-
- user = module.params["user"]
- password = module.params["password"]
- state = module.params["state"]
- fail_on_user = module.params["fail_on_user"]
- if module.params['db'] == '' and module.params["priv"] is not None:
- module.fail_json(msg="privileges require a database to be specified")
- privs = parse_privs(module.params["priv"], module.params["db"])
- no_password_changes = module.params["no_password_changes"]
- if module.params["encrypted"]:
- encrypted = "ENCRYPTED"
- else:
- encrypted = "UNENCRYPTED"
- expires = module.params["expires"]
- conn_limit = module.params["conn_limit"]
- role_attr_flags = module.params["role_attr_flags"]
- groups = module.params["groups"]
- if groups:
- groups = [e.strip() for e in groups]
- comment = module.params["comment"]
-
- conn_params = get_conn_params(module, module.params, warn_db_default=False)
- db_connection = connect_to_db(module, conn_params)
- cursor = db_connection.cursor(cursor_factory=DictCursor)
-
- try:
- role_attr_flags = parse_role_attrs(cursor, role_attr_flags)
- except InvalidFlagsError as e:
- module.fail_json(msg=to_native(e), exception=traceback.format_exc())
-
- kw = dict(user=user)
- changed = False
- user_removed = False
-
- if state == "present":
- if user_exists(cursor, user):
- try:
- changed = user_alter(db_connection, module, user, password,
- role_attr_flags, encrypted, expires, no_password_changes, conn_limit)
- except SQLParseError as e:
- module.fail_json(msg=to_native(e), exception=traceback.format_exc())
- else:
- try:
- changed = user_add(cursor, user, password,
- role_attr_flags, encrypted, expires, conn_limit)
- except psycopg2.ProgrammingError as e:
- module.fail_json(msg="Unable to add user with given requirement "
- "due to : %s" % to_native(e),
- exception=traceback.format_exc())
- except SQLParseError as e:
- module.fail_json(msg=to_native(e), exception=traceback.format_exc())
- try:
- changed = grant_privileges(cursor, user, privs) or changed
- except SQLParseError as e:
- module.fail_json(msg=to_native(e), exception=traceback.format_exc())
-
- if groups:
- target_roles = []
- target_roles.append(user)
- pg_membership = PgMembership(module, cursor, groups, target_roles)
- changed = pg_membership.grant() or changed
- executed_queries.extend(pg_membership.executed_queries)
-
- if comment is not None:
- try:
- changed = add_comment(cursor, user, comment) or changed
- except Exception as e:
- module.fail_json(msg='Unable to add comment on role: %s' % to_native(e),
- exception=traceback.format_exc())
-
- else:
- if user_exists(cursor, user):
- if module.check_mode:
- changed = True
- kw['user_removed'] = True
- else:
- try:
- changed = revoke_privileges(cursor, user, privs)
- user_removed = user_delete(cursor, user)
- except SQLParseError as e:
- module.fail_json(msg=to_native(e), exception=traceback.format_exc())
- changed = changed or user_removed
- if fail_on_user and not user_removed:
- msg = "Unable to remove user"
- module.fail_json(msg=msg)
- kw['user_removed'] = user_removed
-
- if changed:
- if module.check_mode:
- db_connection.rollback()
- else:
- db_connection.commit()
-
- kw['changed'] = changed
- kw['queries'] = executed_queries
- module.exit_json(**kw)
-
-
-if __name__ == '__main__':
- main()
diff --git a/test/support/integration/plugins/modules/rabbitmq_plugin.py b/test/support/integration/plugins/modules/rabbitmq_plugin.py
deleted file mode 100644
index 301bbfe2..00000000
--- a/test/support/integration/plugins/modules/rabbitmq_plugin.py
+++ /dev/null
@@ -1,180 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-
-# Copyright: (c) 2013, Chatham Financial <oss@chathamfinancial.com>
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {
- 'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'
-}
-
-
-DOCUMENTATION = '''
----
-module: rabbitmq_plugin
-short_description: Manage RabbitMQ plugins
-description:
- - This module can be used to enable or disable RabbitMQ plugins.
-version_added: "1.1"
-author:
- - Chris Hoffman (@chrishoffman)
-options:
- names:
- description:
- - Comma-separated list of plugin names. Also accepts a single plugin name.
- required: true
- aliases: [name]
- new_only:
- description:
- - Only enable missing plugins.
- - Does not disable plugins that are not in the names list.
- type: bool
- default: "no"
- state:
- description:
- - Specify if plugins are to be enabled or disabled.
- default: enabled
- choices: [enabled, disabled]
- prefix:
- description:
- - Specify a custom install prefix for RabbitMQ.
- version_added: "1.3"
-'''
-
-EXAMPLES = '''
-- name: Enables the rabbitmq_management plugin
- rabbitmq_plugin:
- names: rabbitmq_management
- state: enabled
-
-- name: Enable multiple rabbitmq plugins
- rabbitmq_plugin:
- names: rabbitmq_management,rabbitmq_management_visualiser
- state: enabled
-
-- name: Disable plugin
- rabbitmq_plugin:
- names: rabbitmq_management
- state: disabled
-
-- name: Enable every plugin in the list, keeping existing plugins
- rabbitmq_plugin:
- names: rabbitmq_management,rabbitmq_management_visualiser,rabbitmq_shovel,rabbitmq_shovel_management
- state: enabled
- new_only: 'yes'
-'''
-
-RETURN = '''
-enabled:
- description: list of plugins enabled during task run
- returned: always
- type: list
- sample: ["rabbitmq_management"]
-disabled:
- description: list of plugins disabled during task run
- returned: always
- type: list
- sample: ["rabbitmq_management"]
-'''
-
-import os
-from ansible.module_utils.basic import AnsibleModule
-
-
-class RabbitMqPlugins(object):
-
- def __init__(self, module):
- self.module = module
- bin_path = ''
- if module.params['prefix']:
- if os.path.isdir(os.path.join(module.params['prefix'], 'bin')):
- bin_path = os.path.join(module.params['prefix'], 'bin')
- elif os.path.isdir(os.path.join(module.params['prefix'], 'sbin')):
- bin_path = os.path.join(module.params['prefix'], 'sbin')
- else:
- # No such path exists.
- module.fail_json(msg="No binary folder in prefix %s" % module.params['prefix'])
-
- self._rabbitmq_plugins = os.path.join(bin_path, "rabbitmq-plugins")
- else:
- self._rabbitmq_plugins = module.get_bin_path('rabbitmq-plugins', True)
-
- def _exec(self, args, run_in_check_mode=False):
- if not self.module.check_mode or (self.module.check_mode and run_in_check_mode):
- cmd = [self._rabbitmq_plugins]
- rc, out, err = self.module.run_command(cmd + args, check_rc=True)
- return out.splitlines()
- return list()
-
- def get_all(self):
- list_output = self._exec(['list', '-E', '-m'], True)
- plugins = []
- for plugin in list_output:
- if not plugin:
- break
- plugins.append(plugin)
-
- return plugins
-
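- # Note: 'rabbitmq-plugins list -E -m' prints only explicitly enabled
- # plugins, one bare name per line (e.g. 'rabbitmq_management'), which is
- # why the loop above simply collects lines until an empty one.
-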
- def enable(self, name):
- self._exec(['enable', name])
-
- def disable(self, name):
- self._exec(['disable', name])
-
-
-def main():
- arg_spec = dict(
- names=dict(required=True, aliases=['name']),
- new_only=dict(default='no', type='bool'),
- state=dict(default='enabled', choices=['enabled', 'disabled']),
- prefix=dict(required=False, default=None)
- )
- module = AnsibleModule(
- argument_spec=arg_spec,
- supports_check_mode=True
- )
-
- result = dict()
- names = module.params['names'].split(',')
- new_only = module.params['new_only']
- state = module.params['state']
-
- rabbitmq_plugins = RabbitMqPlugins(module)
- enabled_plugins = rabbitmq_plugins.get_all()
-
- enabled = []
- disabled = []
- if state == 'enabled':
- if not new_only:
- for plugin in enabled_plugins:
- if " " in plugin:
- continue
- if plugin not in names:
- rabbitmq_plugins.disable(plugin)
- disabled.append(plugin)
-
- for name in names:
- if name not in enabled_plugins:
- rabbitmq_plugins.enable(name)
- enabled.append(name)
- else:
- for plugin in enabled_plugins:
- if plugin in names:
- rabbitmq_plugins.disable(plugin)
- disabled.append(plugin)
-
- result['changed'] = len(enabled) > 0 or len(disabled) > 0
- result['enabled'] = enabled
- result['disabled'] = disabled
- module.exit_json(**result)
-
-
-if __name__ == '__main__':
- main()
diff --git a/test/support/integration/plugins/modules/rabbitmq_queue.py b/test/support/integration/plugins/modules/rabbitmq_queue.py
deleted file mode 100644
index 567ec813..00000000
--- a/test/support/integration/plugins/modules/rabbitmq_queue.py
+++ /dev/null
@@ -1,257 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-
-# Copyright: (c) 2015, Manuel Sousa <manuel.sousa@gmail.com>
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: rabbitmq_queue
-author: Manuel Sousa (@manuel-sousa)
-version_added: "2.0"
-
-short_description: Manage rabbitMQ queues
-description:
- - This module uses the RabbitMQ REST API to create/delete queues
-requirements: [ "requests >= 1.0.0" ]
-options:
- name:
- description:
- - Name of the queue
- required: true
- state:
- description:
- - Whether the queue should be present or absent
- choices: [ "present", "absent" ]
- default: present
- durable:
- description:
- - Whether the queue is durable or not
- type: bool
- default: 'yes'
- auto_delete:
- description:
- - Whether the queue should delete itself after all consumers have unsubscribed from it
- type: bool
- default: 'no'
- message_ttl:
- description:
- - How long a message can live in the queue before it is discarded (milliseconds)
- default: forever
- auto_expires:
- description:
- - How long a queue can be unused before it is automatically deleted (milliseconds)
- default: forever
- max_length:
- description:
- - How many messages the queue can contain before it starts rejecting new ones
- default: no limit
- dead_letter_exchange:
- description:
- - Optional name of an exchange to which messages will be republished if they
- - are rejected or expire
- dead_letter_routing_key:
- description:
- - Optional replacement routing key to use when a message is dead-lettered.
- - The original routing key will be used if unset
- max_priority:
- description:
- - Maximum number of priority levels for the queue to support.
- - If not set, the queue will not support message priorities.
- - Larger numbers indicate higher priority.
- version_added: "2.4"
- arguments:
- description:
- - Extra arguments for the queue. If defined, this argument is a key/value dictionary
- default: {}
-extends_documentation_fragment:
- - rabbitmq
-'''
-
-EXAMPLES = '''
-# Create a queue
-- rabbitmq_queue:
- name: myQueue
-
-# Create a queue on remote host
-- rabbitmq_queue:
- name: myRemoteQueue
- login_user: user
- login_password: secret
- login_host: remote.example.org
-'''
-
-import json
-import traceback
-
-REQUESTS_IMP_ERR = None
-try:
- import requests
- HAS_REQUESTS = True
-except ImportError:
- REQUESTS_IMP_ERR = traceback.format_exc()
- HAS_REQUESTS = False
-
-from ansible.module_utils.basic import AnsibleModule, missing_required_lib
-from ansible.module_utils.six.moves.urllib import parse as urllib_parse
-from ansible.module_utils.rabbitmq import rabbitmq_argument_spec
-
-
-def main():
-
- argument_spec = rabbitmq_argument_spec()
- argument_spec.update(
- dict(
- state=dict(default='present', choices=['present', 'absent'], type='str'),
- name=dict(required=True, type='str'),
- durable=dict(default=True, type='bool'),
- auto_delete=dict(default=False, type='bool'),
- message_ttl=dict(default=None, type='int'),
- auto_expires=dict(default=None, type='int'),
- max_length=dict(default=None, type='int'),
- dead_letter_exchange=dict(default=None, type='str'),
- dead_letter_routing_key=dict(default=None, type='str'),
- arguments=dict(default=dict(), type='dict'),
- max_priority=dict(default=None, type='int')
- )
- )
- module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
-
- url = "%s://%s:%s/api/queues/%s/%s" % (
- module.params['login_protocol'],
- module.params['login_host'],
- module.params['login_port'],
- urllib_parse.quote(module.params['vhost'], ''),
- module.params['name']
- )
-
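- # For illustration, with hypothetical defaults (login_protocol=http,
- # login_host=localhost, login_port=15672, vhost=/), the URL built above
- # for a queue named 'myQueue' is:
- # http://localhost:15672/api/queues/%2F/myQueue
-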
- if not HAS_REQUESTS:
- module.fail_json(msg=missing_required_lib("requests"), exception=REQUESTS_IMP_ERR)
-
- result = dict(changed=False, name=module.params['name'])
-
- # Check if queue already exists
- r = requests.get(url, auth=(module.params['login_user'], module.params['login_password']),
- verify=module.params['ca_cert'], cert=(module.params['client_cert'], module.params['client_key']))
-
- if r.status_code == 200:
- queue_exists = True
- response = r.json()
- elif r.status_code == 404:
- queue_exists = False
- response = r.text
- else:
- module.fail_json(
- msg="Invalid response from RESTAPI when trying to check if queue exists",
- details=r.text
- )
-
- if module.params['state'] == 'present':
- change_required = not queue_exists
- else:
- change_required = queue_exists
-
- # Check if attributes change on existing queue
- if not change_required and r.status_code == 200 and module.params['state'] == 'present':
- if not (
- response['durable'] == module.params['durable'] and
- response['auto_delete'] == module.params['auto_delete'] and
- (
- ('x-message-ttl' in response['arguments'] and response['arguments']['x-message-ttl'] == module.params['message_ttl']) or
- ('x-message-ttl' not in response['arguments'] and module.params['message_ttl'] is None)
- ) and
- (
- ('x-expires' in response['arguments'] and response['arguments']['x-expires'] == module.params['auto_expires']) or
- ('x-expires' not in response['arguments'] and module.params['auto_expires'] is None)
- ) and
- (
- ('x-max-length' in response['arguments'] and response['arguments']['x-max-length'] == module.params['max_length']) or
- ('x-max-length' not in response['arguments'] and module.params['max_length'] is None)
- ) and
- (
- ('x-dead-letter-exchange' in response['arguments'] and
- response['arguments']['x-dead-letter-exchange'] == module.params['dead_letter_exchange']) or
- ('x-dead-letter-exchange' not in response['arguments'] and module.params['dead_letter_exchange'] is None)
- ) and
- (
- ('x-dead-letter-routing-key' in response['arguments'] and
- response['arguments']['x-dead-letter-routing-key'] == module.params['dead_letter_routing_key']) or
- ('x-dead-letter-routing-key' not in response['arguments'] and module.params['dead_letter_routing_key'] is None)
- ) and
- (
- ('x-max-priority' in response['arguments'] and
- response['arguments']['x-max-priority'] == module.params['max_priority']) or
- ('x-max-priority' not in response['arguments'] and module.params['max_priority'] is None)
- )
- ):
- module.fail_json(
- msg="RabbitMQ RESTAPI doesn't support attribute changes for existing queues",
- )
-
- # Copy parameters to arguments as used by RabbitMQ
- for k, v in {
- 'message_ttl': 'x-message-ttl',
- 'auto_expires': 'x-expires',
- 'max_length': 'x-max-length',
- 'dead_letter_exchange': 'x-dead-letter-exchange',
- 'dead_letter_routing_key': 'x-dead-letter-routing-key',
- 'max_priority': 'x-max-priority'
- }.items():
- if module.params[k] is not None:
- module.params['arguments'][v] = module.params[k]
-
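- # For example (illustrative): message_ttl=60000 becomes
- # arguments['x-message-ttl'] = 60000, the x-argument RabbitMQ expects
- # in the queue declaration body.
-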
- # Exit if check_mode
- if module.check_mode:
- result['changed'] = change_required
- result['details'] = response
- result['arguments'] = module.params['arguments']
- module.exit_json(**result)
-
- # Do changes
- if change_required:
- if module.params['state'] == 'present':
- r = requests.put(
- url,
- auth=(module.params['login_user'], module.params['login_password']),
- headers={"content-type": "application/json"},
- data=json.dumps({
- "durable": module.params['durable'],
- "auto_delete": module.params['auto_delete'],
- "arguments": module.params['arguments']
- }),
- verify=module.params['ca_cert'],
- cert=(module.params['client_cert'], module.params['client_key'])
- )
- elif module.params['state'] == 'absent':
- r = requests.delete(url, auth=(module.params['login_user'], module.params['login_password']),
- verify=module.params['ca_cert'], cert=(module.params['client_cert'], module.params['client_key']))
-
- # RabbitMQ 3.6.7 changed this response code from 204 to 201
- if r.status_code == 204 or r.status_code == 201:
- result['changed'] = True
- module.exit_json(**result)
- else:
- module.fail_json(
- msg="Error creating queue",
- status=r.status_code,
- details=r.text
- )
-
- else:
- module.exit_json(
- changed=False,
- name=module.params['name']
- )
-
-
-if __name__ == '__main__':
- main()
diff --git a/test/support/integration/plugins/modules/s3_bucket.py b/test/support/integration/plugins/modules/s3_bucket.py
deleted file mode 100644
index f35cf53b..00000000
--- a/test/support/integration/plugins/modules/s3_bucket.py
+++ /dev/null
@@ -1,740 +0,0 @@
-#!/usr/bin/python
-#
-# This is a free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# This Ansible library is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this library. If not, see <http://www.gnu.org/licenses/>.
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['stableinterface'],
- 'supported_by': 'core'}
-
-
-DOCUMENTATION = '''
----
-module: s3_bucket
-short_description: Manage S3 buckets in AWS, DigitalOcean, Ceph, Walrus, FakeS3 and StorageGRID
-description:
- - Manage S3 buckets in AWS, DigitalOcean, Ceph, Walrus, FakeS3 and StorageGRID
-version_added: "2.0"
-requirements: [ boto3 ]
-author: "Rob White (@wimnat)"
-options:
- force:
- description:
- - When trying to delete a bucket, delete all keys (including versions and delete markers)
- in the bucket first (an S3 bucket must be empty to be deleted successfully)
- type: bool
- default: 'no'
- name:
- description:
- - Name of the s3 bucket
- required: true
- type: str
- policy:
- description:
- - The JSON policy as a string.
- type: json
- s3_url:
- description:
- - S3 URL endpoint for usage with DigitalOcean, Ceph, Eucalyptus and fakes3 etc.
- - Assumes AWS if not specified.
- - For Walrus, use the FQDN of the endpoint without scheme or path.
- aliases: [ S3_URL ]
- type: str
- ceph:
- description:
- - Enable API compatibility with Ceph. It takes into account the S3 API subset working
- with Ceph in order to provide the same module behaviour where possible.
- type: bool
- version_added: "2.2"
- requester_pays:
- description:
- - With Requester Pays buckets, the requester instead of the bucket owner pays the cost
- of the request and the data download from the bucket.
- type: bool
- default: False
- state:
- description:
- - Create or remove the s3 bucket
- required: false
- default: present
- choices: [ 'present', 'absent' ]
- type: str
- tags:
- description:
- - tags dict to apply to bucket
- type: dict
- purge_tags:
- description:
- - whether to remove tags that aren't present in the C(tags) parameter
- type: bool
- default: True
- version_added: "2.9"
- versioning:
- description:
- - Whether versioning is enabled or disabled (note that once versioning is enabled, it can only be suspended)
- type: bool
- encryption:
- description:
- - Describes the default server-side encryption to apply to new objects in the bucket.
- In order to remove the server-side encryption, the encryption needs to be set to 'none' explicitly.
- choices: [ 'none', 'AES256', 'aws:kms' ]
- version_added: "2.9"
- type: str
- encryption_key_id:
- description: KMS master key ID to use for the default encryption. This parameter is only allowed when encryption is set to aws:kms. If
- not specified, it defaults to the AWS-provided KMS key.
- version_added: "2.9"
- type: str
-extends_documentation_fragment:
- - aws
- - ec2
-notes:
- - If the C(requestPayment), C(policy), C(tagging) or C(versioning)
- operations/API aren't implemented by the endpoint, the module doesn't fail
- as long as the related parameters are left at their defaults:
- I(requester_pays) is C(False) and I(policy), I(tags), and I(versioning) are C(None).
-'''
-
-EXAMPLES = '''
-# Note: These examples do not set authentication details, see the AWS Guide for details.
-
-# Create a simple s3 bucket
-- s3_bucket:
- name: mys3bucket
- state: present
-
-# Create a simple s3 bucket on Ceph Rados Gateway
-- s3_bucket:
- name: mys3bucket
- s3_url: http://your-ceph-rados-gateway-server.xxx
- ceph: true
-
-# Remove an s3 bucket and any keys it contains
-- s3_bucket:
- name: mys3bucket
- state: absent
- force: yes
-
-# Create a bucket, add a policy from a file, enable requester pays, enable versioning and tag
-- s3_bucket:
- name: mys3bucket
- policy: "{{ lookup('file','policy.json') }}"
- requester_pays: yes
- versioning: yes
- tags:
- example: tag1
- another: tag2
-
-# Create a simple DigitalOcean Spaces bucket using their provided regional endpoint
-- s3_bucket:
- name: mydobucket
- s3_url: 'https://nyc3.digitaloceanspaces.com'
-
-# Create a bucket with AES256 encryption
-- s3_bucket:
- name: mys3bucket
- state: present
- encryption: "AES256"
-
-# Create a bucket with aws:kms encryption, KMS key
-- s3_bucket:
- name: mys3bucket
- state: present
- encryption: "aws:kms"
- encryption_key_id: "arn:aws:kms:us-east-1:1234/5678example"
-
-# Create a bucket with aws:kms encryption, default key
-- s3_bucket:
- name: mys3bucket
- state: present
- encryption: "aws:kms"
-'''
-
-import json
-import os
-import time
-
-from ansible.module_utils.six.moves.urllib.parse import urlparse
-from ansible.module_utils.six import string_types
-from ansible.module_utils.basic import to_text
-from ansible.module_utils.aws.core import AnsibleAWSModule, is_boto3_error_code
-from ansible.module_utils.ec2 import compare_policies, ec2_argument_spec, boto3_tag_list_to_ansible_dict, ansible_dict_to_boto3_tag_list
-from ansible.module_utils.ec2 import get_aws_connection_info, boto3_conn, AWSRetry
-
-try:
- from botocore.exceptions import BotoCoreError, ClientError, EndpointConnectionError, WaiterError
-except ImportError:
- pass # handled by AnsibleAWSModule
-
-
-def create_or_update_bucket(s3_client, module, location):
-
- policy = module.params.get("policy")
- name = module.params.get("name")
- requester_pays = module.params.get("requester_pays")
- tags = module.params.get("tags")
- purge_tags = module.params.get("purge_tags")
- versioning = module.params.get("versioning")
- encryption = module.params.get("encryption")
- encryption_key_id = module.params.get("encryption_key_id")
- changed = False
- result = {}
-
- try:
- bucket_is_present = bucket_exists(s3_client, name)
- except EndpointConnectionError as e:
- module.fail_json_aws(e, msg="Invalid endpoint provided: %s" % to_text(e))
- except (BotoCoreError, ClientError) as e:
- module.fail_json_aws(e, msg="Failed to check bucket presence")
-
- if not bucket_is_present:
- try:
- bucket_changed = create_bucket(s3_client, name, location)
- s3_client.get_waiter('bucket_exists').wait(Bucket=name)
- changed = changed or bucket_changed
- except WaiterError as e:
- module.fail_json_aws(e, msg='An error occurred waiting for the bucket to become available')
- except (BotoCoreError, ClientError) as e:
- module.fail_json_aws(e, msg="Failed while creating bucket")
-
- # Versioning
- try:
- versioning_status = get_bucket_versioning(s3_client, name)
- except BotoCoreError as exp:
- module.fail_json_aws(exp, msg="Failed to get bucket versioning")
- except ClientError as exp:
- if exp.response['Error']['Code'] != 'NotImplemented' or versioning is not None:
- module.fail_json_aws(exp, msg="Failed to get bucket versioning")
- else:
- if versioning is not None:
- required_versioning = None
- if versioning and versioning_status.get('Status') != "Enabled":
- required_versioning = 'Enabled'
- elif not versioning and versioning_status.get('Status') == "Enabled":
- required_versioning = 'Suspended'
-
- if required_versioning:
- try:
- put_bucket_versioning(s3_client, name, required_versioning)
- changed = True
- except (BotoCoreError, ClientError) as e:
- module.fail_json_aws(e, msg="Failed to update bucket versioning")
-
- versioning_status = wait_versioning_is_applied(module, s3_client, name, required_versioning)
-
- # This output format is there to ensure compatibility with previous versions of the module
- result['versioning'] = {
- 'Versioning': versioning_status.get('Status', 'Disabled'),
- 'MfaDelete': versioning_status.get('MFADelete', 'Disabled'),
- }
-
- # Requester pays
- try:
- requester_pays_status = get_bucket_request_payment(s3_client, name)
- except BotoCoreError as exp:
- module.fail_json_aws(exp, msg="Failed to get bucket request payment")
- except ClientError as exp:
- if exp.response['Error']['Code'] not in ('NotImplemented', 'XNotImplemented') or requester_pays:
- module.fail_json_aws(exp, msg="Failed to get bucket request payment")
- else:
- if requester_pays:
- payer = 'Requester' if requester_pays else 'BucketOwner'
- if requester_pays_status != payer:
- put_bucket_request_payment(s3_client, name, payer)
- requester_pays_status = wait_payer_is_applied(module, s3_client, name, payer, should_fail=False)
- if requester_pays_status is None:
- # The PUT request is quite often not applied immediately,
- # so we retry one more time
- put_bucket_request_payment(s3_client, name, payer)
- requester_pays_status = wait_payer_is_applied(module, s3_client, name, payer, should_fail=True)
- changed = True
-
- result['requester_pays'] = requester_pays
-
- # Policy
- try:
- current_policy = get_bucket_policy(s3_client, name)
- except BotoCoreError as exp:
- module.fail_json_aws(exp, msg="Failed to get bucket policy")
- except ClientError as exp:
- if exp.response['Error']['Code'] != 'NotImplemented' or policy is not None:
- module.fail_json_aws(exp, msg="Failed to get bucket policy")
- else:
- if policy is not None:
- if isinstance(policy, string_types):
- policy = json.loads(policy)
-
- if not policy and current_policy:
- try:
- delete_bucket_policy(s3_client, name)
- except (BotoCoreError, ClientError) as e:
- module.fail_json_aws(e, msg="Failed to delete bucket policy")
- current_policy = wait_policy_is_applied(module, s3_client, name, policy)
- changed = True
- elif compare_policies(current_policy, policy):
- try:
- put_bucket_policy(s3_client, name, policy)
- except (BotoCoreError, ClientError) as e:
- module.fail_json_aws(e, msg="Failed to update bucket policy")
- current_policy = wait_policy_is_applied(module, s3_client, name, policy, should_fail=False)
- if current_policy is None:
-                    # As with request payment, the PUT request occasionally is
-                    # not applied right away, so we retry once more
- put_bucket_policy(s3_client, name, policy)
- current_policy = wait_policy_is_applied(module, s3_client, name, policy, should_fail=True)
- changed = True
-
- result['policy'] = current_policy
-
- # Tags
- try:
- current_tags_dict = get_current_bucket_tags_dict(s3_client, name)
- except BotoCoreError as exp:
- module.fail_json_aws(exp, msg="Failed to get bucket tags")
- except ClientError as exp:
- if exp.response['Error']['Code'] not in ('NotImplemented', 'XNotImplemented') or tags is not None:
- module.fail_json_aws(exp, msg="Failed to get bucket tags")
- else:
- if tags is not None:
- # Tags are always returned as text
- tags = dict((to_text(k), to_text(v)) for k, v in tags.items())
- if not purge_tags:
- # Ensure existing tags that aren't updated by desired tags remain
- current_copy = current_tags_dict.copy()
- current_copy.update(tags)
- tags = current_copy
- if current_tags_dict != tags:
- if tags:
- try:
- put_bucket_tagging(s3_client, name, tags)
- except (BotoCoreError, ClientError) as e:
- module.fail_json_aws(e, msg="Failed to update bucket tags")
- else:
- if purge_tags:
- try:
- delete_bucket_tagging(s3_client, name)
- except (BotoCoreError, ClientError) as e:
- module.fail_json_aws(e, msg="Failed to delete bucket tags")
- current_tags_dict = wait_tags_are_applied(module, s3_client, name, tags)
- changed = True
-
- result['tags'] = current_tags_dict
-
- # Encryption
- if hasattr(s3_client, "get_bucket_encryption"):
- try:
- current_encryption = get_bucket_encryption(s3_client, name)
- except (ClientError, BotoCoreError) as e:
- module.fail_json_aws(e, msg="Failed to get bucket encryption")
- elif encryption is not None:
- module.fail_json(msg="Using bucket encryption requires botocore version >= 1.7.41")
-
- if encryption is not None:
- current_encryption_algorithm = current_encryption.get('SSEAlgorithm') if current_encryption else None
- current_encryption_key = current_encryption.get('KMSMasterKeyID') if current_encryption else None
- if encryption == 'none' and current_encryption_algorithm is not None:
- try:
- delete_bucket_encryption(s3_client, name)
- except (BotoCoreError, ClientError) as e:
- module.fail_json_aws(e, msg="Failed to delete bucket encryption")
- current_encryption = wait_encryption_is_applied(module, s3_client, name, None)
- changed = True
-        elif encryption != 'none' and (encryption != current_encryption_algorithm or
-                                       (encryption == 'aws:kms' and current_encryption_key != encryption_key_id)):
- expected_encryption = {'SSEAlgorithm': encryption}
- if encryption == 'aws:kms' and encryption_key_id is not None:
- expected_encryption.update({'KMSMasterKeyID': encryption_key_id})
- try:
- put_bucket_encryption(s3_client, name, expected_encryption)
- except (BotoCoreError, ClientError) as e:
- module.fail_json_aws(e, msg="Failed to set bucket encryption")
- current_encryption = wait_encryption_is_applied(module, s3_client, name, expected_encryption)
- changed = True
-
- result['encryption'] = current_encryption
-
- module.exit_json(changed=changed, name=name, **result)
-
-
-def bucket_exists(s3_client, bucket_name):
-    # head_bucket proved to be really inconsistent, so we list all buckets and
-    # look for ours instead, even though we know it's less performant :(
-    all_buckets = s3_client.list_buckets()['Buckets']
-    return any(bucket['Name'] == bucket_name for bucket in all_buckets)
-
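-
-# A rough equivalent using head_bucket (an illustrative sketch, not used by
-# this module): head_bucket is the direct way to probe for a bucket, but its
-# error reporting proved too inconsistent across S3-compatible endpoints.
-def bucket_exists_via_head(s3_client, bucket_name):
-    try:
-        s3_client.head_bucket(Bucket=bucket_name)
-        return True
-    except ClientError as e:
-        # A missing bucket surfaces as an HTTP 404 error code
-        if e.response['Error']['Code'] in ('404', 'NoSuchBucket'):
-            return False
-        raise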
-
-@AWSRetry.exponential_backoff(max_delay=120)
-def create_bucket(s3_client, bucket_name, location):
- try:
- configuration = {}
- if location not in ('us-east-1', None):
- configuration['LocationConstraint'] = location
- if len(configuration) > 0:
- s3_client.create_bucket(Bucket=bucket_name, CreateBucketConfiguration=configuration)
- else:
- s3_client.create_bucket(Bucket=bucket_name)
- return True
- except ClientError as e:
- error_code = e.response['Error']['Code']
- if error_code == 'BucketAlreadyOwnedByYou':
-            # We should never get here, since create_or_update_bucket checks
-            # bucket presence before calling this function. However, the AWS API
-            # sometimes fails to report bucket presence, so we catch this exception
- return False
- else:
- raise e
-
-
-@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket'])
-def put_bucket_tagging(s3_client, bucket_name, tags):
- s3_client.put_bucket_tagging(Bucket=bucket_name, Tagging={'TagSet': ansible_dict_to_boto3_tag_list(tags)})
-
-
-@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket'])
-def put_bucket_policy(s3_client, bucket_name, policy):
- s3_client.put_bucket_policy(Bucket=bucket_name, Policy=json.dumps(policy))
-
-
-@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket'])
-def delete_bucket_policy(s3_client, bucket_name):
- s3_client.delete_bucket_policy(Bucket=bucket_name)
-
-
-@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket'])
-def get_bucket_policy(s3_client, bucket_name):
- try:
- current_policy = json.loads(s3_client.get_bucket_policy(Bucket=bucket_name).get('Policy'))
- except ClientError as e:
- if e.response['Error']['Code'] == 'NoSuchBucketPolicy':
- current_policy = None
- else:
- raise e
- return current_policy
-
-
-@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket'])
-def put_bucket_request_payment(s3_client, bucket_name, payer):
- s3_client.put_bucket_request_payment(Bucket=bucket_name, RequestPaymentConfiguration={'Payer': payer})
-
-
-@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket'])
-def get_bucket_request_payment(s3_client, bucket_name):
- return s3_client.get_bucket_request_payment(Bucket=bucket_name).get('Payer')
-
-
-@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket'])
-def get_bucket_versioning(s3_client, bucket_name):
- return s3_client.get_bucket_versioning(Bucket=bucket_name)
-
-
-@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket'])
-def put_bucket_versioning(s3_client, bucket_name, required_versioning):
- s3_client.put_bucket_versioning(Bucket=bucket_name, VersioningConfiguration={'Status': required_versioning})
-
-
-@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket'])
-def get_bucket_encryption(s3_client, bucket_name):
- try:
- result = s3_client.get_bucket_encryption(Bucket=bucket_name)
- return result.get('ServerSideEncryptionConfiguration', {}).get('Rules', [])[0].get('ApplyServerSideEncryptionByDefault')
- except ClientError as e:
- if e.response['Error']['Code'] == 'ServerSideEncryptionConfigurationNotFoundError':
- return None
- else:
- raise e
- except (IndexError, KeyError):
- return None
-
-
-@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket'])
-def put_bucket_encryption(s3_client, bucket_name, encryption):
- server_side_encryption_configuration = {'Rules': [{'ApplyServerSideEncryptionByDefault': encryption}]}
- s3_client.put_bucket_encryption(Bucket=bucket_name, ServerSideEncryptionConfiguration=server_side_encryption_configuration)
-
-
-@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket'])
-def delete_bucket_tagging(s3_client, bucket_name):
- s3_client.delete_bucket_tagging(Bucket=bucket_name)
-
-
-@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket'])
-def delete_bucket_encryption(s3_client, bucket_name):
- s3_client.delete_bucket_encryption(Bucket=bucket_name)
-
-
-@AWSRetry.exponential_backoff(max_delay=120)
-def delete_bucket(s3_client, bucket_name):
- try:
- s3_client.delete_bucket(Bucket=bucket_name)
- except ClientError as e:
- if e.response['Error']['Code'] == 'NoSuchBucket':
-            # This means the bucket was probably already being deleted when we
-            # checked its existence, so we just ignore the error
- pass
- else:
- raise e
-
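-# The wait_*_is_applied helpers below all follow the same poll-until-settled
-# pattern. A generic sketch of that pattern (illustrative only; the concrete
-# helpers are kept separate so each can emit a specific failure message):
-def wait_until_settled(fetch_current, is_settled, retries=12, delay=5):
-    """Poll fetch_current() until is_settled(value) is true or retries run out."""
-    for dummy in range(retries):
-        value = fetch_current()
-        if is_settled(value):
-            return value
-        time.sleep(delay)
-    return None
-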
-
-def wait_policy_is_applied(module, s3_client, bucket_name, expected_policy, should_fail=True):
- for dummy in range(0, 12):
- try:
- current_policy = get_bucket_policy(s3_client, bucket_name)
- except (ClientError, BotoCoreError) as e:
- module.fail_json_aws(e, msg="Failed to get bucket policy")
-
- if compare_policies(current_policy, expected_policy):
- time.sleep(5)
- else:
- return current_policy
- if should_fail:
- module.fail_json(msg="Bucket policy failed to apply in the expected time")
- else:
- return None
-
-
-def wait_payer_is_applied(module, s3_client, bucket_name, expected_payer, should_fail=True):
- for dummy in range(0, 12):
- try:
- requester_pays_status = get_bucket_request_payment(s3_client, bucket_name)
- except (BotoCoreError, ClientError) as e:
- module.fail_json_aws(e, msg="Failed to get bucket request payment")
- if requester_pays_status != expected_payer:
- time.sleep(5)
- else:
- return requester_pays_status
- if should_fail:
- module.fail_json(msg="Bucket request payment failed to apply in the expected time")
- else:
- return None
-
-
-def wait_encryption_is_applied(module, s3_client, bucket_name, expected_encryption):
- for dummy in range(0, 12):
- try:
- encryption = get_bucket_encryption(s3_client, bucket_name)
- except (BotoCoreError, ClientError) as e:
- module.fail_json_aws(e, msg="Failed to get updated encryption for bucket")
- if encryption != expected_encryption:
- time.sleep(5)
- else:
- return encryption
- module.fail_json(msg="Bucket encryption failed to apply in the expected time")
-
-
-def wait_versioning_is_applied(module, s3_client, bucket_name, required_versioning):
- for dummy in range(0, 24):
- try:
- versioning_status = get_bucket_versioning(s3_client, bucket_name)
- except (BotoCoreError, ClientError) as e:
- module.fail_json_aws(e, msg="Failed to get updated versioning for bucket")
- if versioning_status.get('Status') != required_versioning:
- time.sleep(8)
- else:
- return versioning_status
- module.fail_json(msg="Bucket versioning failed to apply in the expected time")
-
-
-def wait_tags_are_applied(module, s3_client, bucket_name, expected_tags_dict):
- for dummy in range(0, 12):
- try:
- current_tags_dict = get_current_bucket_tags_dict(s3_client, bucket_name)
- except (ClientError, BotoCoreError) as e:
- module.fail_json_aws(e, msg="Failed to get bucket policy")
- if current_tags_dict != expected_tags_dict:
- time.sleep(5)
- else:
- return current_tags_dict
- module.fail_json(msg="Bucket tags failed to apply in the expected time")
-
-
-def get_current_bucket_tags_dict(s3_client, bucket_name):
- try:
- current_tags = s3_client.get_bucket_tagging(Bucket=bucket_name).get('TagSet')
- except ClientError as e:
- if e.response['Error']['Code'] == 'NoSuchTagSet':
- return {}
- raise e
-
- return boto3_tag_list_to_ansible_dict(current_tags)
-
-
-def paginated_list(s3_client, **pagination_params):
- pg = s3_client.get_paginator('list_objects_v2')
- for page in pg.paginate(**pagination_params):
- yield [data['Key'] for data in page.get('Contents', [])]
-
-
-def paginated_versions_list(s3_client, **pagination_params):
- try:
- pg = s3_client.get_paginator('list_object_versions')
- for page in pg.paginate(**pagination_params):
- # We have to merge the Versions and DeleteMarker lists here, as DeleteMarkers can still prevent a bucket deletion
- yield [(data['Key'], data['VersionId']) for data in (page.get('Versions', []) + page.get('DeleteMarkers', []))]
- except is_boto3_error_code('NoSuchBucket'):
- yield []
-
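-# Illustrative use of the paginator above (a sketch, not part of the module):
-# count every key/version pair that destroy_bucket would have to remove.
-def count_deletable_entries(s3_client, bucket_name):
-    total = 0
-    for key_version_pairs in paginated_versions_list(s3_client, Bucket=bucket_name):
-        total += len(key_version_pairs)
-    return total
-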
-
-def destroy_bucket(s3_client, module):
-
- force = module.params.get("force")
- name = module.params.get("name")
- try:
- bucket_is_present = bucket_exists(s3_client, name)
- except EndpointConnectionError as e:
- module.fail_json_aws(e, msg="Invalid endpoint provided: %s" % to_text(e))
- except (BotoCoreError, ClientError) as e:
- module.fail_json_aws(e, msg="Failed to check bucket presence")
-
- if not bucket_is_present:
- module.exit_json(changed=False)
-
- if force:
- # if there are contents then we need to delete them (including versions) before we can delete the bucket
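-        # (each page yielded by paginated_versions_list holds at most 1000
-        # entries, which matches the per-request limit of the DeleteObjects API)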
- try:
- for key_version_pairs in paginated_versions_list(s3_client, Bucket=name):
- formatted_keys = [{'Key': key, 'VersionId': version} for key, version in key_version_pairs]
- for fk in formatted_keys:
- # remove VersionId from cases where they are `None` so that
- # unversioned objects are deleted using `DeleteObject`
- # rather than `DeleteObjectVersion`, improving backwards
- # compatibility with older IAM policies.
- if not fk.get('VersionId'):
- fk.pop('VersionId')
-
- if formatted_keys:
- resp = s3_client.delete_objects(Bucket=name, Delete={'Objects': formatted_keys})
- if resp.get('Errors'):
- module.fail_json(
- msg='Could not empty bucket before deleting. Could not delete objects: {0}'.format(
- ', '.join([k['Key'] for k in resp['Errors']])
- ),
- errors=resp['Errors'], response=resp
- )
- except (BotoCoreError, ClientError) as e:
- module.fail_json_aws(e, msg="Failed while deleting bucket")
-
- try:
- delete_bucket(s3_client, name)
- s3_client.get_waiter('bucket_not_exists').wait(Bucket=name, WaiterConfig=dict(Delay=5, MaxAttempts=60))
- except WaiterError as e:
- module.fail_json_aws(e, msg='An error occurred waiting for the bucket to be deleted.')
- except (BotoCoreError, ClientError) as e:
- module.fail_json_aws(e, msg="Failed to delete bucket")
-
- module.exit_json(changed=True)
-
-
-def is_fakes3(s3_url):
- """ Return True if s3_url has scheme fakes3:// """
- if s3_url is not None:
- return urlparse(s3_url).scheme in ('fakes3', 'fakes3s')
- else:
- return False
-
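-# For example, is_fakes3('fakes3s://localhost:4567') is True, while
-# is_fakes3('https://s3.amazonaws.com') and is_fakes3(None) are False.
-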
-
-def get_s3_client(module, aws_connect_kwargs, location, ceph, s3_url):
- if s3_url and ceph: # TODO - test this
- ceph = urlparse(s3_url)
- params = dict(module=module, conn_type='client', resource='s3', use_ssl=ceph.scheme == 'https', region=location, endpoint=s3_url, **aws_connect_kwargs)
- elif is_fakes3(s3_url):
- fakes3 = urlparse(s3_url)
- port = fakes3.port
- if fakes3.scheme == 'fakes3s':
- protocol = "https"
- if port is None:
- port = 443
- else:
- protocol = "http"
- if port is None:
- port = 80
- params = dict(module=module, conn_type='client', resource='s3', region=location,
- endpoint="%s://%s:%s" % (protocol, fakes3.hostname, to_text(port)),
- use_ssl=fakes3.scheme == 'fakes3s', **aws_connect_kwargs)
- else:
- params = dict(module=module, conn_type='client', resource='s3', region=location, endpoint=s3_url, **aws_connect_kwargs)
- return boto3_conn(**params)
-
-
-def main():
-
- argument_spec = ec2_argument_spec()
- argument_spec.update(
- dict(
- force=dict(default=False, type='bool'),
- policy=dict(type='json'),
- name=dict(required=True),
- requester_pays=dict(default=False, type='bool'),
- s3_url=dict(aliases=['S3_URL']),
- state=dict(default='present', choices=['present', 'absent']),
- tags=dict(type='dict'),
- purge_tags=dict(type='bool', default=True),
- versioning=dict(type='bool'),
- ceph=dict(default=False, type='bool'),
- encryption=dict(choices=['none', 'AES256', 'aws:kms']),
- encryption_key_id=dict()
- )
- )
-
- module = AnsibleAWSModule(
- argument_spec=argument_spec,
- )
-
- region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
-
- if region in ('us-east-1', '', None):
- # default to US Standard region
- location = 'us-east-1'
- else:
- # Boto uses symbolic names for locations but region strings will
- # actually work fine for everything except us-east-1 (US Standard)
- location = region
-
- s3_url = module.params.get('s3_url')
- ceph = module.params.get('ceph')
-
- # allow eucarc environment variables to be used if ansible vars aren't set
- if not s3_url and 'S3_URL' in os.environ:
- s3_url = os.environ['S3_URL']
-
- if ceph and not s3_url:
- module.fail_json(msg='ceph flavour requires s3_url')
-
- # Look at s3_url and tweak connection settings
- # if connecting to Ceph RGW, Walrus or fakes3
- if s3_url:
- for key in ['validate_certs', 'security_token', 'profile_name']:
- aws_connect_kwargs.pop(key, None)
- s3_client = get_s3_client(module, aws_connect_kwargs, location, ceph, s3_url)
-
- if s3_client is None: # this should never happen
- module.fail_json(msg='Unknown error, failed to create s3 connection, no information from boto.')
-
- state = module.params.get("state")
- encryption = module.params.get("encryption")
- encryption_key_id = module.params.get("encryption_key_id")
-
- # Parameter validation
- if encryption_key_id is not None and encryption is None:
- module.fail_json(msg="You must specify encryption parameter along with encryption_key_id.")
- elif encryption_key_id is not None and encryption != 'aws:kms':
- module.fail_json(msg="Only 'aws:kms' is a valid option for encryption parameter when you specify encryption_key_id.")
-
- if state == 'present':
- create_or_update_bucket(s3_client, module, location)
- elif state == 'absent':
- destroy_bucket(s3_client, module)
-
-
-if __name__ == '__main__':
- main()
diff --git a/test/support/integration/plugins/modules/selogin.py b/test/support/integration/plugins/modules/selogin.py
deleted file mode 100644
index 6429ef36..00000000
--- a/test/support/integration/plugins/modules/selogin.py
+++ /dev/null
@@ -1,260 +0,0 @@
-#!/usr/bin/python
-
-# (c) 2017, Petr Lautrbach <plautrba@redhat.com>
-# Based on seport.py module (c) 2014, Dan Keder <dan.keder@gmail.com>
-
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-DOCUMENTATION = '''
----
-module: selogin
-short_description: Manages Linux user to SELinux user mapping
-description:
-    - Manages the mapping from a Linux user to an SELinux user.
-version_added: "2.8"
-options:
- login:
- description:
-      - A Linux user (or a Linux group, if prefixed with C(%)).
- required: true
- seuser:
- description:
- - SELinux user name
- required: true
- selevel:
- aliases: [ serange ]
- description:
-      - MLS/MCS Security Range (MLS/MCS Systems only). SELinux Range for SELinux login mapping; defaults to the SELinux user record range.
- default: s0
- state:
- description:
- - Desired mapping value.
-    default: present
- choices: [ 'present', 'absent' ]
- reload:
- description:
- - Reload SELinux policy after commit.
- default: yes
- ignore_selinux_state:
- description:
- - Run independent of selinux runtime state
- type: bool
- default: false
-notes:
- - The changes are persistent across reboots
-    - Not tested on any Debian-based system
-requirements: [ 'libselinux', 'policycoreutils' ]
-author:
-- Dan Keder (@dankeder)
-- Petr Lautrbach (@bachradsusi)
-- James Cassell (@jamescassell)
-'''
-
-EXAMPLES = '''
-# Modify the default user on the system to the guest_u user
-- selogin:
- login: __default__
- seuser: guest_u
- state: present
-
-# Assign gijoe user on an MLS machine a range and to the staff_u user
-- selogin:
- login: gijoe
- seuser: staff_u
- serange: SystemLow-Secret
- state: present
-
-# Assign all users in the engineering group to the staff_u user
-- selogin:
- login: '%engineering'
- seuser: staff_u
- state: present
-'''
-
-RETURN = r'''
-# Default return values
-'''
-
-
-import traceback
-
-SELINUX_IMP_ERR = None
-try:
- import selinux
- HAVE_SELINUX = True
-except ImportError:
- SELINUX_IMP_ERR = traceback.format_exc()
- HAVE_SELINUX = False
-
-SEOBJECT_IMP_ERR = None
-try:
- import seobject
- HAVE_SEOBJECT = True
-except ImportError:
- SEOBJECT_IMP_ERR = traceback.format_exc()
- HAVE_SEOBJECT = False
-
-
-from ansible.module_utils.basic import AnsibleModule, missing_required_lib
-from ansible.module_utils._text import to_native
-
-
-def semanage_login_add(module, login, seuser, do_reload, serange='s0', sestore=''):
- """ Add linux user to SELinux user mapping
-
- :type module: AnsibleModule
- :param module: Ansible module
-
- :type login: str
- :param login: a Linux User or a Linux group if it begins with %
-
- :type seuser: str
-    :param seuser: An SELinux user ('__default__', 'unconfined_u', 'staff_u', ...), see 'semanage login -l'
-
- :type serange: str
- :param serange: SELinux MLS/MCS range (defaults to 's0')
-
- :type do_reload: bool
- :param do_reload: Whether to reload SELinux policy after commit
-
- :type sestore: str
- :param sestore: SELinux store
-
- :rtype: bool
- :return: True if the policy was changed, otherwise False
- """
- try:
- selogin = seobject.loginRecords(sestore)
- selogin.set_reload(do_reload)
- change = False
- all_logins = selogin.get_all()
- # module.fail_json(msg="%s: %s %s" % (all_logins, login, sestore))
- # for local_login in all_logins:
- if login not in all_logins.keys():
- change = True
- if not module.check_mode:
- selogin.add(login, seuser, serange)
- else:
- if all_logins[login][0] != seuser or all_logins[login][1] != serange:
- change = True
- if not module.check_mode:
- selogin.modify(login, seuser, serange)
-
- except (ValueError, KeyError, OSError, RuntimeError) as e:
- module.fail_json(msg="%s: %s\n" % (e.__class__.__name__, to_native(e)), exception=traceback.format_exc())
-
- return change
-
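-# Illustrative call (a sketch, assuming 'module' is an AnsibleModule instance):
-#
-#   changed = semanage_login_add(module, 'gijoe', 'staff_u', do_reload=False, serange='s0')
-#
-# which maps the Linux user 'gijoe' to the SELinux user 'staff_u' without
-# reloading the SELinux policy afterwards.
-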
-
-def semanage_login_del(module, login, seuser, do_reload, sestore=''):
- """ Delete linux user to SELinux user mapping
-
- :type module: AnsibleModule
- :param module: Ansible module
-
- :type login: str
- :param login: a Linux User or a Linux group if it begins with %
-
- :type seuser: str
-    :param seuser: An SELinux user ('__default__', 'unconfined_u', 'staff_u', ...), see 'semanage login -l'
-
- :type do_reload: bool
- :param do_reload: Whether to reload SELinux policy after commit
-
- :type sestore: str
- :param sestore: SELinux store
-
- :rtype: bool
- :return: True if the policy was changed, otherwise False
- """
- try:
- selogin = seobject.loginRecords(sestore)
- selogin.set_reload(do_reload)
- change = False
- all_logins = selogin.get_all()
- # module.fail_json(msg="%s: %s %s" % (all_logins, login, sestore))
- if login in all_logins.keys():
- change = True
- if not module.check_mode:
- selogin.delete(login)
-
- except (ValueError, KeyError, OSError, RuntimeError) as e:
- module.fail_json(msg="%s: %s\n" % (e.__class__.__name__, to_native(e)), exception=traceback.format_exc())
-
- return change
-
-
-def get_runtime_status(ignore_selinux_state=False):
- return True if ignore_selinux_state is True else selinux.is_selinux_enabled()
-
-
-def main():
- module = AnsibleModule(
- argument_spec=dict(
- ignore_selinux_state=dict(type='bool', default=False),
- login=dict(type='str', required=True),
- seuser=dict(type='str'),
- selevel=dict(type='str', aliases=['serange'], default='s0'),
- state=dict(type='str', default='present', choices=['absent', 'present']),
- reload=dict(type='bool', default=True),
- ),
- required_if=[
- ["state", "present", ["seuser"]]
- ],
- supports_check_mode=True
- )
- if not HAVE_SELINUX:
- module.fail_json(msg=missing_required_lib("libselinux"), exception=SELINUX_IMP_ERR)
-
- if not HAVE_SEOBJECT:
- module.fail_json(msg=missing_required_lib("seobject from policycoreutils"), exception=SEOBJECT_IMP_ERR)
-
- ignore_selinux_state = module.params['ignore_selinux_state']
-
- if not get_runtime_status(ignore_selinux_state):
- module.fail_json(msg="SELinux is disabled on this host.")
-
- login = module.params['login']
- seuser = module.params['seuser']
- serange = module.params['selevel']
- state = module.params['state']
- do_reload = module.params['reload']
-
- result = {
- 'login': login,
- 'seuser': seuser,
- 'serange': serange,
- 'state': state,
- }
-
- if state == 'present':
- result['changed'] = semanage_login_add(module, login, seuser, do_reload, serange)
- elif state == 'absent':
- result['changed'] = semanage_login_del(module, login, seuser, do_reload)
- else:
- module.fail_json(msg='Invalid value of argument "state": {0}'.format(state))
-
- module.exit_json(**result)
-
-
-if __name__ == '__main__':
- main()
diff --git a/test/support/integration/plugins/modules/x509_crl.py b/test/support/integration/plugins/modules/x509_crl.py
deleted file mode 100644
index 9bb83a5b..00000000
--- a/test/support/integration/plugins/modules/x509_crl.py
+++ /dev/null
@@ -1,783 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-
-# Copyright: (c) 2019, Felix Fontein <felix@fontein.de>
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-DOCUMENTATION = r'''
----
-module: x509_crl
-version_added: "2.10"
-short_description: Generate Certificate Revocation Lists (CRLs)
-description:
- - This module allows one to (re)generate or update Certificate Revocation Lists (CRLs).
- - Certificates on the revocation list can be either specified via serial number and (optionally) their issuer,
- or as a path to a certificate file in PEM format.
-requirements:
- - cryptography >= 1.2
-author:
- - Felix Fontein (@felixfontein)
-options:
- state:
- description:
- - Whether the CRL file should exist or not, taking action if the state is different from what is stated.
- type: str
- default: present
- choices: [ absent, present ]
-
- mode:
- description:
- - Defines how to process entries of existing CRLs.
- - If set to C(generate), makes sure that the CRL has the exact set of revoked certificates
- as specified in I(revoked_certificates).
- - If set to C(update), makes sure that the CRL contains the revoked certificates from
- I(revoked_certificates), but can also contain other revoked certificates. If the CRL file
- already exists, all entries from the existing CRL will also be included in the new CRL.
- When using C(update), you might be interested in setting I(ignore_timestamps) to C(yes).
- type: str
- default: generate
- choices: [ generate, update ]
-
- force:
- description:
- - Should the CRL be forced to be regenerated.
- type: bool
- default: no
-
- backup:
- description:
- - Create a backup file including a timestamp so you can get the original
- CRL back if you overwrote it with a new one by accident.
- type: bool
- default: no
-
- path:
- description:
- - Remote absolute path where the generated CRL file should be created or is already located.
- type: path
- required: yes
-
- privatekey_path:
- description:
- - Path to the CA's private key to use when signing the CRL.
- - Either I(privatekey_path) or I(privatekey_content) must be specified if I(state) is C(present), but not both.
- type: path
-
- privatekey_content:
- description:
- - The content of the CA's private key to use when signing the CRL.
- - Either I(privatekey_path) or I(privatekey_content) must be specified if I(state) is C(present), but not both.
- type: str
-
- privatekey_passphrase:
- description:
- - The passphrase for the I(privatekey_path).
- - This is required if the private key is password protected.
- type: str
-
- issuer:
- description:
- - Key/value pairs that will be present in the issuer name field of the CRL.
- - If you need to specify more than one value with the same key, use a list as value.
- - Required if I(state) is C(present).
- type: dict
-
- last_update:
- description:
- - The point in time from which this CRL can be trusted.
- - Time can be specified either as relative time or as absolute timestamp.
- - Time will always be interpreted as UTC.
- - Valid format is C([+-]timespec | ASN.1 TIME) where timespec can be an integer
-        + C([w | d | h | m | s]) (e.g. C(+32w1d2h)).
- - Note that if using relative time this module is NOT idempotent, except when
- I(ignore_timestamps) is set to C(yes).
- type: str
- default: "+0s"
-
- next_update:
- description:
- - "The absolute latest point in time by which this I(issuer) is expected to have issued
- another CRL. Many clients will treat a CRL as expired once I(next_update) occurs."
- - Time can be specified either as relative time or as absolute timestamp.
- - Time will always be interpreted as UTC.
- - Valid format is C([+-]timespec | ASN.1 TIME) where timespec can be an integer
-        + C([w | d | h | m | s]) (e.g. C(+32w1d2h)).
- - Note that if using relative time this module is NOT idempotent, except when
- I(ignore_timestamps) is set to C(yes).
- - Required if I(state) is C(present).
- type: str
-
- digest:
- description:
- - Digest algorithm to be used when signing the CRL.
- type: str
- default: sha256
-
- revoked_certificates:
- description:
- - List of certificates to be revoked.
- - Required if I(state) is C(present).
- type: list
- elements: dict
- suboptions:
- path:
- description:
- - Path to a certificate in PEM format.
- - The serial number and issuer will be extracted from the certificate.
- - Mutually exclusive with I(content) and I(serial_number). One of these three options
- must be specified.
- type: path
- content:
- description:
- - Content of a certificate in PEM format.
- - The serial number and issuer will be extracted from the certificate.
- - Mutually exclusive with I(path) and I(serial_number). One of these three options
- must be specified.
- type: str
- serial_number:
- description:
- - Serial number of the certificate.
- - Mutually exclusive with I(path) and I(content). One of these three options must
- be specified.
- type: int
- revocation_date:
- description:
- - The point in time the certificate was revoked.
- - Time can be specified either as relative time or as absolute timestamp.
- - Time will always be interpreted as UTC.
- - Valid format is C([+-]timespec | ASN.1 TIME) where timespec can be an integer
-            + C([w | d | h | m | s]) (e.g. C(+32w1d2h)).
- - Note that if using relative time this module is NOT idempotent, except when
- I(ignore_timestamps) is set to C(yes).
- type: str
- default: "+0s"
- issuer:
- description:
- - The certificate's issuer.
- - "Example: C(DNS:ca.example.org)"
- type: list
- elements: str
- issuer_critical:
- description:
- - Whether the certificate issuer extension should be critical.
- type: bool
- default: no
- reason:
- description:
- - The value for the revocation reason extension.
- type: str
- choices:
- - unspecified
- - key_compromise
- - ca_compromise
- - affiliation_changed
- - superseded
- - cessation_of_operation
- - certificate_hold
- - privilege_withdrawn
- - aa_compromise
- - remove_from_crl
- reason_critical:
- description:
- - Whether the revocation reason extension should be critical.
- type: bool
- default: no
- invalidity_date:
- description:
- - The point in time it was known/suspected that the private key was compromised
- or that the certificate otherwise became invalid.
- - Time can be specified either as relative time or as absolute timestamp.
- - Time will always be interpreted as UTC.
- - Valid format is C([+-]timespec | ASN.1 TIME) where timespec can be an integer
-            + C([w | d | h | m | s]) (e.g. C(+32w1d2h)).
- - Note that if using relative time this module is NOT idempotent. This will NOT
- change when I(ignore_timestamps) is set to C(yes).
- type: str
- invalidity_date_critical:
- description:
- - Whether the invalidity date extension should be critical.
- type: bool
- default: no
-
- ignore_timestamps:
- description:
- - Whether the timestamps I(last_update), I(next_update) and I(revocation_date) (in
- I(revoked_certificates)) should be ignored for idempotency checks. The timestamp
- I(invalidity_date) in I(revoked_certificates) will never be ignored.
- - Use this in combination with relative timestamps for these values to get idempotency.
- type: bool
- default: no
-
- return_content:
- description:
- - If set to C(yes), will return the (current or generated) CRL's content as I(crl).
- type: bool
- default: no
-
-extends_documentation_fragment:
- - files
-
-notes:
- - All ASN.1 TIME values should be specified following the YYYYMMDDHHMMSSZ pattern.
-    - Dates specified must be in UTC. Minutes and seconds are mandatory.
-'''
-
-EXAMPLES = r'''
-- name: Generate a CRL
- x509_crl:
- path: /etc/ssl/my-ca.crl
- privatekey_path: /etc/ssl/private/my-ca.pem
- issuer:
- CN: My CA
- last_update: "+0s"
- next_update: "+7d"
- revoked_certificates:
- - serial_number: 1234
- revocation_date: 20190331202428Z
- issuer:
- CN: My CA
- - serial_number: 2345
- revocation_date: 20191013152910Z
- reason: affiliation_changed
- invalidity_date: 20191001000000Z
- - path: /etc/ssl/crt/revoked-cert.pem
- revocation_date: 20191010010203Z
-'''
-
-RETURN = r'''
-filename:
- description: Path to the generated CRL
- returned: changed or success
- type: str
- sample: /path/to/my-ca.crl
-backup_file:
- description: Name of backup file created.
- returned: changed and if I(backup) is C(yes)
- type: str
- sample: /path/to/my-ca.crl.2019-03-09@11:22~
-privatekey:
- description: Path to the private CA key
- returned: changed or success
- type: str
- sample: /path/to/my-ca.pem
-issuer:
- description:
- - The CRL's issuer.
- - Note that for repeated values, only the last one will be returned.
- returned: success
- type: dict
- sample: '{"organizationName": "Ansible", "commonName": "ca.example.com"}'
-issuer_ordered:
- description: The CRL's issuer as an ordered list of tuples.
- returned: success
- type: list
- elements: list
- sample: '[["organizationName", "Ansible"], ["commonName": "ca.example.com"]]'
-last_update:
- description: The point in time from which this CRL can be trusted as ASN.1 TIME.
- returned: success
- type: str
- sample: 20190413202428Z
-next_update:
-    description: The point in time by which the issuer is expected to have issued a new CRL and clients have to check for it, as ASN.1 TIME.
- returned: success
- type: str
- sample: 20190413202428Z
-digest:
- description: The signature algorithm used to sign the CRL.
- returned: success
- type: str
- sample: sha256WithRSAEncryption
-revoked_certificates:
- description: List of certificates to be revoked.
- returned: success
- type: list
- elements: dict
- contains:
- serial_number:
- description: Serial number of the certificate.
- type: int
- sample: 1234
- revocation_date:
- description: The point in time the certificate was revoked as ASN.1 TIME.
- type: str
- sample: 20190413202428Z
- issuer:
- description: The certificate's issuer.
- type: list
- elements: str
- sample: '["DNS:ca.example.org"]'
- issuer_critical:
- description: Whether the certificate issuer extension is critical.
- type: bool
- sample: no
- reason:
- description:
- - The value for the revocation reason extension.
- - One of C(unspecified), C(key_compromise), C(ca_compromise), C(affiliation_changed), C(superseded),
- C(cessation_of_operation), C(certificate_hold), C(privilege_withdrawn), C(aa_compromise), and
- C(remove_from_crl).
- type: str
- sample: key_compromise
- reason_critical:
- description: Whether the revocation reason extension is critical.
- type: bool
- sample: no
- invalidity_date:
- description: |
- The point in time it was known/suspected that the private key was compromised
- or that the certificate otherwise became invalid as ASN.1 TIME.
- type: str
- sample: 20190413202428Z
- invalidity_date_critical:
- description: Whether the invalidity date extension is critical.
- type: bool
- sample: no
-crl:
- description: The (current or generated) CRL's content.
- returned: if I(state) is C(present) and I(return_content) is C(yes)
- type: str
-'''
-
-
-import os
-import traceback
-from ansible.module_utils.compat.version import LooseVersion
-
-from ansible.module_utils import crypto as crypto_utils
-from ansible.module_utils._text import to_native, to_text
-from ansible.module_utils.basic import AnsibleModule, missing_required_lib
-
-MINIMAL_CRYPTOGRAPHY_VERSION = '1.2'
-
-CRYPTOGRAPHY_IMP_ERR = None
-try:
- import cryptography
- from cryptography import x509
- from cryptography.hazmat.backends import default_backend
- from cryptography.hazmat.primitives.serialization import Encoding
- from cryptography.x509 import (
- CertificateRevocationListBuilder,
- RevokedCertificateBuilder,
- NameAttribute,
- Name,
- )
- CRYPTOGRAPHY_VERSION = LooseVersion(cryptography.__version__)
-except ImportError:
- CRYPTOGRAPHY_IMP_ERR = traceback.format_exc()
- CRYPTOGRAPHY_FOUND = False
-else:
- CRYPTOGRAPHY_FOUND = True
-
-
-TIMESTAMP_FORMAT = "%Y%m%d%H%M%SZ"
-
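-# The C([+-]timespec) values accepted by last_update, next_update and
-# revocation_date are converted by crypto_utils.get_relative_time_option().
-# A simplified sketch of that conversion, covering only the documented
-# w/d/h/m/s units (illustrative, not the module's actual parser):
-def relative_timespec_to_datetime(value):
-    import re
-    from datetime import datetime, timedelta
-    match = re.match(r'^([+-])((?:\d+[wdhms])+)$', value)
-    if match is None:
-        raise ValueError('not a relative timespec: {0}'.format(value))
-    sign = -1 if match.group(1) == '-' else 1
-    units = {'w': 'weeks', 'd': 'days', 'h': 'hours', 'm': 'minutes', 's': 'seconds'}
-    offset = timedelta()
-    for amount, unit in re.findall(r'(\d+)([wdhms])', match.group(2)):
-        offset += timedelta(**{units[unit]: int(amount)})
-    # Relative timespecs are interpreted against the current UTC time
-    return datetime.utcnow() + sign * offset
-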
-
-class CRLError(crypto_utils.OpenSSLObjectError):
- pass
-
-
-class CRL(crypto_utils.OpenSSLObject):
-
- def __init__(self, module):
- super(CRL, self).__init__(
- module.params['path'],
- module.params['state'],
- module.params['force'],
- module.check_mode
- )
-
- self.update = module.params['mode'] == 'update'
- self.ignore_timestamps = module.params['ignore_timestamps']
- self.return_content = module.params['return_content']
- self.crl_content = None
-
- self.privatekey_path = module.params['privatekey_path']
- self.privatekey_content = module.params['privatekey_content']
- if self.privatekey_content is not None:
- self.privatekey_content = self.privatekey_content.encode('utf-8')
- self.privatekey_passphrase = module.params['privatekey_passphrase']
-
- self.issuer = crypto_utils.parse_name_field(module.params['issuer'])
- self.issuer = [(entry[0], entry[1]) for entry in self.issuer if entry[1]]
-
- self.last_update = crypto_utils.get_relative_time_option(module.params['last_update'], 'last_update')
- self.next_update = crypto_utils.get_relative_time_option(module.params['next_update'], 'next_update')
-
- self.digest = crypto_utils.select_message_digest(module.params['digest'])
- if self.digest is None:
- raise CRLError('The digest "{0}" is not supported'.format(module.params['digest']))
-
- self.revoked_certificates = []
- for i, rc in enumerate(module.params['revoked_certificates']):
- result = {
- 'serial_number': None,
- 'revocation_date': None,
- 'issuer': None,
- 'issuer_critical': False,
- 'reason': None,
- 'reason_critical': False,
- 'invalidity_date': None,
- 'invalidity_date_critical': False,
- }
- path_prefix = 'revoked_certificates[{0}].'.format(i)
- if rc['path'] is not None or rc['content'] is not None:
- # Load certificate from file or content
- try:
- if rc['content'] is not None:
- rc['content'] = rc['content'].encode('utf-8')
- cert = crypto_utils.load_certificate(rc['path'], content=rc['content'], backend='cryptography')
- try:
- result['serial_number'] = cert.serial_number
- except AttributeError:
- # The property was called "serial" before cryptography 1.4
- result['serial_number'] = cert.serial
- except crypto_utils.OpenSSLObjectError as e:
- if rc['content'] is not None:
- module.fail_json(
- msg='Cannot parse certificate from {0}content: {1}'.format(path_prefix, to_native(e))
- )
- else:
- module.fail_json(
- msg='Cannot read certificate "{1}" from {0}path: {2}'.format(path_prefix, rc['path'], to_native(e))
- )
- else:
- # Specify serial_number (and potentially issuer) directly
- result['serial_number'] = rc['serial_number']
- # All other options
- if rc['issuer']:
- result['issuer'] = [crypto_utils.cryptography_get_name(issuer) for issuer in rc['issuer']]
- result['issuer_critical'] = rc['issuer_critical']
- result['revocation_date'] = crypto_utils.get_relative_time_option(
- rc['revocation_date'],
- path_prefix + 'revocation_date'
- )
- if rc['reason']:
- result['reason'] = crypto_utils.REVOCATION_REASON_MAP[rc['reason']]
- result['reason_critical'] = rc['reason_critical']
- if rc['invalidity_date']:
- result['invalidity_date'] = crypto_utils.get_relative_time_option(
- rc['invalidity_date'],
- path_prefix + 'invalidity_date'
- )
- result['invalidity_date_critical'] = rc['invalidity_date_critical']
- self.revoked_certificates.append(result)
-
- self.module = module
-
- self.backup = module.params['backup']
- self.backup_file = None
-
- try:
- self.privatekey = crypto_utils.load_privatekey(
- path=self.privatekey_path,
- content=self.privatekey_content,
- passphrase=self.privatekey_passphrase,
- backend='cryptography'
- )
- except crypto_utils.OpenSSLBadPassphraseError as exc:
- raise CRLError(exc)
-
- self.crl = None
- try:
- with open(self.path, 'rb') as f:
- data = f.read()
- self.crl = x509.load_pem_x509_crl(data, default_backend())
- if self.return_content:
- self.crl_content = data
- except Exception as dummy:
- self.crl_content = None
-
- def remove(self):
- if self.backup:
- self.backup_file = self.module.backup_local(self.path)
- super(CRL, self).remove(self.module)
-
- def _compress_entry(self, entry):
- if self.ignore_timestamps:
- # Throw out revocation_date
- return (
- entry['serial_number'],
- tuple(entry['issuer']) if entry['issuer'] is not None else None,
- entry['issuer_critical'],
- entry['reason'],
- entry['reason_critical'],
- entry['invalidity_date'],
- entry['invalidity_date_critical'],
- )
- else:
- return (
- entry['serial_number'],
- entry['revocation_date'],
- tuple(entry['issuer']) if entry['issuer'] is not None else None,
- entry['issuer_critical'],
- entry['reason'],
- entry['reason_critical'],
- entry['invalidity_date'],
- entry['invalidity_date_critical'],
- )
-
- def check(self, perms_required=True):
- """Ensure the resource is in its desired state."""
-
- state_and_perms = super(CRL, self).check(self.module, perms_required)
-
- if not state_and_perms:
- return False
-
- if self.crl is None:
- return False
-
- if self.last_update != self.crl.last_update and not self.ignore_timestamps:
- return False
- if self.next_update != self.crl.next_update and not self.ignore_timestamps:
- return False
- if self.digest.name != self.crl.signature_hash_algorithm.name:
- return False
-
- want_issuer = [(crypto_utils.cryptography_name_to_oid(entry[0]), entry[1]) for entry in self.issuer]
- if want_issuer != [(sub.oid, sub.value) for sub in self.crl.issuer]:
- return False
-
- old_entries = [self._compress_entry(crypto_utils.cryptography_decode_revoked_certificate(cert)) for cert in self.crl]
- new_entries = [self._compress_entry(cert) for cert in self.revoked_certificates]
- if self.update:
- # We don't simply use a set so that duplicate entries are treated correctly
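-            # (i.e. a multiset comparison: every expected entry must be matched
-            # by a distinct entry in the existing CRL)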
- for entry in new_entries:
- try:
- old_entries.remove(entry)
- except ValueError:
- return False
- else:
- if old_entries != new_entries:
- return False
-
- return True
-
- def _generate_crl(self):
- backend = default_backend()
- crl = CertificateRevocationListBuilder()
-
- try:
- crl = crl.issuer_name(Name([
- NameAttribute(crypto_utils.cryptography_name_to_oid(entry[0]), to_text(entry[1]))
- for entry in self.issuer
- ]))
- except ValueError as e:
- raise CRLError(e)
-
- crl = crl.last_update(self.last_update)
- crl = crl.next_update(self.next_update)
-
- if self.update and self.crl:
- new_entries = set([self._compress_entry(entry) for entry in self.revoked_certificates])
- for entry in self.crl:
- decoded_entry = self._compress_entry(crypto_utils.cryptography_decode_revoked_certificate(entry))
- if decoded_entry not in new_entries:
- crl = crl.add_revoked_certificate(entry)
- for entry in self.revoked_certificates:
- revoked_cert = RevokedCertificateBuilder()
- revoked_cert = revoked_cert.serial_number(entry['serial_number'])
- revoked_cert = revoked_cert.revocation_date(entry['revocation_date'])
- if entry['issuer'] is not None:
- revoked_cert = revoked_cert.add_extension(
- x509.CertificateIssuer([
-                        crypto_utils.cryptography_get_name(name) for name in entry['issuer']
- ]),
- entry['issuer_critical']
- )
- if entry['reason'] is not None:
- revoked_cert = revoked_cert.add_extension(
- x509.CRLReason(entry['reason']),
- entry['reason_critical']
- )
- if entry['invalidity_date'] is not None:
- revoked_cert = revoked_cert.add_extension(
- x509.InvalidityDate(entry['invalidity_date']),
- entry['invalidity_date_critical']
- )
- crl = crl.add_revoked_certificate(revoked_cert.build(backend))
-
- self.crl = crl.sign(self.privatekey, self.digest, backend=backend)
- return self.crl.public_bytes(Encoding.PEM)
-
- def generate(self):
- if not self.check(perms_required=False) or self.force:
- result = self._generate_crl()
- if self.return_content:
- self.crl_content = result
- if self.backup:
- self.backup_file = self.module.backup_local(self.path)
- crypto_utils.write_file(self.module, result)
- self.changed = True
-
- file_args = self.module.load_file_common_arguments(self.module.params)
- if self.module.set_fs_attributes_if_different(file_args, False):
- self.changed = True
-
- def _dump_revoked(self, entry):
- return {
- 'serial_number': entry['serial_number'],
- 'revocation_date': entry['revocation_date'].strftime(TIMESTAMP_FORMAT),
- 'issuer':
- [crypto_utils.cryptography_decode_name(issuer) for issuer in entry['issuer']]
- if entry['issuer'] is not None else None,
- 'issuer_critical': entry['issuer_critical'],
- 'reason': crypto_utils.REVOCATION_REASON_MAP_INVERSE.get(entry['reason']) if entry['reason'] is not None else None,
- 'reason_critical': entry['reason_critical'],
- 'invalidity_date':
- entry['invalidity_date'].strftime(TIMESTAMP_FORMAT)
- if entry['invalidity_date'] is not None else None,
- 'invalidity_date_critical': entry['invalidity_date_critical'],
- }
-
- def dump(self, check_mode=False):
- result = {
- 'changed': self.changed,
- 'filename': self.path,
- 'privatekey': self.privatekey_path,
- 'last_update': None,
- 'next_update': None,
- 'digest': None,
- 'issuer_ordered': None,
- 'issuer': None,
- 'revoked_certificates': [],
- }
- if self.backup_file:
- result['backup_file'] = self.backup_file
-
- if check_mode:
- result['last_update'] = self.last_update.strftime(TIMESTAMP_FORMAT)
- result['next_update'] = self.next_update.strftime(TIMESTAMP_FORMAT)
- # result['digest'] = crypto_utils.cryptography_oid_to_name(self.crl.signature_algorithm_oid)
- result['digest'] = self.module.params['digest']
- result['issuer_ordered'] = self.issuer
- result['issuer'] = {}
- for k, v in self.issuer:
- result['issuer'][k] = v
- result['revoked_certificates'] = []
- for entry in self.revoked_certificates:
- result['revoked_certificates'].append(self._dump_revoked(entry))
- elif self.crl:
- result['last_update'] = self.crl.last_update.strftime(TIMESTAMP_FORMAT)
- result['next_update'] = self.crl.next_update.strftime(TIMESTAMP_FORMAT)
- try:
- result['digest'] = crypto_utils.cryptography_oid_to_name(self.crl.signature_algorithm_oid)
- except AttributeError:
- # Older cryptography versions don't have signature_algorithm_oid yet
- dotted = crypto_utils._obj2txt(
- self.crl._backend._lib,
- self.crl._backend._ffi,
- self.crl._x509_crl.sig_alg.algorithm
- )
- oid = x509.oid.ObjectIdentifier(dotted)
- result['digest'] = crypto_utils.cryptography_oid_to_name(oid)
- issuer = []
- for attribute in self.crl.issuer:
- issuer.append([crypto_utils.cryptography_oid_to_name(attribute.oid), attribute.value])
- result['issuer_ordered'] = issuer
- result['issuer'] = {}
- for k, v in issuer:
- result['issuer'][k] = v
- result['revoked_certificates'] = []
- for cert in self.crl:
- entry = crypto_utils.cryptography_decode_revoked_certificate(cert)
- result['revoked_certificates'].append(self._dump_revoked(entry))
-
- if self.return_content:
- result['crl'] = self.crl_content
-
- return result
-
-
-def main():
- module = AnsibleModule(
- argument_spec=dict(
- state=dict(type='str', default='present', choices=['present', 'absent']),
- mode=dict(type='str', default='generate', choices=['generate', 'update']),
- force=dict(type='bool', default=False),
- backup=dict(type='bool', default=False),
- path=dict(type='path', required=True),
- privatekey_path=dict(type='path'),
- privatekey_content=dict(type='str'),
- privatekey_passphrase=dict(type='str', no_log=True),
- issuer=dict(type='dict'),
- last_update=dict(type='str', default='+0s'),
- next_update=dict(type='str'),
- digest=dict(type='str', default='sha256'),
- ignore_timestamps=dict(type='bool', default=False),
- return_content=dict(type='bool', default=False),
- revoked_certificates=dict(
- type='list',
- elements='dict',
- options=dict(
- path=dict(type='path'),
- content=dict(type='str'),
- serial_number=dict(type='int'),
- revocation_date=dict(type='str', default='+0s'),
- issuer=dict(type='list', elements='str'),
- issuer_critical=dict(type='bool', default=False),
- reason=dict(
- type='str',
- choices=[
- 'unspecified', 'key_compromise', 'ca_compromise', 'affiliation_changed',
- 'superseded', 'cessation_of_operation', 'certificate_hold',
- 'privilege_withdrawn', 'aa_compromise', 'remove_from_crl'
- ]
- ),
- reason_critical=dict(type='bool', default=False),
- invalidity_date=dict(type='str'),
- invalidity_date_critical=dict(type='bool', default=False),
- ),
- required_one_of=[['path', 'content', 'serial_number']],
- mutually_exclusive=[['path', 'content', 'serial_number']],
- ),
- ),
- required_if=[
- ('state', 'present', ['privatekey_path', 'privatekey_content'], True),
- ('state', 'present', ['issuer', 'next_update', 'revoked_certificates'], False),
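-            # The trailing boolean selects the required_if semantics: True means
-            # at least one of the listed options is required, False means all of
-            # them are.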
- ],
- mutually_exclusive=(
- ['privatekey_path', 'privatekey_content'],
- ),
- supports_check_mode=True,
- add_file_common_args=True,
- )
-
- if not CRYPTOGRAPHY_FOUND:
- module.fail_json(msg=missing_required_lib('cryptography >= {0}'.format(MINIMAL_CRYPTOGRAPHY_VERSION)),
- exception=CRYPTOGRAPHY_IMP_ERR)
-
- try:
- crl = CRL(module)
-
- if module.params['state'] == 'present':
- if module.check_mode:
- result = crl.dump(check_mode=True)
- result['changed'] = module.params['force'] or not crl.check()
- module.exit_json(**result)
-
- crl.generate()
- else:
- if module.check_mode:
- result = crl.dump(check_mode=True)
- result['changed'] = os.path.exists(module.params['path'])
- module.exit_json(**result)
-
- crl.remove()
-
- result = crl.dump()
- module.exit_json(**result)
- except crypto_utils.OpenSSLObjectError as exc:
- module.fail_json(msg=to_native(exc))
-
-
-if __name__ == "__main__":
- main()
diff --git a/test/support/integration/plugins/modules/x509_crl_info.py b/test/support/integration/plugins/modules/x509_crl_info.py
deleted file mode 100644
index b6d36320..00000000
--- a/test/support/integration/plugins/modules/x509_crl_info.py
+++ /dev/null
@@ -1,281 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-
-# Copyright: (c) 2020, Felix Fontein <felix@fontein.de>
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-DOCUMENTATION = r'''
----
-module: x509_crl_info
-version_added: "2.10"
-short_description: Retrieve information on Certificate Revocation Lists (CRLs)
-description:
- - This module allows one to retrieve information on Certificate Revocation Lists (CRLs).
-requirements:
- - cryptography >= 1.2
-author:
- - Felix Fontein (@felixfontein)
-options:
- path:
- description:
-      - Remote absolute path of the CRL file to read.
- - Either I(path) or I(content) must be specified, but not both.
- type: path
- content:
- description:
-      - Content of the X.509 CRL in PEM format.
- - Either I(path) or I(content) must be specified, but not both.
- type: str
-
-notes:
- - All timestamp values are provided in ASN.1 TIME format, i.e. following the C(YYYYMMDDHHMMSSZ) pattern.
- They are all in UTC.
-seealso:
- - module: x509_crl
-'''
-
-EXAMPLES = r'''
-- name: Get information on CRL
- x509_crl_info:
- path: /etc/ssl/my-ca.crl
- register: result
-
-- debug:
- msg: "{{ result }}"
-'''
-
-RETURN = r'''
-issuer:
- description:
- - The CRL's issuer.
- - Note that for repeated values, only the last one will be returned.
- returned: success
- type: dict
- sample: '{"organizationName": "Ansible", "commonName": "ca.example.com"}'
-issuer_ordered:
- description: The CRL's issuer as an ordered list of tuples.
- returned: success
- type: list
- elements: list
- sample: '[["organizationName", "Ansible"], ["commonName": "ca.example.com"]]'
-last_update:
- description: The point in time from which this CRL can be trusted as ASN.1 TIME.
- returned: success
- type: str
- sample: 20190413202428Z
-next_update:
-    description: The point in time by which the issuer is expected to have issued a new CRL and clients have to check for it, as ASN.1 TIME.
- returned: success
- type: str
- sample: 20190413202428Z
-digest:
- description: The signature algorithm used to sign the CRL.
- returned: success
- type: str
- sample: sha256WithRSAEncryption
-revoked_certificates:
- description: List of certificates to be revoked.
- returned: success
- type: list
- elements: dict
- contains:
- serial_number:
- description: Serial number of the certificate.
- type: int
- sample: 1234
- revocation_date:
- description: The point in time the certificate was revoked as ASN.1 TIME.
- type: str
- sample: 20190413202428Z
- issuer:
- description: The certificate's issuer.
- type: list
- elements: str
- sample: '["DNS:ca.example.org"]'
- issuer_critical:
- description: Whether the certificate issuer extension is critical.
- type: bool
- sample: no
- reason:
- description:
- - The value for the revocation reason extension.
- - One of C(unspecified), C(key_compromise), C(ca_compromise), C(affiliation_changed), C(superseded),
- C(cessation_of_operation), C(certificate_hold), C(privilege_withdrawn), C(aa_compromise), and
- C(remove_from_crl).
- type: str
- sample: key_compromise
- reason_critical:
- description: Whether the revocation reason extension is critical.
- type: bool
- sample: no
- invalidity_date:
- description: |
- The point in time it was known/suspected that the private key was compromised
- or that the certificate otherwise became invalid as ASN.1 TIME.
- type: str
- sample: 20190413202428Z
- invalidity_date_critical:
- description: Whether the invalidity date extension is critical.
- type: bool
- sample: no
-'''
-
-
-import traceback
-from ansible.module_utils.compat.version import LooseVersion
-
-from ansible.module_utils import crypto as crypto_utils
-from ansible.module_utils._text import to_native
-from ansible.module_utils.basic import AnsibleModule, missing_required_lib
-
-MINIMAL_CRYPTOGRAPHY_VERSION = '1.2'
-
-CRYPTOGRAPHY_IMP_ERR = None
-try:
- import cryptography
- from cryptography import x509
- from cryptography.hazmat.backends import default_backend
- CRYPTOGRAPHY_VERSION = LooseVersion(cryptography.__version__)
-except ImportError:
- CRYPTOGRAPHY_IMP_ERR = traceback.format_exc()
- CRYPTOGRAPHY_FOUND = False
-else:
- CRYPTOGRAPHY_FOUND = True
-
-
-TIMESTAMP_FORMAT = "%Y%m%d%H%M%SZ"
-
-
-class CRLError(crypto_utils.OpenSSLObjectError):
- pass
-
-
-class CRLInfo(crypto_utils.OpenSSLObject):
- """The main module implementation."""
-
- def __init__(self, module):
- super(CRLInfo, self).__init__(
- module.params['path'] or '',
- 'present',
- False,
- module.check_mode
- )
-
- self.content = module.params['content']
-
- self.module = module
-
- self.crl = None
- if self.content is None:
- try:
- with open(self.path, 'rb') as f:
- data = f.read()
- except Exception as e:
- self.module.fail_json(msg='Error while reading CRL file from disk: {0}'.format(e))
- else:
- data = self.content.encode('utf-8')
-
- try:
- self.crl = x509.load_pem_x509_crl(data, default_backend())
- except Exception as e:
- self.module.fail_json(msg='Error while decoding CRL: {0}'.format(e))
-
- def _dump_revoked(self, entry):
- return {
- 'serial_number': entry['serial_number'],
- 'revocation_date': entry['revocation_date'].strftime(TIMESTAMP_FORMAT),
- 'issuer':
- [crypto_utils.cryptography_decode_name(issuer) for issuer in entry['issuer']]
- if entry['issuer'] is not None else None,
- 'issuer_critical': entry['issuer_critical'],
- 'reason': crypto_utils.REVOCATION_REASON_MAP_INVERSE.get(entry['reason']) if entry['reason'] is not None else None,
- 'reason_critical': entry['reason_critical'],
- 'invalidity_date':
- entry['invalidity_date'].strftime(TIMESTAMP_FORMAT)
- if entry['invalidity_date'] is not None else None,
- 'invalidity_date_critical': entry['invalidity_date_critical'],
- }
-
- def get_info(self):
- result = {
- 'changed': False,
- 'last_update': None,
- 'next_update': None,
- 'digest': None,
- 'issuer_ordered': None,
- 'issuer': None,
- 'revoked_certificates': [],
- }
-
- result['last_update'] = self.crl.last_update.strftime(TIMESTAMP_FORMAT)
- result['next_update'] = self.crl.next_update.strftime(TIMESTAMP_FORMAT)
- try:
- result['digest'] = crypto_utils.cryptography_oid_to_name(self.crl.signature_algorithm_oid)
- except AttributeError:
- # Older cryptography versions don't have signature_algorithm_oid yet
- dotted = crypto_utils._obj2txt(
- self.crl._backend._lib,
- self.crl._backend._ffi,
- self.crl._x509_crl.sig_alg.algorithm
- )
- oid = x509.oid.ObjectIdentifier(dotted)
- result['digest'] = crypto_utils.cryptography_oid_to_name(oid)
- issuer = []
- for attribute in self.crl.issuer:
- issuer.append([crypto_utils.cryptography_oid_to_name(attribute.oid), attribute.value])
- result['issuer_ordered'] = issuer
- result['issuer'] = {}
- for k, v in issuer:
- result['issuer'][k] = v
- result['revoked_certificates'] = []
- for cert in self.crl:
- entry = crypto_utils.cryptography_decode_revoked_certificate(cert)
- result['revoked_certificates'].append(self._dump_revoked(entry))
-
- return result
-
- def generate(self):
- # Empty method because crypto_utils.OpenSSLObject wants this
- pass
-
- def dump(self):
- # Empty method because crypto_utils.OpenSSLObject wants this
- pass
-
-
-def main():
- module = AnsibleModule(
- argument_spec=dict(
- path=dict(type='path'),
- content=dict(type='str'),
- ),
- required_one_of=(
- ['path', 'content'],
- ),
- mutually_exclusive=(
- ['path', 'content'],
- ),
- supports_check_mode=True,
- )
-
- if not CRYPTOGRAPHY_FOUND:
- module.fail_json(msg=missing_required_lib('cryptography >= {0}'.format(MINIMAL_CRYPTOGRAPHY_VERSION)),
- exception=CRYPTOGRAPHY_IMP_ERR)
-
- try:
- crl = CRLInfo(module)
- result = crl.get_info()
- module.exit_json(**result)
- except crypto_utils.OpenSSLObjectError as e:
- module.fail_json(msg=to_native(e))
-
-
-if __name__ == "__main__":
- main()
diff --git a/test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/filter/network.py b/test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/filter/network.py
index f99e6e76..72d6c868 100644
--- a/test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/filter/network.py
+++ b/test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/filter/network.py
@@ -26,6 +26,7 @@ import os
import traceback
import string
+from collections.abc import Mapping
from xml.etree.ElementTree import fromstring
from ansible.module_utils._text import to_native, to_text
@@ -33,7 +34,6 @@ from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.u
Template,
)
from ansible.module_utils.six import iteritems, string_types
-from ansible.module_utils.common._collections_compat import Mapping
from ansible.errors import AnsibleError, AnsibleFilterError
from ansible.utils.display import Display
from ansible.utils.encrypt import passlib_or_crypt, random_password
diff --git a/test/support/network-integration/collections/ansible_collections/cisco/ios/plugins/cliconf/ios.py b/test/support/network-integration/collections/ansible_collections/cisco/ios/plugins/cliconf/ios.py
index 8a390034..feba971a 100644
--- a/test/support/network-integration/collections/ansible_collections/cisco/ios/plugins/cliconf/ios.py
+++ b/test/support/network-integration/collections/ansible_collections/cisco/ios/plugins/cliconf/ios.py
@@ -35,9 +35,10 @@ import re
import time
import json
+from collections.abc import Mapping
+
from ansible.errors import AnsibleConnectionFailure
from ansible.module_utils._text import to_text
-from ansible.module_utils.common._collections_compat import Mapping
from ansible.module_utils.six import iteritems
from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.config import (
NetworkConfig,
diff --git a/test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/cliconf/vyos.py b/test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/cliconf/vyos.py
index 30336031..3212615f 100644
--- a/test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/cliconf/vyos.py
+++ b/test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/cliconf/vyos.py
@@ -34,9 +34,10 @@ version_added: "2.4"
import re
import json
+from collections.abc import Mapping
+
from ansible.errors import AnsibleConnectionFailure
from ansible.module_utils._text import to_text
-from ansible.module_utils.common._collections_compat import Mapping
from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.config import (
NetworkConfig,
)
diff --git a/test/support/windows-integration/collections/ansible_collections/ansible/windows/plugins/action/win_copy.py b/test/support/windows-integration/collections/ansible_collections/ansible/windows/plugins/action/win_copy.py
index 0364d766..adb918be 120000..100644
--- a/test/support/windows-integration/collections/ansible_collections/ansible/windows/plugins/action/win_copy.py
+++ b/test/support/windows-integration/collections/ansible_collections/ansible/windows/plugins/action/win_copy.py
@@ -1 +1,522 @@
-../../../../../../plugins/action/win_copy.py \ No newline at end of file
+# This file is part of Ansible
+
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import base64
+import json
+import os
+import os.path
+import shutil
+import tempfile
+import traceback
+import zipfile
+
+from ansible import constants as C
+from ansible.errors import AnsibleError, AnsibleFileNotFound
+from ansible.module_utils._text import to_bytes, to_native, to_text
+from ansible.module_utils.parsing.convert_bool import boolean
+from ansible.plugins.action import ActionBase
+from ansible.utils.hashing import checksum
+
+
+def _walk_dirs(topdir, loader, decrypt=True, base_path=None, local_follow=False, trailing_slash_detector=None, checksum_check=False):
+ """
+ Walk a filesystem tree returning enough information to copy the files.
+ This is similar to the _walk_dirs function in ``copy.py`` but returns
+ a dict instead of a tuple for each entry and includes the checksum of
+ a local file if wanted.
+
+ :arg topdir: The directory that the filesystem tree is rooted at
+ :arg loader: The self._loader object from ActionBase
+ :kwarg decrypt: Whether to decrypt a file encrypted with ansible-vault
+ :kwarg base_path: The initial directory structure to strip off of the
+ files for the destination directory. If this is None (the default),
+        the base_path is set to ``topdir``.
+ :kwarg local_follow: Whether to follow symlinks on the source. When set
+ to False, no symlinks are dereferenced. When set to True (the
+ default), the code will dereference most symlinks. However, symlinks
+ can still be present if needed to break a circular link.
+ :kwarg trailing_slash_detector: Function to determine if a path has
+ a trailing directory separator. Only needed when dealing with paths on
+ a remote machine (in which case, pass in a function that is aware of the
+ directory separator conventions on the remote machine).
+    :kwarg checksum_check: Whether to get the checksum of the local file and add it to the dict
+    :returns: dictionary of dictionaries. All of the path elements in the structure are text strings.
+        This separates all the files, directories, and symlinks along with
+        important information about each::
+
+            {
+                'files': [{
+                    src: '/absolute/path/to/copy/from',
+                    dest: 'relative/path/to/copy/to',
+                    checksum: 'b54ba7f5621240d403f06815f7246006ef8c7d43'
+                }, ...],
+                'directories': [{
+                    src: '/absolute/path/to/copy/from',
+                    dest: 'relative/path/to/copy/to'
+                }, ...],
+                'symlinks': [{
+                    src: '/symlink/target/path',
+                    dest: 'relative/path/to/copy/to'
+                }, ...],
+
+            }
+
+ The ``symlinks`` field is only populated if ``local_follow`` is set to False
+ *or* a circular symlink cannot be dereferenced. The ``checksum`` entry is set
+ to None if checksum_check=False.
+
+ """
+ # Convert the path segments into byte strings
+
+ r_files = {'files': [], 'directories': [], 'symlinks': []}
+
+ def _recurse(topdir, rel_offset, parent_dirs, rel_base=u'', checksum_check=False):
+ """
+        This is a closure (a function using variables from its parent
+        function's scope) so that we only need one copy of all the containers.
+ Note that this function uses side effects (See the Variables used from
+ outer scope).
+
+ :arg topdir: The directory we are walking for files
+ :arg rel_offset: Integer defining how many characters to strip off of
+ the beginning of a path
+ :arg parent_dirs: Directories that we're copying that this directory is in.
+ :kwarg rel_base: String to prepend to the path after ``rel_offset`` is
+ applied to form the relative path.
+
+ Variables used from the outer scope
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ :r_files: Dictionary of files in the hierarchy. See the return value
+ for :func:`walk` for the structure of this dictionary.
+ :local_follow: Read-only inside of :func:`_recurse`. Whether to follow symlinks
+ """
+ for base_path, sub_folders, files in os.walk(topdir):
+ for filename in files:
+ filepath = os.path.join(base_path, filename)
+ dest_filepath = os.path.join(rel_base, filepath[rel_offset:])
+
+ if os.path.islink(filepath):
+                    # Dereference the symlink
+ real_file = loader.get_real_file(os.path.realpath(filepath), decrypt=decrypt)
+ if local_follow and os.path.isfile(real_file):
+ # Add the file pointed to by the symlink
+ r_files['files'].append(
+ {
+ "src": real_file,
+ "dest": dest_filepath,
+ "checksum": _get_local_checksum(checksum_check, real_file)
+ }
+ )
+ else:
+ # Mark this file as a symlink to copy
+ r_files['symlinks'].append({"src": os.readlink(filepath), "dest": dest_filepath})
+ else:
+ # Just a normal file
+ real_file = loader.get_real_file(filepath, decrypt=decrypt)
+ r_files['files'].append(
+ {
+ "src": real_file,
+ "dest": dest_filepath,
+ "checksum": _get_local_checksum(checksum_check, real_file)
+ }
+ )
+
+ for dirname in sub_folders:
+ dirpath = os.path.join(base_path, dirname)
+ dest_dirpath = os.path.join(rel_base, dirpath[rel_offset:])
+ real_dir = os.path.realpath(dirpath)
+ dir_stats = os.stat(real_dir)
+
+ if os.path.islink(dirpath):
+ if local_follow:
+ if (dir_stats.st_dev, dir_stats.st_ino) in parent_dirs:
+ # Just insert the symlink if the target directory
+ # exists inside of the copy already
+ r_files['symlinks'].append({"src": os.readlink(dirpath), "dest": dest_dirpath})
+ else:
+ # Walk the dirpath to find all parent directories.
+ new_parents = set()
+ parent_dir_list = os.path.dirname(dirpath).split(os.path.sep)
+ for parent in range(len(parent_dir_list), 0, -1):
+ parent_stat = os.stat(u'/'.join(parent_dir_list[:parent]))
+ if (parent_stat.st_dev, parent_stat.st_ino) in parent_dirs:
+ # Reached the point at which the directory
+ # tree is already known. Don't add any
+ # more or we might go to an ancestor that
+ # isn't being copied.
+ break
+ new_parents.add((parent_stat.st_dev, parent_stat.st_ino))
+
+ if (dir_stats.st_dev, dir_stats.st_ino) in new_parents:
+                                # This was a circular symlink, so add it as
+ # a symlink
+ r_files['symlinks'].append({"src": os.readlink(dirpath), "dest": dest_dirpath})
+ else:
+ # Walk the directory pointed to by the symlink
+ r_files['directories'].append({"src": real_dir, "dest": dest_dirpath})
+ offset = len(real_dir) + 1
+ _recurse(real_dir, offset, parent_dirs.union(new_parents),
+ rel_base=dest_dirpath,
+ checksum_check=checksum_check)
+ else:
+ # Add the symlink to the destination
+ r_files['symlinks'].append({"src": os.readlink(dirpath), "dest": dest_dirpath})
+ else:
+ # Just a normal directory
+ r_files['directories'].append({"src": dirpath, "dest": dest_dirpath})
+
+ # Check if the source ends with a "/" so that we know which directory
+ # level to work at (similar to rsync)
+ source_trailing_slash = False
+ if trailing_slash_detector:
+ source_trailing_slash = trailing_slash_detector(topdir)
+ else:
+ source_trailing_slash = topdir.endswith(os.path.sep)
+
+ # Calculate the offset needed to strip the base_path to make relative
+ # paths
+ if base_path is None:
+ base_path = topdir
+ if not source_trailing_slash:
+ base_path = os.path.dirname(base_path)
+ if topdir.startswith(base_path):
+ offset = len(base_path)
+
+ # Make sure we're making the new paths relative
+ if trailing_slash_detector and not trailing_slash_detector(base_path):
+ offset += 1
+ elif not base_path.endswith(os.path.sep):
+ offset += 1
+
+ if os.path.islink(topdir) and not local_follow:
+ r_files['symlinks'] = {"src": os.readlink(topdir), "dest": os.path.basename(topdir)}
+ return r_files
+
+ dir_stats = os.stat(topdir)
+ parents = frozenset(((dir_stats.st_dev, dir_stats.st_ino),))
+ # Actually walk the directory hierarchy
+ _recurse(topdir, offset, parents, checksum_check=checksum_check)
+
+ return r_files
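+
+# A minimal illustrative sketch (not used by the module) of the cycle check in
+# _walk_dirs above: walked directories are keyed by their (st_dev, st_ino)
+# pair, since two paths that resolve to the same device/inode are the same
+# directory. The function name and arguments here are hypothetical.
+def _example_is_symlink_cycle(dirpath, seen_inodes):
+    # Resolve the link target and test whether we have already walked it
+    st = os.stat(os.path.realpath(dirpath))
+    return (st.st_dev, st.st_ino) in seen_inodes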
+
+
+def _get_local_checksum(get_checksum, local_path):
+ if get_checksum:
+ return checksum(local_path)
+ else:
+ return None
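+
+
+# A hedged usage sketch (illustration only, never called by Ansible): per the
+# docstring above, _walk_dirs returns dicts under 'files', 'directories' and
+# 'symlinks'. Here 'loader' stands in for an action plugin's self._loader and
+# the path is hypothetical.
+def _example_walk_dirs_usage(loader):
+    tree = _walk_dirs('/tmp/example', loader, local_follow=False, checksum_check=True)
+    for entry in tree['files']:
+        # 'dest' is relative to the copy root; 'checksum' is None when
+        # checksum_check=False
+        print(entry['src'], entry['dest'], entry['checksum'])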
+
+
+class ActionModule(ActionBase):
+
+ WIN_PATH_SEPARATOR = "\\"
+
+ def _create_content_tempfile(self, content):
+ ''' Create a tempfile containing defined content '''
+ fd, content_tempfile = tempfile.mkstemp(dir=C.DEFAULT_LOCAL_TMP)
+ f = os.fdopen(fd, 'wb')
+ content = to_bytes(content)
+ try:
+ f.write(content)
+ except Exception as err:
+ os.remove(content_tempfile)
+ raise Exception(err)
+ finally:
+ f.close()
+ return content_tempfile
+
+ def _create_zip_tempfile(self, files, directories):
+ tmpdir = tempfile.mkdtemp(dir=C.DEFAULT_LOCAL_TMP)
+ zip_file_path = os.path.join(tmpdir, "win_copy.zip")
+ zip_file = zipfile.ZipFile(zip_file_path, "w", zipfile.ZIP_STORED, True)
+
+        # base64-encode the file/dir names so Windows can unzip a unicode
+        # filename and recover the right name; Windows doesn't handle unicode
+        # names in zip archives very well
+ for directory in directories:
+ directory_path = to_bytes(directory['src'], errors='surrogate_or_strict')
+ archive_path = to_bytes(directory['dest'], errors='surrogate_or_strict')
+
+ encoded_path = to_text(base64.b64encode(archive_path), errors='surrogate_or_strict')
+ zip_file.write(directory_path, encoded_path, zipfile.ZIP_DEFLATED)
+
+ for file in files:
+ file_path = to_bytes(file['src'], errors='surrogate_or_strict')
+ archive_path = to_bytes(file['dest'], errors='surrogate_or_strict')
+
+ encoded_path = to_text(base64.b64encode(archive_path), errors='surrogate_or_strict')
+ zip_file.write(file_path, encoded_path, zipfile.ZIP_DEFLATED)
+
+ return zip_file_path
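+
+    # A small sketch of the name encoding used above (illustration only, not
+    # called anywhere): the archive path is base64-encoded here and decoded
+    # again by win_copy.ps1, so unicode names survive the zip round-trip. The
+    # method name and 'dest' argument are hypothetical.
+    def _example_name_roundtrip(self, dest):
+        encoded = to_text(base64.b64encode(to_bytes(dest, errors='surrogate_or_strict')))
+        decoded = to_text(base64.b64decode(encoded), errors='surrogate_or_strict')
+        return encoded, decoded  # decoded == dest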
+
+ def _remove_tempfile_if_content_defined(self, content, content_tempfile):
+ if content is not None:
+ os.remove(content_tempfile)
+
+ def _copy_single_file(self, local_file, dest, source_rel, task_vars, tmp, backup):
+ if self._play_context.check_mode:
+ module_return = dict(changed=True)
+ return module_return
+
+ # copy the file across to the server
+ tmp_src = self._connection._shell.join_path(tmp, 'source')
+ self._transfer_file(local_file, tmp_src)
+
+ copy_args = self._task.args.copy()
+ copy_args.update(
+ dict(
+ dest=dest,
+ src=tmp_src,
+ _original_basename=source_rel,
+ _copy_mode="single",
+ backup=backup,
+ )
+ )
+ copy_args.pop('content', None)
+
+ copy_result = self._execute_module(module_name="copy",
+ module_args=copy_args,
+ task_vars=task_vars)
+
+ return copy_result
+
+ def _copy_zip_file(self, dest, files, directories, task_vars, tmp, backup):
+ # create local zip file containing all the files and directories that
+ # need to be copied to the server
+ if self._play_context.check_mode:
+ module_return = dict(changed=True)
+ return module_return
+
+ try:
+ zip_file = self._create_zip_tempfile(files, directories)
+ except Exception as e:
+ module_return = dict(
+ changed=False,
+ failed=True,
+ msg="failed to create tmp zip file: %s" % to_text(e),
+ exception=traceback.format_exc()
+ )
+ return module_return
+
+ zip_path = self._loader.get_real_file(zip_file)
+
+        # send the zip file to the remote host; the file must end in .zip so
+        # COM Shell.Application can open it
+ tmp_src = self._connection._shell.join_path(tmp, 'source.zip')
+ self._transfer_file(zip_path, tmp_src)
+
+ # run the explode operation of win_copy on remote
+ copy_args = self._task.args.copy()
+ copy_args.update(
+ dict(
+ src=tmp_src,
+ dest=dest,
+ _copy_mode="explode",
+ backup=backup,
+ )
+ )
+ copy_args.pop('content', None)
+ module_return = self._execute_module(module_name='copy',
+ module_args=copy_args,
+ task_vars=task_vars)
+ shutil.rmtree(os.path.dirname(zip_path))
+ return module_return
+
+ def run(self, tmp=None, task_vars=None):
+ ''' handler for file transfer operations '''
+ if task_vars is None:
+ task_vars = dict()
+
+ result = super(ActionModule, self).run(tmp, task_vars)
+ del tmp # tmp no longer has any effect
+
+ source = self._task.args.get('src', None)
+ content = self._task.args.get('content', None)
+ dest = self._task.args.get('dest', None)
+ remote_src = boolean(self._task.args.get('remote_src', False), strict=False)
+ local_follow = boolean(self._task.args.get('local_follow', False), strict=False)
+ force = boolean(self._task.args.get('force', True), strict=False)
+ decrypt = boolean(self._task.args.get('decrypt', True), strict=False)
+ backup = boolean(self._task.args.get('backup', False), strict=False)
+
+ result['src'] = source
+ result['dest'] = dest
+
+ result['failed'] = True
+ if (source is None and content is None) or dest is None:
+ result['msg'] = "src (or content) and dest are required"
+ elif source is not None and content is not None:
+ result['msg'] = "src and content are mutually exclusive"
+ elif content is not None and dest is not None and (
+ dest.endswith(os.path.sep) or dest.endswith(self.WIN_PATH_SEPARATOR)):
+ result['msg'] = "dest must be a file if content is defined"
+ else:
+ del result['failed']
+
+ if result.get('failed'):
+ return result
+
+ # If content is defined make a temp file and write the content into it
+ content_tempfile = None
+ if content is not None:
+ try:
+ # if content comes to us as a dict it should be decoded json.
+ # We need to encode it back into a string and write it out
+ if isinstance(content, dict) or isinstance(content, list):
+ content_tempfile = self._create_content_tempfile(json.dumps(content))
+ else:
+ content_tempfile = self._create_content_tempfile(content)
+ source = content_tempfile
+ except Exception as err:
+ result['failed'] = True
+ result['msg'] = "could not write content tmp file: %s" % to_native(err)
+ return result
+ # all actions should occur on the remote server, run win_copy module
+ elif remote_src:
+ new_module_args = self._task.args.copy()
+ new_module_args.update(
+ dict(
+ _copy_mode="remote",
+ dest=dest,
+ src=source,
+ force=force,
+ backup=backup,
+ )
+ )
+ new_module_args.pop('content', None)
+ result.update(self._execute_module(module_args=new_module_args, task_vars=task_vars))
+ return result
+ # find_needle returns a path that may not have a trailing slash on a
+ # directory so we need to find that out first and append at the end
+ else:
+ trailing_slash = source.endswith(os.path.sep)
+ try:
+ # find in expected paths
+ source = self._find_needle('files', source)
+ except AnsibleError as e:
+ result['failed'] = True
+ result['msg'] = to_text(e)
+ result['exception'] = traceback.format_exc()
+ return result
+
+ if trailing_slash != source.endswith(os.path.sep):
+ if source[-1] == os.path.sep:
+ source = source[:-1]
+ else:
+ source = source + os.path.sep
+
+        # A dict of the source files, directories and symlinks (src/dest pairs) which we will try to copy to the destination
+ source_files = {'files': [], 'directories': [], 'symlinks': []}
+
+ # If source is a directory populate our list else source is a file and translate it to a tuple.
+ if os.path.isdir(to_bytes(source, errors='surrogate_or_strict')):
+ result['operation'] = 'folder_copy'
+
+ # Get a list of the files we want to replicate on the remote side
+ source_files = _walk_dirs(source, self._loader, decrypt=decrypt, local_follow=local_follow,
+ trailing_slash_detector=self._connection._shell.path_has_trailing_slash,
+ checksum_check=force)
+
+            # If it's a recursive copy, the destination is always a dir,
+ # explicitly mark it so (note - win_copy module relies on this).
+ if not self._connection._shell.path_has_trailing_slash(dest):
+ dest = "%s%s" % (dest, self.WIN_PATH_SEPARATOR)
+
+ check_dest = dest
+ # Source is a file, add details to source_files dict
+ else:
+ result['operation'] = 'file_copy'
+
+ # If the local file does not exist, get_real_file() raises AnsibleFileNotFound
+ try:
+ source_full = self._loader.get_real_file(source, decrypt=decrypt)
+ except AnsibleFileNotFound as e:
+ result['failed'] = True
+                result['msg'] = "could not find src=%s, %s" % (source, to_text(e))
+ return result
+
+ original_basename = os.path.basename(source)
+ result['original_basename'] = original_basename
+
+ # check if dest ends with / or \ and append source filename to dest
+ if self._connection._shell.path_has_trailing_slash(dest):
+ check_dest = dest
+ filename = original_basename
+ result['dest'] = self._connection._shell.join_path(dest, filename)
+ else:
+ # replace \\ with / so we can use os.path to get the filename or dirname
+ unix_path = dest.replace(self.WIN_PATH_SEPARATOR, os.path.sep)
+ filename = os.path.basename(unix_path)
+ check_dest = os.path.dirname(unix_path)
+
+ file_checksum = _get_local_checksum(force, source_full)
+ source_files['files'].append(
+ dict(
+ src=source_full,
+ dest=filename,
+ checksum=file_checksum
+ )
+ )
+ result['checksum'] = file_checksum
+ result['size'] = os.path.getsize(to_bytes(source_full, errors='surrogate_or_strict'))
+
+ # find out the files/directories/symlinks that we need to copy to the server
+ query_args = self._task.args.copy()
+ query_args.update(
+ dict(
+ _copy_mode="query",
+ dest=check_dest,
+ force=force,
+ files=source_files['files'],
+ directories=source_files['directories'],
+ symlinks=source_files['symlinks'],
+ )
+ )
+        # src is not required for query; it would fail path validation if src has unix-allowed chars
+ query_args.pop('src', None)
+
+ query_args.pop('content', None)
+ query_return = self._execute_module(module_args=query_args,
+ task_vars=task_vars)
+
+ if query_return.get('failed') is True:
+ result.update(query_return)
+ return result
+
+        if (len(query_return['files']) > 0 or len(query_return['directories']) > 0) and self._connection._shell.tmpdir is None:
+ self._connection._shell.tmpdir = self._make_tmp_path()
+
+ if len(query_return['files']) == 1 and len(query_return['directories']) == 0:
+ # we only need to copy 1 file, don't mess around with zips
+ file_src = query_return['files'][0]['src']
+ file_dest = query_return['files'][0]['dest']
+ result.update(self._copy_single_file(file_src, dest, file_dest,
+ task_vars, self._connection._shell.tmpdir, backup))
+ if result.get('failed') is True:
+ result['msg'] = "failed to copy file %s: %s" % (file_src, result['msg'])
+ result['changed'] = True
+
+ elif len(query_return['files']) > 0 or len(query_return['directories']) > 0:
+ # either multiple files or directories need to be copied, compress
+ # to a zip and 'explode' the zip on the server
+ # TODO: handle symlinks
+ result.update(self._copy_zip_file(dest, source_files['files'],
+ source_files['directories'],
+ task_vars, self._connection._shell.tmpdir, backup))
+ result['changed'] = True
+ else:
+ # no operations need to occur
+ result['failed'] = False
+ result['changed'] = False
+
+ # remove the content tmp file and remote tmp file if it was created
+ self._remove_tempfile_if_content_defined(content, content_tempfile)
+ self._remove_tmp_path(self._connection._shell.tmpdir)
+ return result
diff --git a/test/support/windows-integration/collections/ansible_collections/ansible/windows/plugins/modules/async_status.ps1 b/test/support/windows-integration/collections/ansible_collections/ansible/windows/plugins/modules/async_status.ps1
index 6fc438d6..1ce3ff40 120000..100644
--- a/test/support/windows-integration/collections/ansible_collections/ansible/windows/plugins/modules/async_status.ps1
+++ b/test/support/windows-integration/collections/ansible_collections/ansible/windows/plugins/modules/async_status.ps1
@@ -1 +1,58 @@
-../../../../../../plugins/modules/async_status.ps1 \ No newline at end of file
+#!powershell
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+#Requires -Module Ansible.ModuleUtils.Legacy
+
+$results = @{changed=$false}
+
+$parsed_args = Parse-Args $args
+$jid = Get-AnsibleParam $parsed_args "jid" -failifempty $true -resultobj $results
+$mode = Get-AnsibleParam $parsed_args "mode" -Default "status" -ValidateSet "status","cleanup"
+
+# parsed in from the async_status action plugin
+$async_dir = Get-AnsibleParam $parsed_args "_async_dir" -type "path" -failifempty $true
+
+$log_path = [System.IO.Path]::Combine($async_dir, $jid)
+
+If(-not $(Test-Path $log_path))
+{
+ Fail-Json @{ansible_job_id=$jid; started=1; finished=1} "could not find job at '$async_dir'"
+}
+
+If($mode -eq "cleanup") {
+ Remove-Item $log_path -Recurse
+ Exit-Json @{ansible_job_id=$jid; erased=$log_path}
+}
+
+# NOT in cleanup mode, assume regular status mode
+# no remote kill mode currently exists, but one probably should;
+# consider a log_path + ".pid" file and also unlink that above
+
+$data = $null
+Try {
+ $data_raw = Get-Content $log_path
+
+ # TODO: move this into module_utils/powershell.ps1?
+ $jss = New-Object System.Web.Script.Serialization.JavaScriptSerializer
+ $data = $jss.DeserializeObject($data_raw)
+}
+Catch {
+ If(-not $data_raw) {
+ # file not written yet? That means it is running
+ Exit-Json @{results_file=$log_path; ansible_job_id=$jid; started=1; finished=0}
+ }
+ Else {
+        Fail-Json @{ansible_job_id=$jid; results_file=$log_path; started=1; finished=1} "Could not parse job output: $data_raw"
+ }
+}
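+
+# Note (an aside, not original logic): on PowerShell 3.0+ the same parse could
+# be done with the built-in ConvertFrom-Json cmdlet, e.g.
+#   $data = Get-Content $log_path -Raw | ConvertFrom-Json
+# but that yields a PSCustomObject rather than the dictionary the ContainsKey
+# checks below expect, so the JavaScriptSerializer above is kept.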
+
+If (-not $data.ContainsKey("started")) {
+ $data['finished'] = 1
+ $data['ansible_job_id'] = $jid
+}
+ElseIf (-not $data.ContainsKey("finished")) {
+ $data['finished'] = 0
+}
+
+Exit-Json $data
diff --git a/test/support/windows-integration/collections/ansible_collections/ansible/windows/plugins/modules/win_acl.ps1 b/test/support/windows-integration/collections/ansible_collections/ansible/windows/plugins/modules/win_acl.ps1
index 81d8afa3..e3c38130 120000..100644
--- a/test/support/windows-integration/collections/ansible_collections/ansible/windows/plugins/modules/win_acl.ps1
+++ b/test/support/windows-integration/collections/ansible_collections/ansible/windows/plugins/modules/win_acl.ps1
@@ -1 +1,225 @@
-../../../../../../plugins/modules/win_acl.ps1 \ No newline at end of file
+#!powershell
+
+# Copyright: (c) 2015, Phil Schwartz <schwartzmx@gmail.com>
+# Copyright: (c) 2015, Trond Hindenes
+# Copyright: (c) 2015, Hans-Joachim Kliemeck <git@kliemeck.de>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+#Requires -Module Ansible.ModuleUtils.Legacy
+#Requires -Module Ansible.ModuleUtils.PrivilegeUtil
+#Requires -Module Ansible.ModuleUtils.SID
+
+$ErrorActionPreference = "Stop"
+
+# win_acl module (File/Resources Permission Additions/Removal)
+
+#Functions
+function Get-UserSID {
+ param(
+ [String]$AccountName
+ )
+
+ $userSID = $null
+ $searchAppPools = $false
+
+ if ($AccountName.Split("\").Count -gt 1) {
+ if ($AccountName.Split("\")[0] -eq "IIS APPPOOL") {
+ $searchAppPools = $true
+ $AccountName = $AccountName.Split("\")[1]
+ }
+ }
+
+ if ($searchAppPools) {
+ Import-Module -Name WebAdministration
+ $testIISPath = Test-Path -LiteralPath "IIS:"
+ if ($testIISPath) {
+ $appPoolObj = Get-ItemProperty -LiteralPath "IIS:\AppPools\$AccountName"
+ $userSID = $appPoolObj.applicationPoolSid
+ }
+ }
+ else {
+ $userSID = Convert-ToSID -account_name $AccountName
+ }
+
+ return $userSID
+}
+
+$params = Parse-Args $args
+
+Function SetPrivilegeTokens() {
+ # Set privilege tokens only if admin.
+    # Admins would have these privs or be able to set these privs in the UI anyway
+
+ $adminRole=[System.Security.Principal.WindowsBuiltInRole]::Administrator
+ $myWindowsID=[System.Security.Principal.WindowsIdentity]::GetCurrent()
+ $myWindowsPrincipal=new-object System.Security.Principal.WindowsPrincipal($myWindowsID)
+
+
+ if ($myWindowsPrincipal.IsInRole($adminRole)) {
+ # Need to adjust token privs when executing Set-ACL in certain cases.
+        # e.g. d:\testdir is owned by a group in which the current user is not a member and no perms are inherited from d:\
+ # This also sets us up for setting the owner as a feature.
+ # See the following for details of each privilege
+ # https://msdn.microsoft.com/en-us/library/windows/desktop/bb530716(v=vs.85).aspx
+ $privileges = @(
+ "SeRestorePrivilege", # Grants all write access control to any file, regardless of ACL.
+ "SeBackupPrivilege", # Grants all read access control to any file, regardless of ACL.
+            "SeTakeOwnershipPrivilege" # Grants ability to take ownership of an object without being granted discretionary access
+ )
+ foreach ($privilege in $privileges) {
+ $state = Get-AnsiblePrivilege -Name $privilege
+ if ($state -eq $false) {
+ Set-AnsiblePrivilege -Name $privilege -Value $true
+ }
+ }
+ }
+}
+
+
+$result = @{
+ changed = $false
+}
+
+$path = Get-AnsibleParam -obj $params -name "path" -type "str" -failifempty $true
+$user = Get-AnsibleParam -obj $params -name "user" -type "str" -failifempty $true
+$rights = Get-AnsibleParam -obj $params -name "rights" -type "str" -failifempty $true
+
+$type = Get-AnsibleParam -obj $params -name "type" -type "str" -failifempty $true -validateset "allow","deny"
+$state = Get-AnsibleParam -obj $params -name "state" -type "str" -default "present" -validateset "absent","present"
+
+$inherit = Get-AnsibleParam -obj $params -name "inherit" -type "str"
+$propagation = Get-AnsibleParam -obj $params -name "propagation" -type "str" -default "None" -validateset "InheritOnly","None","NoPropagateInherit"
+
+# We mount the HKCR, HKU, and HKCC registry hives so PS can access them.
+# Network paths have no qualifiers so we use -EA SilentlyContinue to ignore that
+$path_qualifier = Split-Path -Path $path -Qualifier -ErrorAction SilentlyContinue
+if ($path_qualifier -eq "HKCR:" -and (-not (Test-Path -LiteralPath HKCR:\))) {
+ New-PSDrive -Name HKCR -PSProvider Registry -Root HKEY_CLASSES_ROOT > $null
+}
+if ($path_qualifier -eq "HKU:" -and (-not (Test-Path -LiteralPath HKU:\))) {
+ New-PSDrive -Name HKU -PSProvider Registry -Root HKEY_USERS > $null
+}
+if ($path_qualifier -eq "HKCC:" -and (-not (Test-Path -LiteralPath HKCC:\))) {
+ New-PSDrive -Name HKCC -PSProvider Registry -Root HKEY_CURRENT_CONFIG > $null
+}
+
+If (-Not (Test-Path -LiteralPath $path)) {
+ Fail-Json -obj $result -message "$path file or directory does not exist on the host"
+}
+
+# Test that the user/group is resolvable on the local machine
+$sid = Get-UserSID -AccountName $user
+if (!$sid) {
+ Fail-Json -obj $result -message "$user is not a valid user or group on the host machine or domain"
+}
+
+If (Test-Path -LiteralPath $path -PathType Leaf) {
+ $inherit = "None"
+}
+ElseIf ($null -eq $inherit) {
+ $inherit = "ContainerInherit, ObjectInherit"
+}
+
+# Bug in Set-Acl, Get-Acl where -LiteralPath only works for the Registry provider if the location is in that root
+# qualifier. We also don't have a qualifier for a network path so only change if not null
+if ($null -ne $path_qualifier) {
+ Push-Location -LiteralPath $path_qualifier
+}
+
+Try {
+ SetPrivilegeTokens
+ $path_item = Get-Item -LiteralPath $path -Force
+ If ($path_item.PSProvider.Name -eq "Registry") {
+ $colRights = [System.Security.AccessControl.RegistryRights]$rights
+ }
+ Else {
+ $colRights = [System.Security.AccessControl.FileSystemRights]$rights
+ }
+
+ $InheritanceFlag = [System.Security.AccessControl.InheritanceFlags]$inherit
+ $PropagationFlag = [System.Security.AccessControl.PropagationFlags]$propagation
+
+ If ($type -eq "allow") {
+ $objType =[System.Security.AccessControl.AccessControlType]::Allow
+ }
+ Else {
+ $objType =[System.Security.AccessControl.AccessControlType]::Deny
+ }
+
+ $objUser = New-Object System.Security.Principal.SecurityIdentifier($sid)
+ If ($path_item.PSProvider.Name -eq "Registry") {
+ $objACE = New-Object System.Security.AccessControl.RegistryAccessRule ($objUser, $colRights, $InheritanceFlag, $PropagationFlag, $objType)
+ }
+ Else {
+ $objACE = New-Object System.Security.AccessControl.FileSystemAccessRule ($objUser, $colRights, $InheritanceFlag, $PropagationFlag, $objType)
+ }
+ $objACL = Get-ACL -LiteralPath $path
+
+    # Check if the ACE already exists in the object's ACL list
+ $match = $false
+
+ ForEach($rule in $objACL.GetAccessRules($true, $true, [System.Security.Principal.SecurityIdentifier])){
+
+ If ($path_item.PSProvider.Name -eq "Registry") {
+ If (($rule.RegistryRights -eq $objACE.RegistryRights) -And ($rule.AccessControlType -eq $objACE.AccessControlType) -And ($rule.IdentityReference -eq $objACE.IdentityReference) -And ($rule.IsInherited -eq $objACE.IsInherited) -And ($rule.InheritanceFlags -eq $objACE.InheritanceFlags) -And ($rule.PropagationFlags -eq $objACE.PropagationFlags)) {
+ $match = $true
+ Break
+ }
+ } else {
+ If (($rule.FileSystemRights -eq $objACE.FileSystemRights) -And ($rule.AccessControlType -eq $objACE.AccessControlType) -And ($rule.IdentityReference -eq $objACE.IdentityReference) -And ($rule.IsInherited -eq $objACE.IsInherited) -And ($rule.InheritanceFlags -eq $objACE.InheritanceFlags) -And ($rule.PropagationFlags -eq $objACE.PropagationFlags)) {
+ $match = $true
+ Break
+ }
+ }
+ }
+
+ If ($state -eq "present" -And $match -eq $false) {
+ Try {
+ $objACL.AddAccessRule($objACE)
+ If ($path_item.PSProvider.Name -eq "Registry") {
+ Set-ACL -LiteralPath $path -AclObject $objACL
+ } else {
+ (Get-Item -LiteralPath $path).SetAccessControl($objACL)
+ }
+ $result.changed = $true
+ }
+ Catch {
+ Fail-Json -obj $result -message "an exception occurred when adding the specified rule - $($_.Exception.Message)"
+ }
+ }
+ ElseIf ($state -eq "absent" -And $match -eq $true) {
+ Try {
+ $objACL.RemoveAccessRule($objACE)
+ If ($path_item.PSProvider.Name -eq "Registry") {
+ Set-ACL -LiteralPath $path -AclObject $objACL
+ } else {
+ (Get-Item -LiteralPath $path).SetAccessControl($objACL)
+ }
+ $result.changed = $true
+ }
+ Catch {
+ Fail-Json -obj $result -message "an exception occurred when removing the specified rule - $($_.Exception.Message)"
+ }
+ }
+ Else {
+        # The rule we attempted to add already exists
+ If ($match -eq $true) {
+ Exit-Json -obj $result -message "the specified rule already exists"
+ }
+        # The rule we attempted to remove did not exist
+ Else {
+ Exit-Json -obj $result -message "the specified rule does not exist"
+ }
+ }
+}
+Catch {
+ Fail-Json -obj $result -message "an error occurred when attempting to $state $rights permission(s) on $path for $user - $($_.Exception.Message)"
+}
+Finally {
+    # Make sure we revert the location stack to the original path just for cleanup's sake
+ if ($null -ne $path_qualifier) {
+ Pop-Location
+ }
+}
+
+Exit-Json -obj $result
diff --git a/test/support/windows-integration/collections/ansible_collections/ansible/windows/plugins/modules/win_acl.py b/test/support/windows-integration/collections/ansible_collections/ansible/windows/plugins/modules/win_acl.py
index 3a2434cf..14fbd82f 120000..100644
--- a/test/support/windows-integration/collections/ansible_collections/ansible/windows/plugins/modules/win_acl.py
+++ b/test/support/windows-integration/collections/ansible_collections/ansible/windows/plugins/modules/win_acl.py
@@ -1 +1,132 @@
-../../../../../../plugins/modules/win_acl.py \ No newline at end of file
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2015, Phil Schwartz <schwartzmx@gmail.com>
+# Copyright: (c) 2015, Trond Hindenes
+# Copyright: (c) 2015, Hans-Joachim Kliemeck <git@kliemeck.de>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'core'}
+
+DOCUMENTATION = r'''
+---
+module: win_acl
+version_added: "2.0"
+short_description: Set file/directory/registry permissions for a system user or group
+description:
+- Add or remove rights/permissions for a given user or group for the specified
+  file, folder, registry key or AppPool identities.
+options:
+ path:
+ description:
+ - The path to the file or directory.
+ type: str
+ required: yes
+ user:
+ description:
+ - User or Group to add specified rights to act on src file/folder or
+ registry key.
+ type: str
+ required: yes
+ state:
+ description:
+ - Specify whether to add C(present) or remove C(absent) the specified access rule.
+ type: str
+ choices: [ absent, present ]
+ default: present
+ type:
+ description:
+ - Specify whether to allow or deny the rights specified.
+ type: str
+ required: yes
+ choices: [ allow, deny ]
+ rights:
+ description:
+ - The rights/permissions that are to be allowed/denied for the specified
+ user or group for the item at C(path).
+ - If C(path) is a file or directory, rights can be any right under MSDN
+ FileSystemRights U(https://msdn.microsoft.com/en-us/library/system.security.accesscontrol.filesystemrights.aspx).
+ - If C(path) is a registry key, rights can be any right under MSDN
+ RegistryRights U(https://msdn.microsoft.com/en-us/library/system.security.accesscontrol.registryrights.aspx).
+ type: str
+ required: yes
+ inherit:
+ description:
+ - Inherit flags on the ACL rules.
+ - Can be specified as a comma separated list, e.g. C(ContainerInherit),
+ C(ObjectInherit).
+ - For more information on the choices see MSDN InheritanceFlags enumeration
+ at U(https://msdn.microsoft.com/en-us/library/system.security.accesscontrol.inheritanceflags.aspx).
+ - Defaults to C(ContainerInherit, ObjectInherit) for Directories.
+ type: str
+ choices: [ ContainerInherit, ObjectInherit ]
+ propagation:
+ description:
+ - Propagation flag on the ACL rules.
+ - For more information on the choices see MSDN PropagationFlags enumeration
+ at U(https://msdn.microsoft.com/en-us/library/system.security.accesscontrol.propagationflags.aspx).
+ type: str
+ choices: [ InheritOnly, None, NoPropagateInherit ]
+ default: "None"
+notes:
+- If adding ACLs for AppPool identities (available since 2.3), the Windows
+ Feature "Web-Scripting-Tools" must be enabled.
+seealso:
+- module: win_acl_inheritance
+- module: win_file
+- module: win_owner
+- module: win_stat
+author:
+- Phil Schwartz (@schwartzmx)
+- Trond Hindenes (@trondhindenes)
+- Hans-Joachim Kliemeck (@h0nIg)
+'''
+
+EXAMPLES = r'''
+- name: Restrict write and execute access to User Fed-Phil
+ win_acl:
+ user: Fed-Phil
+ path: C:\Important\Executable.exe
+ type: deny
+ rights: ExecuteFile,Write
+
+- name: Add IIS_IUSRS allow rights
+ win_acl:
+ path: C:\inetpub\wwwroot\MySite
+ user: IIS_IUSRS
+ rights: FullControl
+ type: allow
+ state: present
+ inherit: ContainerInherit, ObjectInherit
+ propagation: 'None'
+
+- name: Set registry key right
+ win_acl:
+ path: HKCU:\Bovine\Key
+ user: BUILTIN\Users
+ rights: EnumerateSubKeys
+ type: allow
+ state: present
+ inherit: ContainerInherit, ObjectInherit
+ propagation: 'None'
+
+- name: Remove FullControl AccessRule for IIS_IUSRS
+ win_acl:
+ path: C:\inetpub\wwwroot\MySite
+ user: IIS_IUSRS
+ rights: FullControl
+ type: allow
+ state: absent
+ inherit: ContainerInherit, ObjectInherit
+ propagation: 'None'
+
+- name: Deny Intern
+ win_acl:
+ path: C:\Administrator\Documents
+ user: Intern
+ rights: Read,Write,Modify,FullControl,Delete
+ type: deny
+ state: present
+'''
diff --git a/test/support/windows-integration/collections/ansible_collections/ansible/windows/plugins/modules/win_copy.ps1 b/test/support/windows-integration/collections/ansible_collections/ansible/windows/plugins/modules/win_copy.ps1
index a34fb012..6a26ee72 120000..100644
--- a/test/support/windows-integration/collections/ansible_collections/ansible/windows/plugins/modules/win_copy.ps1
+++ b/test/support/windows-integration/collections/ansible_collections/ansible/windows/plugins/modules/win_copy.ps1
@@ -1 +1,403 @@
-../../../../../../plugins/modules/win_copy.ps1 \ No newline at end of file
+#!powershell
+
+# Copyright: (c) 2015, Jon Hawkesworth (@jhawkesworth) <figs@unity.demon.co.uk>
+# Copyright: (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+#Requires -Module Ansible.ModuleUtils.Legacy
+#Requires -Module Ansible.ModuleUtils.Backup
+
+$ErrorActionPreference = 'Stop'
+
+$params = Parse-Args -arguments $args -supports_check_mode $true
+$check_mode = Get-AnsibleParam -obj $params -name "_ansible_check_mode" -type "bool" -default $false
+$diff_mode = Get-AnsibleParam -obj $params -name "_ansible_diff" -type "bool" -default $false
+
+# there are 4 modes to win_copy which are driven by the action plugins:
+# explode: src is a zip file which needs to be extracted to dest, for use with multiple files
+# query: win_copy action plugin wants to get the state of remote files to check whether it needs to send them
+# remote: all copy action is happening remotely (remote_src=True)
+# single: a single file has been copied, also used with template
+$copy_mode = Get-AnsibleParam -obj $params -name "_copy_mode" -type "str" -default "single" -validateset "explode","query","remote","single"
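+
+# For reference (summarised from the win_copy action plugin, see
+# plugins/action/win_copy.py): remote_src=yes maps to _copy_mode=remote, a
+# single changed file to _copy_mode=single, and multiple files/directories to
+# _copy_mode=explode (zip + extract); for local-to-remote copies a query run
+# happens first to work out what actually changed.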
+
+# used in explode, remote and single mode
+$src = Get-AnsibleParam -obj $params -name "src" -type "path" -failifempty ($copy_mode -in @("explode","remote","single"))
+$dest = Get-AnsibleParam -obj $params -name "dest" -type "path" -failifempty $true
+$backup = Get-AnsibleParam -obj $params -name "backup" -type "bool" -default $false
+
+# used in single mode
+$original_basename = Get-AnsibleParam -obj $params -name "_original_basename" -type "str"
+
+# used in query and remote mode
+$force = Get-AnsibleParam -obj $params -name "force" -type "bool" -default $true
+
+# used in query mode, contains the local files/directories/symlinks that are to be copied
+$files = Get-AnsibleParam -obj $params -name "files" -type "list"
+$directories = Get-AnsibleParam -obj $params -name "directories" -type "list"
+
+$result = @{
+ changed = $false
+}
+
+if ($diff_mode) {
+ $result.diff = @{}
+}
+
+Function Copy-File($source, $dest) {
+ $diff = ""
+ $copy_file = $false
+ $source_checksum = $null
+ if ($force) {
+ $source_checksum = Get-FileChecksum -path $source
+ }
+
+ if (Test-Path -LiteralPath $dest -PathType Container) {
+ Fail-Json -obj $result -message "cannot copy file from '$source' to '$dest': dest is already a folder"
+ } elseif (Test-Path -LiteralPath $dest -PathType Leaf) {
+ if ($force) {
+ $target_checksum = Get-FileChecksum -path $dest
+ if ($source_checksum -ne $target_checksum) {
+ $copy_file = $true
+ }
+ }
+ } else {
+ $copy_file = $true
+ }
+
+ if ($copy_file) {
+ $file_dir = [System.IO.Path]::GetDirectoryName($dest)
+ # validate the parent dir is not a file and that it exists
+ if (Test-Path -LiteralPath $file_dir -PathType Leaf) {
+ Fail-Json -obj $result -message "cannot copy file from '$source' to '$dest': object at dest parent dir is not a folder"
+ } elseif (-not (Test-Path -LiteralPath $file_dir)) {
+ # directory doesn't exist, need to create
+ New-Item -Path $file_dir -ItemType Directory -WhatIf:$check_mode | Out-Null
+ $diff += "+$file_dir\`n"
+ }
+
+ if ($backup) {
+ $result.backup_file = Backup-File -path $dest -WhatIf:$check_mode
+ }
+
+ if (Test-Path -LiteralPath $dest -PathType Leaf) {
+ Remove-Item -LiteralPath $dest -Force -Recurse -WhatIf:$check_mode | Out-Null
+ $diff += "-$dest`n"
+ }
+
+ if (-not $check_mode) {
+            # cannot run with -WhatIf:$check_mode; if the parent dir didn't
+            # exist and was created above, it would still not exist in check mode
+ Copy-Item -LiteralPath $source -Destination $dest -Force | Out-Null
+ }
+ $diff += "+$dest`n"
+
+ $result.changed = $true
+ }
+
+ # ugly but to save us from running the checksum twice, let's return it for
+ # the main code to add it to $result
+ return ,@{ diff = $diff; checksum = $source_checksum }
+}
+
+Function Copy-Folder($source, $dest) {
+ $diff = ""
+
+ if (-not (Test-Path -LiteralPath $dest -PathType Container)) {
+ $parent_dir = [System.IO.Path]::GetDirectoryName($dest)
+ if (Test-Path -LiteralPath $parent_dir -PathType Leaf) {
+            Fail-Json -obj $result -message "cannot copy folder from '$source' to '$dest': object at dest parent dir is not a folder"
+ }
+ if (Test-Path -LiteralPath $dest -PathType Leaf) {
+ Fail-Json -obj $result -message "cannot copy folder from '$source' to '$dest': dest is already a file"
+ }
+
+ New-Item -Path $dest -ItemType Container -WhatIf:$check_mode | Out-Null
+ $diff += "+$dest\`n"
+ $result.changed = $true
+ }
+
+ $child_items = Get-ChildItem -LiteralPath $source -Force
+ foreach ($child_item in $child_items) {
+ $dest_child_path = Join-Path -Path $dest -ChildPath $child_item.Name
+ if ($child_item.PSIsContainer) {
+ $diff += (Copy-Folder -source $child_item.Fullname -dest $dest_child_path)
+ } else {
+ $diff += (Copy-File -source $child_item.Fullname -dest $dest_child_path).diff
+ }
+ }
+
+ return $diff
+}
+
+Function Get-FileSize($path) {
+ $file = Get-Item -LiteralPath $path -Force
+ if ($file.PSIsContainer) {
+ $size = (Get-ChildItem -Literalpath $file.FullName -Recurse -Force | `
+ Where-Object { $_.PSObject.Properties.Name -contains 'Length' } | `
+ Measure-Object -Property Length -Sum).Sum
+ if ($null -eq $size) {
+ $size = 0
+ }
+ } else {
+ $size = $file.Length
+ }
+
+ $size
+}
+
+Function Extract-Zip($src, $dest) {
+ $archive = [System.IO.Compression.ZipFile]::Open($src, [System.IO.Compression.ZipArchiveMode]::Read, [System.Text.Encoding]::UTF8)
+ foreach ($entry in $archive.Entries) {
+ $archive_name = $entry.FullName
+
+        # FullName may have a trailing / or \; use the base64 length (mod 4) to detect and strip it
+ $padding_length = $archive_name.Length % 4
+ if ($padding_length -eq 0) {
+ $is_dir = $false
+ $base64_name = $archive_name
+ } elseif ($padding_length -eq 1) {
+ $is_dir = $true
+ if ($archive_name.EndsWith("/") -or $archive_name.EndsWith("`\")) {
+ $base64_name = $archive_name.Substring(0, $archive_name.Length - 1)
+ } else {
+ throw "invalid base64 archive name '$archive_name'"
+ }
+ } else {
+ throw "invalid base64 length '$archive_name'"
+ }
+
+        # to handle unicode characters, the win_copy action plugin has encoded the filename
+ $decoded_archive_name = [System.Text.Encoding]::UTF8.GetString([System.Convert]::FromBase64String($base64_name))
+ # re-add the / to the entry full name if it was a directory
+ if ($is_dir) {
+ $decoded_archive_name = "$decoded_archive_name/"
+ }
+ $entry_target_path = [System.IO.Path]::Combine($dest, $decoded_archive_name)
+ $entry_dir = [System.IO.Path]::GetDirectoryName($entry_target_path)
+
+ if (-not (Test-Path -LiteralPath $entry_dir)) {
+ New-Item -Path $entry_dir -ItemType Directory -WhatIf:$check_mode | Out-Null
+ }
+
+ if ($is_dir -eq $false) {
+ if (-not $check_mode) {
+ [System.IO.Compression.ZipFileExtensions]::ExtractToFile($entry, $entry_target_path, $true)
+ }
+ }
+ }
+ $archive.Dispose() # release the handle of the zip file
+}
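+
+# A short note on the padding check above (explanatory sketch, not extra
+# logic): valid base64 strings always have a length that is a multiple of 4,
+# so an encoded name with a trailing "/" or "\" appended has Length % 4 -eq 1,
+# which is how directory entries are told apart from file entries:
+#   ("ZGly").Length % 4    -> 0, plain base64, a file entry
+#   ("ZGly/").Length % 4   -> 1, base64 plus separator, a directory entry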
+
+Function Extract-ZipLegacy($src, $dest) {
+ if (-not (Test-Path -LiteralPath $dest)) {
+ New-Item -Path $dest -ItemType Directory -WhatIf:$check_mode | Out-Null
+ }
+ $shell = New-Object -ComObject Shell.Application
+ $zip = $shell.NameSpace($src)
+ $dest_path = $shell.NameSpace($dest)
+
+ foreach ($entry in $zip.Items()) {
+ $is_dir = $entry.IsFolder
+ $encoded_archive_entry = $entry.Name
+        # to handle unicode characters, the win_copy action plugin has encoded the filename
+ $decoded_archive_entry = [System.Text.Encoding]::UTF8.GetString([System.Convert]::FromBase64String($encoded_archive_entry))
+ if ($is_dir) {
+ $decoded_archive_entry = "$decoded_archive_entry/"
+ }
+
+ $entry_target_path = [System.IO.Path]::Combine($dest, $decoded_archive_entry)
+ $entry_dir = [System.IO.Path]::GetDirectoryName($entry_target_path)
+
+ if (-not (Test-Path -LiteralPath $entry_dir)) {
+ New-Item -Path $entry_dir -ItemType Directory -WhatIf:$check_mode | Out-Null
+ }
+
+ if ($is_dir -eq $false -and (-not $check_mode)) {
+ # https://msdn.microsoft.com/en-us/library/windows/desktop/bb787866.aspx
+ # From Folder.CopyHere documentation, 1044 means:
+ # - 1024: do not display a user interface if an error occurs
+ # - 16: respond with "yes to all" for any dialog box that is displayed
+ # - 4: do not display a progress dialog box
+ $dest_path.CopyHere($entry, 1044)
+
+            # once the file is extracted, we need to rename it to its non-base64 name
+ $combined_encoded_path = [System.IO.Path]::Combine($dest, $encoded_archive_entry)
+ Move-Item -LiteralPath $combined_encoded_path -Destination $entry_target_path -Force | Out-Null
+ }
+ }
+}
+
+if ($copy_mode -eq "query") {
+    # we only return a list of files/directories that need to be copied over;
+    # the source of the local file will be the key used
+ $changed_files = @()
+ $changed_directories = @()
+ $changed_symlinks = @()
+
+ foreach ($file in $files) {
+ $filename = $file.dest
+ $local_checksum = $file.checksum
+
+ $filepath = Join-Path -Path $dest -ChildPath $filename
+ if (Test-Path -LiteralPath $filepath -PathType Leaf) {
+ if ($force) {
+ $checksum = Get-FileChecksum -path $filepath
+ if ($checksum -ne $local_checksum) {
+ $changed_files += $file
+ }
+ }
+ } elseif (Test-Path -LiteralPath $filepath -PathType Container) {
+ Fail-Json -obj $result -message "cannot copy file to dest '$filepath': object at path is already a directory"
+ } else {
+ $changed_files += $file
+ }
+ }
+
+ foreach ($directory in $directories) {
+ $dirname = $directory.dest
+
+ $dirpath = Join-Path -Path $dest -ChildPath $dirname
+ $parent_dir = [System.IO.Path]::GetDirectoryName($dirpath)
+ if (Test-Path -LiteralPath $parent_dir -PathType Leaf) {
+ Fail-Json -obj $result -message "cannot copy folder to dest '$dirpath': object at parent directory path is already a file"
+ }
+ if (Test-Path -LiteralPath $dirpath -PathType Leaf) {
+ Fail-Json -obj $result -message "cannot copy folder to dest '$dirpath': object at path is already a file"
+ } elseif (-not (Test-Path -LiteralPath $dirpath -PathType Container)) {
+ $changed_directories += $directory
+ }
+ }
+
+ # TODO: Handle symlinks
+
+ $result.files = $changed_files
+ $result.directories = $changed_directories
+ $result.symlinks = $changed_symlinks
+} elseif ($copy_mode -eq "explode") {
+    # a single zip file containing the files and directories needs to be
+    # expanded; this will always result in a change, as the calculation is
+    # done in the win_copy action plugin and this mode only runs when a change is needed
+ if (-not (Test-Path -LiteralPath $src -PathType Leaf)) {
+ Fail-Json -obj $result -message "Cannot expand src zip file: '$src' as it does not exist"
+ }
+
+ # Detect if the PS zip assemblies are available or whether to use Shell
+ $use_legacy = $false
+ try {
+ Add-Type -AssemblyName System.IO.Compression.FileSystem | Out-Null
+ Add-Type -AssemblyName System.IO.Compression | Out-Null
+ } catch {
+ $use_legacy = $true
+ }
+ if ($use_legacy) {
+ Extract-ZipLegacy -src $src -dest $dest
+ } else {
+ Extract-Zip -src $src -dest $dest
+ }
+
+ $result.changed = $true
+} elseif ($copy_mode -eq "remote") {
+    # all copy actions are happening on the remote side (windows host); we
+    # need to copy from src to dest using PS code
+ $result.src = $src
+ $result.dest = $dest
+
+ if (-not (Test-Path -LiteralPath $src)) {
+ Fail-Json -obj $result -message "Cannot copy src file: '$src' as it does not exist"
+ }
+
+ if (Test-Path -LiteralPath $src -PathType Container) {
+ # we are copying a directory or the contents of a directory
+ $result.operation = 'folder_copy'
+ if ($src.EndsWith("/") -or $src.EndsWith("`\")) {
+ # copying the folder's contents to dest
+ $diff = ""
+ $child_files = Get-ChildItem -LiteralPath $src -Force
+ foreach ($child_file in $child_files) {
+ $dest_child_path = Join-Path -Path $dest -ChildPath $child_file.Name
+ if ($child_file.PSIsContainer) {
+ $diff += Copy-Folder -source $child_file.FullName -dest $dest_child_path
+ } else {
+ $diff += (Copy-File -source $child_file.FullName -dest $dest_child_path).diff
+ }
+ }
+ } else {
+        # copying the folder and its contents to dest
+ $dest = Join-Path -Path $dest -ChildPath (Get-Item -LiteralPath $src -Force).Name
+ $result.dest = $dest
+ $diff = Copy-Folder -source $src -dest $dest
+ }
+ } else {
+ # we are just copying a single file to dest
+ $result.operation = 'file_copy'
+
+ $source_basename = (Get-Item -LiteralPath $src -Force).Name
+ $result.original_basename = $source_basename
+
+ if ($dest.EndsWith("/") -or $dest.EndsWith("`\")) {
+ $dest = Join-Path -Path $dest -ChildPath (Get-Item -LiteralPath $src -Force).Name
+ $result.dest = $dest
+ } else {
+ # check if the parent dir exists, this is only done if src is a
+            # file and dest is the path to a file (doesn't end with \ or /)
+ $parent_dir = Split-Path -LiteralPath $dest
+ if (Test-Path -LiteralPath $parent_dir -PathType Leaf) {
+ Fail-Json -obj $result -message "object at destination parent dir '$parent_dir' is currently a file"
+ } elseif (-not (Test-Path -LiteralPath $parent_dir -PathType Container)) {
+ Fail-Json -obj $result -message "Destination directory '$parent_dir' does not exist"
+ }
+ }
+ $copy_result = Copy-File -source $src -dest $dest
+ $diff = $copy_result.diff
+ $result.checksum = $copy_result.checksum
+ }
+
+ # the file might not exist if running in check mode
+ if (-not $check_mode -or (Test-Path -LiteralPath $dest -PathType Leaf)) {
+ $result.size = Get-FileSize -path $dest
+ } else {
+ $result.size = $null
+ }
+ if ($diff_mode) {
+ $result.diff.prepared = $diff
+ }
+} elseif ($copy_mode -eq "single") {
+ # a single file is located in src and we need to copy to dest, this will
+ # always result in a change as the calculation is done on the Ansible side
+ # before this is run. This should also never run in check mode
+ if (-not (Test-Path -LiteralPath $src -PathType Leaf)) {
+ Fail-Json -obj $result -message "Cannot copy src file: '$src' as it does not exist"
+ }
+
+ # the dest parameter is a directory, we need to append original_basename
+ if ($dest.EndsWith("/") -or $dest.EndsWith("`\") -or (Test-Path -LiteralPath $dest -PathType Container)) {
+ $remote_dest = Join-Path -Path $dest -ChildPath $original_basename
+ $parent_dir = Split-Path -LiteralPath $remote_dest
+
+ # when dest ends with /, we need to create the destination directories
+ if (Test-Path -LiteralPath $parent_dir -PathType Leaf) {
+ Fail-Json -obj $result -message "object at destination parent dir '$parent_dir' is currently a file"
+ } elseif (-not (Test-Path -LiteralPath $parent_dir -PathType Container)) {
+ New-Item -Path $parent_dir -ItemType Directory | Out-Null
+ }
+ } else {
+ $remote_dest = $dest
+ $parent_dir = Split-Path -LiteralPath $remote_dest
+
+ # check if the dest parent dirs exist, need to fail if they don't
+ if (Test-Path -LiteralPath $parent_dir -PathType Leaf) {
+ Fail-Json -obj $result -message "object at destination parent dir '$parent_dir' is currently a file"
+ } elseif (-not (Test-Path -LiteralPath $parent_dir -PathType Container)) {
+ Fail-Json -obj $result -message "Destination directory '$parent_dir' does not exist"
+ }
+ }
+
+ if ($backup) {
+ $result.backup_file = Backup-File -path $remote_dest -WhatIf:$check_mode
+ }
+
+ Copy-Item -LiteralPath $src -Destination $remote_dest -Force | Out-Null
+ $result.changed = $true
+}
+
+Exit-Json -obj $result
diff --git a/test/support/windows-integration/collections/ansible_collections/ansible/windows/plugins/modules/win_copy.py b/test/support/windows-integration/collections/ansible_collections/ansible/windows/plugins/modules/win_copy.py
index 2d2c69a2..a55f4c65 120000..100644
--- a/test/support/windows-integration/collections/ansible_collections/ansible/windows/plugins/modules/win_copy.py
+++ b/test/support/windows-integration/collections/ansible_collections/ansible/windows/plugins/modules/win_copy.py
@@ -1 +1,207 @@
-../../../../../../plugins/modules/win_copy.py \ No newline at end of file
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2015, Jon Hawkesworth (@jhawkesworth) <figs@unity.demon.co.uk>
+# Copyright: (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['stableinterface'],
+ 'supported_by': 'core'}
+
+DOCUMENTATION = r'''
+---
+module: win_copy
+version_added: '1.9.2'
+short_description: Copies files to remote locations on windows hosts
+description:
+- The C(win_copy) module copies a file on the local box to remote windows locations.
+- For non-Windows targets, use the M(copy) module instead.
+options:
+ content:
+ description:
+ - When used instead of C(src), sets the contents of a file directly to the
+ specified value.
+ - This is for simple values, for anything complex or with formatting please
+ switch to the M(template) module.
+ type: str
+ version_added: '2.3'
+ decrypt:
+ description:
+ - This option controls the autodecryption of source files using vault.
+ type: bool
+ default: yes
+ version_added: '2.5'
+ dest:
+ description:
+ - Remote absolute path where the file should be copied to.
+ - If C(src) is a directory, this must be a directory too.
+ - Use \ for path separators or \\ when in "double quotes".
+ - If C(dest) ends with \ then source or the contents of source will be
+ copied to the directory without renaming.
+ - If C(dest) is a nonexistent path, it will only be created if C(dest) ends
+ with "/" or "\", or C(src) is a directory.
+ - If C(src) and C(dest) are files and if the parent directory of C(dest)
+ doesn't exist, then the task will fail.
+ type: path
+ required: yes
+ backup:
+ description:
+ - Determine whether a backup should be created.
+ - When set to C(yes), create a backup file including the timestamp information
+ so you can get the original file back if you somehow clobbered it incorrectly.
+ - No backup is taken when C(remote_src=False) and multiple files are being
+ copied.
+ type: bool
+ default: no
+ version_added: '2.8'
+ force:
+ description:
+    - If set to C(yes), the file will only be transferred if the content
+      is different from the destination.
+    - If set to C(no), the file will only be transferred if the
+      destination does not exist.
+    - If set to C(no), no checksumming of the content is performed, which can
+      help improve performance on larger files.
+ type: bool
+ default: yes
+ version_added: '2.3'
+ local_follow:
+ description:
+ - This flag indicates that filesystem links in the source tree, if they
+ exist, should be followed.
+ type: bool
+ default: yes
+ version_added: '2.4'
+ remote_src:
+ description:
+    - If C(no), it will search for C(src) on the originating/master machine.
+ - If C(yes), it will go to the remote/target machine for the src.
+ type: bool
+ default: no
+ version_added: '2.3'
+ src:
+ description:
+ - Local path to a file to copy to the remote server; can be absolute or
+ relative.
+ - If path is a directory, it is copied (including the source folder name)
+ recursively to C(dest).
+    - If path is a directory and ends with "/", only the contents of that
+      directory are copied to the destination. Otherwise, if it does not
+      end with "/", the directory itself with all contents is copied.
+ - If path is a file and dest ends with "\", the file is copied to the
+ folder with the same filename.
+ - Required unless using C(content).
+ type: path
+notes:
+- Currently win_copy does not support copying symbolic links, either from local
+  to remote or from remote to remote.
+- It is recommended that backslashes C(\) are used instead of C(/) when dealing
+ with remote paths.
+- Because win_copy runs over WinRM, it is not a very efficient transfer
+ mechanism. If sending large files consider hosting them on a web service and
+ using M(win_get_url) instead.
+seealso:
+- module: assemble
+- module: copy
+- module: win_get_url
+- module: win_robocopy
+author:
+- Jon Hawkesworth (@jhawkesworth)
+- Jordan Borean (@jborean93)
+'''
+
+EXAMPLES = r'''
+- name: Copy a single file
+ win_copy:
+ src: /srv/myfiles/foo.conf
+ dest: C:\Temp\renamed-foo.conf
+
+- name: Copy a single file, but keep a backup
+ win_copy:
+ src: /srv/myfiles/foo.conf
+ dest: C:\Temp\renamed-foo.conf
+ backup: yes
+
+- name: Copy a single file keeping the filename
+ win_copy:
+ src: /src/myfiles/foo.conf
+ dest: C:\Temp\
+
+- name: Copy folder to C:\Temp (results in C:\Temp\temp_files)
+ win_copy:
+ src: files/temp_files
+ dest: C:\Temp
+
+- name: Copy folder contents recursively
+ win_copy:
+ src: files/temp_files/
+ dest: C:\Temp
+
+- name: Copy a single file where the source is on the remote host
+ win_copy:
+ src: C:\Temp\foo.txt
+ dest: C:\ansible\foo.txt
+ remote_src: yes
+
+- name: Copy a folder recursively where the source is on the remote host
+ win_copy:
+ src: C:\Temp
+ dest: C:\ansible
+ remote_src: yes
+
+- name: Set the contents of a file
+ win_copy:
+ content: abc123
+ dest: C:\Temp\foo.txt
+
+- name: Copy a single file as another user
+ win_copy:
+ src: NuGet.config
+ dest: '%AppData%\NuGet\NuGet.config'
+ vars:
+ ansible_become_user: user
+ ansible_become_password: pass
+ # The tmp dir must be set when using win_copy as another user
+ # This ensures the become user will have permissions for the operation
+    # Make sure to specify a folder both the ansible_user and the become_user have access to (i.e. not %TEMP%, which is user-specific and requires Admin)
+ ansible_remote_tmp: 'c:\tmp'
+'''
+
+RETURN = r'''
+backup_file:
+ description: Name of the backup file that was created.
+ returned: if backup=yes
+ type: str
+ sample: C:\Path\To\File.txt.11540.20150212-220915.bak
+dest:
+ description: Destination file/path.
+ returned: changed
+ type: str
+ sample: C:\Temp\
+src:
+ description: Source file used for the copy on the target machine.
+ returned: changed
+ type: str
+ sample: /home/httpd/.ansible/tmp/ansible-tmp-1423796390.97-147729857856000/source
+checksum:
+ description: SHA1 checksum of the file after running copy.
+ returned: success, src is a file
+ type: str
+ sample: 6e642bb8dd5c2e027bf21dd923337cbb4214f827
+size:
+ description: Size of the target, after execution.
+ returned: changed, src is a file
+ type: int
+ sample: 1220
+operation:
+ description: Whether a single file copy took place or a folder copy.
+ returned: success
+ type: str
+ sample: file_copy
+original_basename:
+ description: Basename of the copied file.
+ returned: changed, src is a file
+ type: str
+ sample: foo.txt
+'''
diff --git a/test/support/windows-integration/collections/ansible_collections/ansible/windows/plugins/modules/win_file.ps1 b/test/support/windows-integration/collections/ansible_collections/ansible/windows/plugins/modules/win_file.ps1
index 8ee5c2b5..54427549 120000..100644
--- a/test/support/windows-integration/collections/ansible_collections/ansible/windows/plugins/modules/win_file.ps1
+++ b/test/support/windows-integration/collections/ansible_collections/ansible/windows/plugins/modules/win_file.ps1
@@ -1 +1,152 @@
-../../../../../../plugins/modules/win_file.ps1 \ No newline at end of file
+#!powershell
+
+# Copyright: (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+#Requires -Module Ansible.ModuleUtils.Legacy
+
+$ErrorActionPreference = "Stop"
+
+$params = Parse-Args $args -supports_check_mode $true
+
+$check_mode = Get-AnsibleParam -obj $params -name "_ansible_check_mode" -default $false
+$_remote_tmp = Get-AnsibleParam $params "_ansible_remote_tmp" -type "path" -default $env:TMP
+
+$path = Get-AnsibleParam -obj $params -name "path" -type "path" -failifempty $true -aliases "dest","name"
+$state = Get-AnsibleParam -obj $params -name "state" -type "str" -validateset "absent","directory","file","touch"
+
+# used in template/copy when dest is the path to a dir and source is a file
+$original_basename = Get-AnsibleParam -obj $params -name "_original_basename" -type "str"
+if ((Test-Path -LiteralPath $path -PathType Container) -and ($null -ne $original_basename)) {
+ $path = Join-Path -Path $path -ChildPath $original_basename
+}
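+# e.g. (illustrative values only) path C:\Temp being an existing directory and
+# _original_basename foo.conf make the effective path C:\Temp\foo.conf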
+
+$result = @{
+ changed = $false
+}
+
+# Used to delete symlinks as powershell cannot delete broken symlinks
+$symlink_util = @"
+using System;
+using System.ComponentModel;
+using System.Runtime.InteropServices;
+
+namespace Ansible.Command {
+ public class SymLinkHelper {
+ [DllImport("kernel32.dll", CharSet=CharSet.Unicode, SetLastError=true)]
+ public static extern bool DeleteFileW(string lpFileName);
+
+ [DllImport("kernel32.dll", CharSet=CharSet.Unicode, SetLastError=true)]
+ public static extern bool RemoveDirectoryW(string lpPathName);
+
+ public static void DeleteDirectory(string path) {
+ if (!RemoveDirectoryW(path))
+ throw new Exception(String.Format("RemoveDirectoryW({0}) failed: {1}", path, new Win32Exception(Marshal.GetLastWin32Error()).Message));
+ }
+
+ public static void DeleteFile(string path) {
+ if (!DeleteFileW(path))
+ throw new Exception(String.Format("DeleteFileW({0}) failed: {1}", path, new Win32Exception(Marshal.GetLastWin32Error()).Message));
+ }
+ }
+}
+"@
+$original_tmp = $env:TMP
+$env:TMP = $_remote_tmp
+Add-Type -TypeDefinition $symlink_util
+$env:TMP = $original_tmp
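+# Add-Type compiles the C# above into a temporary directory; swapping TMP to
+# the Ansible remote_tmp for the duration keeps the compilation working when
+# the default TMP may not be writable (e.g. when running as a become user),
+# and the original value is restored afterwards.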
+
+# Used to delete directories and files with logic on handling symbolic links
+function Remove-File($file, $checkmode) {
+ try {
+ if ($file.Attributes -band [System.IO.FileAttributes]::ReparsePoint) {
+            # PowerShell bug: deleting a symbolic link that points to an invalid
+            # path fails, so the Win32 API is used here instead
+ if ($file.PSIsContainer) {
+ if (-not $checkmode) {
+ [Ansible.Command.SymLinkHelper]::DeleteDirectory($file.FullName)
+ }
+ } else {
+ if (-not $checkmode) {
+                    [Ansible.Command.SymLinkHelper]::DeleteFile($file.FullName)
+ }
+ }
+ } elseif ($file.PSIsContainer) {
+ Remove-Directory -directory $file -checkmode $checkmode
+ } else {
+ Remove-Item -LiteralPath $file.FullName -Force -WhatIf:$checkmode
+ }
+ } catch [Exception] {
+ Fail-Json $result "Failed to delete $($file.FullName): $($_.Exception.Message)"
+ }
+}
+
+function Remove-Directory($directory, $checkmode) {
+ foreach ($file in Get-ChildItem -LiteralPath $directory.FullName) {
+ Remove-File -file $file -checkmode $checkmode
+ }
+ Remove-Item -LiteralPath $directory.FullName -Force -Recurse -WhatIf:$checkmode
+}
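+# Remove-File and Remove-Directory recurse into each other so every child is
+# checked for reparse points before deletion; links are removed via the Win32
+# helpers above without following (and deleting) their targets.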
+
+
+if ($state -eq "touch") {
+ if (Test-Path -LiteralPath $path) {
+ if (-not $check_mode) {
+            # use Get-Item so the path itself is touched, not a directory's children
+            (Get-Item -LiteralPath $path -Force).LastWriteTime = Get-Date
+ }
+ $result.changed = $true
+ } else {
+ Write-Output $null | Out-File -LiteralPath $path -Encoding ASCII -WhatIf:$check_mode
+ $result.changed = $true
+ }
+}
+
+if (Test-Path -LiteralPath $path) {
+ $fileinfo = Get-Item -LiteralPath $path -Force
+ if ($state -eq "absent") {
+ Remove-File -file $fileinfo -checkmode $check_mode
+ $result.changed = $true
+ } else {
+ if ($state -eq "directory" -and -not $fileinfo.PsIsContainer) {
+ Fail-Json $result "path $path is not a directory"
+ }
+
+ if ($state -eq "file" -and $fileinfo.PsIsContainer) {
+ Fail-Json $result "path $path is not a file"
+ }
+ }
+
+} else {
+
+ # If state is not supplied, test the $path to see if it looks like
+ # a file or a folder and set state to file or folder
+ if ($null -eq $state) {
+ $basename = Split-Path -Path $path -Leaf
+ if ($basename.length -gt 0) {
+ $state = "file"
+ } else {
+ $state = "directory"
+ }
+ }
+
+ if ($state -eq "directory") {
+ try {
+ New-Item -Path $path -ItemType Directory -WhatIf:$check_mode | Out-Null
+ } catch {
+ if ($_.CategoryInfo.Category -eq "ResourceExists") {
+ $fileinfo = Get-Item -LiteralPath $_.CategoryInfo.TargetName
+ if ($state -eq "directory" -and -not $fileinfo.PsIsContainer) {
+ Fail-Json $result "path $path is not a directory"
+ }
+ } else {
+ Fail-Json $result $_.Exception.Message
+ }
+ }
+ $result.changed = $true
+ } elseif ($state -eq "file") {
+ Fail-Json $result "path $path will not be created"
+ }
+
+}
+
+Exit-Json $result
diff --git a/test/support/windows-integration/collections/ansible_collections/ansible/windows/plugins/modules/win_file.py b/test/support/windows-integration/collections/ansible_collections/ansible/windows/plugins/modules/win_file.py
index b4bc0583..28149579 120000..100644
--- a/test/support/windows-integration/collections/ansible_collections/ansible/windows/plugins/modules/win_file.py
+++ b/test/support/windows-integration/collections/ansible_collections/ansible/windows/plugins/modules/win_file.py
@@ -1 +1,70 @@
-../../../../../../plugins/modules/win_file.py \ No newline at end of file
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2015, Jon Hawkesworth (@jhawkesworth) <figs@unity.demon.co.uk>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['stableinterface'],
+ 'supported_by': 'core'}
+
+DOCUMENTATION = r'''
+---
+module: win_file
+version_added: "1.9.2"
+short_description: Creates, touches or removes files or directories
+description:
+ - Creates (empty) files, updates file modification stamps of existing files,
+ and can create or remove directories.
+ - Unlike M(file), does not modify ownership, permissions or manipulate links.
+ - For non-Windows targets, use the M(file) module instead.
+options:
+ path:
+ description:
+ - Path to the file being managed.
+ required: yes
+ type: path
+ aliases: [ dest, name ]
+ state:
+ description:
+ - If C(directory), all immediate subdirectories will be created if they
+ do not exist.
+ - If C(file), the file will NOT be created if it does not exist, see the M(copy)
+ or M(template) module if you want that behavior.
+ - If C(absent), directories will be recursively deleted, and files will be removed.
+ - If C(touch), an empty file will be created if the C(path) does not
+ exist, while an existing file or directory will receive updated file access and
+ modification times (similar to the way C(touch) works from the command line).
+ type: str
+ choices: [ absent, directory, file, touch ]
+seealso:
+- module: file
+- module: win_acl
+- module: win_acl_inheritance
+- module: win_owner
+- module: win_stat
+author:
+- Jon Hawkesworth (@jhawkesworth)
+'''
+
+EXAMPLES = r'''
+- name: Touch a file (creates if not present, updates modification time if present)
+ win_file:
+ path: C:\Temp\foo.conf
+ state: touch
+
+- name: Remove a file, if present
+ win_file:
+ path: C:\Temp\foo.conf
+ state: absent
+
+- name: Create directory structure
+ win_file:
+ path: C:\Temp\folder\subfolder
+ state: directory
+
+- name: Remove directory structure
+ win_file:
+ path: C:\Temp
+ state: absent
+'''
diff --git a/test/support/windows-integration/collections/ansible_collections/ansible/windows/plugins/modules/win_ping.ps1 b/test/support/windows-integration/collections/ansible_collections/ansible/windows/plugins/modules/win_ping.ps1
index d7b25ed0..c848b912 120000..100644
--- a/test/support/windows-integration/collections/ansible_collections/ansible/windows/plugins/modules/win_ping.ps1
+++ b/test/support/windows-integration/collections/ansible_collections/ansible/windows/plugins/modules/win_ping.ps1
@@ -1 +1,21 @@
-../../../../../../plugins/modules/win_ping.ps1 \ No newline at end of file
+#!powershell
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+#AnsibleRequires -CSharpUtil Ansible.Basic
+
+$spec = @{
+ options = @{
+ data = @{ type = "str"; default = "pong" }
+ }
+ supports_check_mode = $true
+}
+$module = [Ansible.Basic.AnsibleModule]::Create($args, $spec)
+$data = $module.Params.data
+
+if ($data -eq "crash") {
+ throw "boom"
+}
+
+$module.Result.ping = $data
+$module.ExitJson()
diff --git a/test/support/windows-integration/collections/ansible_collections/ansible/windows/plugins/modules/win_ping.py b/test/support/windows-integration/collections/ansible_collections/ansible/windows/plugins/modules/win_ping.py
index 0b97c87b..6d35f379 120000..100644
--- a/test/support/windows-integration/collections/ansible_collections/ansible/windows/plugins/modules/win_ping.py
+++ b/test/support/windows-integration/collections/ansible_collections/ansible/windows/plugins/modules/win_ping.py
@@ -1 +1,55 @@
-../../../../../../plugins/modules/win_ping.py \ No newline at end of file
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>, and others
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# this is a windows documentation stub. actual code lives in the .ps1
+# file of the same name
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['stableinterface'],
+ 'supported_by': 'core'}
+
+DOCUMENTATION = r'''
+---
+module: win_ping
+version_added: "1.7"
+short_description: A windows version of the classic ping module
+description:
+ - Checks management connectivity of a windows host.
+  - This is NOT ICMP ping; this is just a trivial test module.
+ - For non-Windows targets, use the M(ping) module instead.
+ - For Network targets, use the M(net_ping) module instead.
+options:
+ data:
+ description:
+ - Alternate data to return instead of 'pong'.
+ - If this parameter is set to C(crash), the module will cause an exception.
+ type: str
+ default: pong
+seealso:
+- module: ping
+author:
+- Chris Church (@cchurch)
+'''
+
+EXAMPLES = r'''
+# Test connectivity to a windows host
+# ansible winserver -m win_ping
+
+- name: Example from an Ansible Playbook
+ win_ping:
+
+- name: Induce an exception to see what happens
+ win_ping:
+ data: crash
+'''
+
+RETURN = r'''
+ping:
+ description: Value provided with the data parameter.
+ returned: success
+ type: str
+ sample: pong
+'''
diff --git a/test/support/windows-integration/collections/ansible_collections/ansible/windows/plugins/modules/win_shell.ps1 b/test/support/windows-integration/collections/ansible_collections/ansible/windows/plugins/modules/win_shell.ps1
index eb07a017..54aef8de 120000..100644
--- a/test/support/windows-integration/collections/ansible_collections/ansible/windows/plugins/modules/win_shell.ps1
+++ b/test/support/windows-integration/collections/ansible_collections/ansible/windows/plugins/modules/win_shell.ps1
@@ -1 +1,138 @@
-../../../../../../plugins/modules/win_shell.ps1 \ No newline at end of file
+#!powershell
+
+# Copyright: (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+#Requires -Module Ansible.ModuleUtils.Legacy
+#Requires -Module Ansible.ModuleUtils.CommandUtil
+#Requires -Module Ansible.ModuleUtils.FileUtil
+
+# TODO: add check mode support
+
+Set-StrictMode -Version 2
+$ErrorActionPreference = "Stop"
+
+# Cleanse CLIXML from stderr (sift out error stream data, discard others for now)
+Function Cleanse-Stderr($raw_stderr) {
+ Try {
+ # NB: this regex isn't perfect, but is decent at finding CLIXML amongst other stderr noise
+ If($raw_stderr -match "(?s)(?<prenoise1>.*)#< CLIXML(?<prenoise2>.*)(?<clixml><Objs.+</Objs>)(?<postnoise>.*)") {
+ $clixml = [xml]$matches["clixml"]
+
+ $merged_stderr = "{0}{1}{2}{3}" -f @(
+ $matches["prenoise1"],
+ $matches["prenoise2"],
+ # filter out just the Error-tagged strings for now, and zap embedded CRLF chars
+ ($clixml.Objs.ChildNodes | Where-Object { $_.Name -eq 'S' } | Where-Object { $_.S -eq 'Error' } | ForEach-Object { $_.'#text'.Replace('_x000D__x000A_','') } | Out-String),
+ $matches["postnoise"]) | Out-String
+
+ return $merged_stderr.Trim()
+
+ # FUTURE: parse/return other streams
+ }
+ Else {
+ $raw_stderr
+ }
+ }
+ Catch {
+ "***EXCEPTION PARSING CLIXML: $_***" + $raw_stderr
+ }
+}
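+# Illustrative only - stderr noise such as:
+#   #< CLIXML
+#   <Objs Version="1.1.0.1"><S S="Error">oops_x000D__x000A_</S></Objs>
+# is reduced by the function above to the plain string 'oops'.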
+
+$params = Parse-Args $args -supports_check_mode $false
+
+$raw_command_line = Get-AnsibleParam -obj $params -name "_raw_params" -type "str" -failifempty $true
+$chdir = Get-AnsibleParam -obj $params -name "chdir" -type "path"
+$executable = Get-AnsibleParam -obj $params -name "executable" -type "path"
+$creates = Get-AnsibleParam -obj $params -name "creates" -type "path"
+$removes = Get-AnsibleParam -obj $params -name "removes" -type "path"
+$stdin = Get-AnsibleParam -obj $params -name "stdin" -type "str"
+$no_profile = Get-AnsibleParam -obj $params -name "no_profile" -type "bool" -default $false
+$output_encoding_override = Get-AnsibleParam -obj $params -name "output_encoding_override" -type "str"
+
+$raw_command_line = $raw_command_line.Trim()
+
+$result = @{
+ changed = $true
+ cmd = $raw_command_line
+}
+
+if ($creates -and $(Test-AnsiblePath -Path $creates)) {
+ Exit-Json @{msg="skipped, since $creates exists";cmd=$raw_command_line;changed=$false;skipped=$true;rc=0}
+}
+
+if ($removes -and -not $(Test-AnsiblePath -Path $removes)) {
+ Exit-Json @{msg="skipped, since $removes does not exist";cmd=$raw_command_line;changed=$false;skipped=$true;rc=0}
+}
+
+$exec_args = $null
+If(-not $executable -or $executable -eq "powershell") {
+ $exec_application = "powershell.exe"
+
+ # force input encoding to preamble-free UTF8 so PS sub-processes (eg, Start-Job) don't blow up
+ $raw_command_line = "[Console]::InputEncoding = New-Object Text.UTF8Encoding `$false; " + $raw_command_line
+
+ # Base64 encode the command so we don't have to worry about the various levels of escaping
+ $encoded_command = [Convert]::ToBase64String([System.Text.Encoding]::Unicode.GetBytes($raw_command_line))
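+    # -EncodedCommand expects the base64 of the UTF-16LE bytes of the script,
+    # which is why [System.Text.Encoding]::Unicode is used above.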
+
+ if ($stdin) {
+ $exec_args = "-encodedcommand $encoded_command"
+ } else {
+ $exec_args = "-noninteractive -encodedcommand $encoded_command"
+ }
+
+ if ($no_profile) {
+ $exec_args = "-noprofile $exec_args"
+ }
+}
+Else {
+ # FUTURE: support arg translation from executable (or executable_args?) to process arguments for arbitrary interpreter?
+ $exec_application = $executable
+ if (-not ($exec_application.EndsWith(".exe"))) {
+ $exec_application = "$($exec_application).exe"
+ }
+ $exec_args = "/c $raw_command_line"
+}
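+# Illustrative only: executable 'cmd' with a raw command line of 'echo hi'
+# produces the final command string '"cmd.exe" /c echo hi' below.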
+
+$command = "`"$exec_application`" $exec_args"
+$run_command_arg = @{
+ command = $command
+}
+if ($chdir) {
+ $run_command_arg['working_directory'] = $chdir
+}
+if ($stdin) {
+ $run_command_arg['stdin'] = $stdin
+}
+if ($output_encoding_override) {
+ $run_command_arg['output_encoding_override'] = $output_encoding_override
+}
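+# Splatting @run_command_arg below passes only the optional parameters that
+# were actually supplied on to Run-Command.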
+
+$start_datetime = [DateTime]::UtcNow
+try {
+ $command_result = Run-Command @run_command_arg
+} catch {
+ $result.changed = $false
+ try {
+ $result.rc = $_.Exception.NativeErrorCode
+ } catch {
+ $result.rc = 2
+ }
+ Fail-Json -obj $result -message $_.Exception.Message
+}
+
+# TODO: decode CLIXML stderr output (and other streams?)
+$result.stdout = $command_result.stdout
+$result.stderr = Cleanse-Stderr $command_result.stderr
+$result.rc = $command_result.rc
+
+$end_datetime = [DateTime]::UtcNow
+$result.start = $start_datetime.ToString("yyyy-MM-dd HH:mm:ss.ffffff")
+$result.end = $end_datetime.ToString("yyyy-MM-dd HH:mm:ss.ffffff")
+$result.delta = $($end_datetime - $start_datetime).ToString("h\:mm\:ss\.ffffff")
+
+If ($result.rc -ne 0) {
+ Fail-Json -obj $result -message "non-zero return code"
+}
+
+Exit-Json $result
diff --git a/test/support/windows-integration/collections/ansible_collections/ansible/windows/plugins/modules/win_shell.py b/test/support/windows-integration/collections/ansible_collections/ansible/windows/plugins/modules/win_shell.py
index 3c6f0749..ee2cd762 120000..100644
--- a/test/support/windows-integration/collections/ansible_collections/ansible/windows/plugins/modules/win_shell.py
+++ b/test/support/windows-integration/collections/ansible_collections/ansible/windows/plugins/modules/win_shell.py
@@ -1 +1,167 @@
-../../../../../../plugins/modules/win_shell.py \ No newline at end of file
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2016, Ansible, inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'core'}
+
+DOCUMENTATION = r'''
+---
+module: win_shell
+short_description: Execute shell commands on target hosts
+version_added: 2.2
+description:
+ - The C(win_shell) module takes the command name followed by a list of space-delimited arguments.
+ It is similar to the M(win_command) module, but runs
+ the command via a shell (defaults to PowerShell) on the target host.
+ - For non-Windows targets, use the M(shell) module instead.
+options:
+ free_form:
+ description:
+ - The C(win_shell) module takes a free form command to run.
+ - There is no parameter actually named 'free form'. See the examples!
+ type: str
+ required: yes
+ creates:
+ description:
+ - A path or path filter pattern; when the referenced path exists on the target host, the task will be skipped.
+ type: path
+ removes:
+ description:
+ - A path or path filter pattern; when the referenced path B(does not) exist on the target host, the task will be skipped.
+ type: path
+ chdir:
+ description:
+ - Set the specified path as the current working directory before executing a command
+ type: path
+ executable:
+ description:
+ - Change the shell used to execute the command (eg, C(cmd)).
+ - The target shell must accept a C(/c) parameter followed by the raw command line to be executed.
+ type: path
+ stdin:
+ description:
+ - Set the stdin of the command directly to the specified value.
+ type: str
+ version_added: '2.5'
+ no_profile:
+ description:
+ - Do not load the user profile before running a command. This is only valid
+ when using PowerShell as the executable.
+ type: bool
+ default: no
+ version_added: '2.8'
+ output_encoding_override:
+ description:
+ - This option overrides the encoding of stdout/stderr output.
+    - You can use this option when you need to run a command which ignores the console's codepage.
+ - You should only need to use this option in very rare circumstances.
+ - This value can be any valid encoding C(Name) based on the output of C([System.Text.Encoding]::GetEncodings()).
+ See U(https://docs.microsoft.com/dotnet/api/system.text.encoding.getencodings).
+ type: str
+ version_added: '2.10'
+notes:
+ - If you want to run an executable securely and predictably, it may be
+ better to use the M(win_command) module instead. Best practices when writing
+ playbooks will follow the trend of using M(win_command) unless C(win_shell) is
+ explicitly required. When running ad-hoc commands, use your best judgement.
+ - WinRM will not return from a command execution until all child processes created have exited.
+ Thus, it is not possible to use C(win_shell) to spawn long-running child or background processes.
+ Consider creating a Windows service for managing background processes.
+seealso:
+- module: psexec
+- module: raw
+- module: script
+- module: shell
+- module: win_command
+- module: win_psexec
+author:
+ - Matt Davis (@nitzmahone)
+'''
+
+EXAMPLES = r'''
+# Execute a command in the remote shell; stdout goes to the specified
+# file on the remote.
+- win_shell: C:\somescript.ps1 >> C:\somelog.txt
+
+# Change the working directory to somedir/ before executing the command.
+- win_shell: C:\somescript.ps1 >> C:\somelog.txt chdir=C:\somedir
+
+# You can also use the 'args' form to provide the options. This command
+# will change the working directory to somedir/ and will only run when
+# somedir/somelog.txt doesn't exist.
+- win_shell: C:\somescript.ps1 >> C:\somelog.txt
+ args:
+ chdir: C:\somedir
+ creates: C:\somelog.txt
+
+# Run a command under a non-Powershell interpreter (cmd in this case)
+- win_shell: echo %HOMEDIR%
+ args:
+ executable: cmd
+ register: homedir_out
+
+- name: Run multi-lined shell commands
+ win_shell: |
+ $value = Test-Path -Path C:\temp
+ if ($value) {
+ Remove-Item -Path C:\temp -Force
+ }
+ New-Item -Path C:\temp -ItemType Directory
+
+- name: Retrieve the input based on stdin
+ win_shell: '$string = [Console]::In.ReadToEnd(); Write-Output $string.Trim()'
+ args:
+ stdin: Input message
+'''
+
+RETURN = r'''
+msg:
+  description: Whether the command made a change on the target.
+ returned: always
+ type: bool
+ sample: true
+start:
+ description: The command execution start time.
+ returned: always
+ type: str
+ sample: '2016-02-25 09:18:26.429568'
+end:
+ description: The command execution end time.
+ returned: always
+ type: str
+ sample: '2016-02-25 09:18:26.755339'
+delta:
+ description: The command execution delta time.
+ returned: always
+ type: str
+ sample: '0:00:00.325771'
+stdout:
+ description: The command standard output.
+ returned: always
+ type: str
+ sample: 'Clustering node rabbit@slave1 with rabbit@master ...'
+stderr:
+ description: The command standard error.
+ returned: always
+ type: str
+ sample: 'ls: cannot access foo: No such file or directory'
+cmd:
+ description: The command executed by the task.
+ returned: always
+ type: str
+ sample: 'rabbitmqctl join_cluster rabbit@master'
+rc:
+ description: The command return code (0 means success).
+ returned: always
+ type: int
+ sample: 0
+stdout_lines:
+ description: The command standard output split in lines.
+ returned: always
+ type: list
+ sample: [u'Clustering node rabbit@slave1 with rabbit@master ...']
+'''
diff --git a/test/support/windows-integration/collections/ansible_collections/ansible/windows/plugins/modules/win_stat.ps1 b/test/support/windows-integration/collections/ansible_collections/ansible/windows/plugins/modules/win_stat.ps1
index 62a7a40a..071eb11c 120000..100644
--- a/test/support/windows-integration/collections/ansible_collections/ansible/windows/plugins/modules/win_stat.ps1
+++ b/test/support/windows-integration/collections/ansible_collections/ansible/windows/plugins/modules/win_stat.ps1
@@ -1 +1,186 @@
-../../../../../../plugins/modules/win_stat.ps1 \ No newline at end of file
+#!powershell
+
+# Copyright: (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+#AnsibleRequires -CSharpUtil Ansible.Basic
+#Requires -Module Ansible.ModuleUtils.FileUtil
+#Requires -Module Ansible.ModuleUtils.LinkUtil
+
+function ConvertTo-Timestamp($start_date, $end_date) {
+ if ($start_date -and $end_date) {
+ return (New-TimeSpan -Start $start_date -End $end_date).TotalSeconds
+ }
+}
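+# Returns the fractional seconds between the two dates; called below with the
+# Unix epoch as the start date to produce stat-style timestamps, e.g.
+# ConvertTo-Timestamp -start_date $epoch_date -end_date $info.CreationTime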
+
+function Get-FileChecksum($path, $algorithm) {
+ switch ($algorithm) {
+ 'md5' { $sp = New-Object -TypeName System.Security.Cryptography.MD5CryptoServiceProvider }
+ 'sha1' { $sp = New-Object -TypeName System.Security.Cryptography.SHA1CryptoServiceProvider }
+ 'sha256' { $sp = New-Object -TypeName System.Security.Cryptography.SHA256CryptoServiceProvider }
+ 'sha384' { $sp = New-Object -TypeName System.Security.Cryptography.SHA384CryptoServiceProvider }
+ 'sha512' { $sp = New-Object -TypeName System.Security.Cryptography.SHA512CryptoServiceProvider }
+        default { $module.FailJson("Unsupported hash algorithm supplied '$algorithm'") }
+ }
+
+ $fp = [System.IO.File]::Open($path, [System.IO.Filemode]::Open, [System.IO.FileAccess]::Read, [System.IO.FileShare]::ReadWrite)
+ try {
+ $hash = [System.BitConverter]::ToString($sp.ComputeHash($fp)).Replace("-", "").ToLower()
+ } finally {
+ $fp.Dispose()
+ }
+
+ return $hash
+}
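+# e.g. Get-FileChecksum -path 'C:\foo.ini' -algorithm 'sha256' returns the
+# lower-case hex digest of the file's contents (illustrative path).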
+
+function Get-FileInfo {
+ param([String]$Path, [Switch]$Follow)
+
+ $info = Get-AnsibleItem -Path $Path -ErrorAction SilentlyContinue
+ $link_info = $null
+ if ($null -ne $info) {
+ try {
+ $link_info = Get-Link -link_path $info.FullName
+ } catch {
+ $module.Warn("Failed to check/get link info for file: $($_.Exception.Message)")
+ }
+
+        # If follow=true we want to follow the link all the way back to the root object
+ if ($Follow -and $null -ne $link_info -and $link_info.Type -in @("SymbolicLink", "JunctionPoint")) {
+ $info, $link_info = Get-FileInfo -Path $link_info.AbsolutePath -Follow
+ }
+ }
+
+ return $info, $link_info
+}
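+# With -Follow the function re-enters itself on the link target, so chains of
+# symlinks/junction points are walked until a non-link (or missing) target is
+# reached.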
+
+$spec = @{
+ options = @{
+ path = @{ type='path'; required=$true; aliases=@( 'dest', 'name' ) }
+ get_checksum = @{ type='bool'; default=$true }
+ checksum_algorithm = @{ type='str'; default='sha1'; choices=@( 'md5', 'sha1', 'sha256', 'sha384', 'sha512' ) }
+ follow = @{ type='bool'; default=$false }
+ }
+ supports_check_mode = $true
+}
+
+$module = [Ansible.Basic.AnsibleModule]::Create($args, $spec)
+
+$path = $module.Params.path
+$get_checksum = $module.Params.get_checksum
+$checksum_algorithm = $module.Params.checksum_algorithm
+$follow = $module.Params.follow
+
+$module.Result.stat = @{ exists=$false }
+
+Load-LinkUtils
+$info, $link_info = Get-FileInfo -Path $path -Follow:$follow
+If ($null -ne $info) {
+ $epoch_date = Get-Date -Date "01/01/1970"
+ $attributes = @()
+ foreach ($attribute in ($info.Attributes -split ',')) {
+ $attributes += $attribute.Trim()
+ }
+
+    # default values that are always set; type-specific values are set further
+    # below but are listed here as comments for easier readability
+ $stat = @{
+ exists = $true
+ attributes = $info.Attributes.ToString()
+ isarchive = ($attributes -contains "Archive")
+ isdir = $false
+ ishidden = ($attributes -contains "Hidden")
+ isjunction = $false
+ islnk = $false
+ isreadonly = ($attributes -contains "ReadOnly")
+ isreg = $false
+ isshared = $false
+        nlink = 1 # Number of links to the file (hard links), overridden below if islnk
+ # lnk_target = islnk or isjunction Target of the symlink. Note that relative paths remain relative
+        # lnk_source = islnk or isjunction Target of the symlink normalized for the remote filesystem
+ hlnk_targets = @()
+ creationtime = (ConvertTo-Timestamp -start_date $epoch_date -end_date $info.CreationTime)
+ lastaccesstime = (ConvertTo-Timestamp -start_date $epoch_date -end_date $info.LastAccessTime)
+ lastwritetime = (ConvertTo-Timestamp -start_date $epoch_date -end_date $info.LastWriteTime)
+ # size = a file and directory - calculated below
+ path = $info.FullName
+ filename = $info.Name
+ # extension = a file
+        # owner = set outside this dict in case it fails
+ # sharename = a directory and isshared is True
+ # checksum = a file and get_checksum: True
+ }
+ try {
+ $stat.owner = $info.GetAccessControl().Owner
+ } catch {
+        # may not have rights to read the owner; the historical behaviour was to
+        # just set it to $null due to ErrorActionPreference being set to "Continue"
+ $stat.owner = $null
+ }
+
+ # values that are set according to the type of file
+ if ($info.Attributes.HasFlag([System.IO.FileAttributes]::Directory)) {
+ $stat.isdir = $true
+ $share_info = Get-CimInstance -ClassName Win32_Share -Filter "Path='$($stat.path -replace '\\', '\\')'"
+ if ($null -ne $share_info) {
+ $stat.isshared = $true
+ $stat.sharename = $share_info.Name
+ }
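+        # the -replace above doubles each backslash because backslash is the
+        # escape character in a WQL filter string (C:\Temp -> C:\\Temp)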
+
+ try {
+ $size = 0
+ foreach ($file in $info.EnumerateFiles("*", [System.IO.SearchOption]::AllDirectories)) {
+ $size += $file.Length
+ }
+ $stat.size = $size
+ } catch {
+ $stat.size = 0
+ }
+ } else {
+ $stat.extension = $info.Extension
+ $stat.isreg = $true
+ $stat.size = $info.Length
+
+ if ($get_checksum) {
+ try {
+ $stat.checksum = Get-FileChecksum -path $path -algorithm $checksum_algorithm
+ } catch {
+ $module.FailJson("Failed to get hash of file, set get_checksum to False to ignore this error: $($_.Exception.Message)", $_)
+ }
+ }
+ }
+
+ # Get symbolic link, junction point, hard link info
+ if ($null -ne $link_info) {
+ switch ($link_info.Type) {
+ "SymbolicLink" {
+ $stat.islnk = $true
+ $stat.isreg = $false
+ $stat.lnk_target = $link_info.TargetPath
+ $stat.lnk_source = $link_info.AbsolutePath
+ break
+ }
+ "JunctionPoint" {
+ $stat.isjunction = $true
+ $stat.isreg = $false
+ $stat.lnk_target = $link_info.TargetPath
+ $stat.lnk_source = $link_info.AbsolutePath
+ break
+ }
+ "HardLink" {
+ $stat.lnk_type = "hard"
+ $stat.nlink = $link_info.HardTargets.Count
+
+ # remove current path from the targets
+ $hlnk_targets = $link_info.HardTargets | Where-Object { $_ -ne $stat.path }
+ $stat.hlnk_targets = @($hlnk_targets)
+ break
+ }
+ }
+ }
+
+ $module.Result.stat = $stat
+}
+
+$module.ExitJson()
+
diff --git a/test/support/windows-integration/collections/ansible_collections/ansible/windows/plugins/modules/win_stat.py b/test/support/windows-integration/collections/ansible_collections/ansible/windows/plugins/modules/win_stat.py
index 1db4c95e..0676b5b2 120000..100644
--- a/test/support/windows-integration/collections/ansible_collections/ansible/windows/plugins/modules/win_stat.py
+++ b/test/support/windows-integration/collections/ansible_collections/ansible/windows/plugins/modules/win_stat.py
@@ -1 +1,236 @@
-../../../../../../plugins/modules/win_stat.py \ No newline at end of file
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+# this is a windows documentation stub. actual code lives in the .ps1
+# file of the same name
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['stableinterface'],
+ 'supported_by': 'core'}
+
+DOCUMENTATION = r'''
+---
+module: win_stat
+version_added: "1.7"
+short_description: Get information about Windows files
+description:
+ - Returns information about a Windows file.
+ - For non-Windows targets, use the M(stat) module instead.
+options:
+ path:
+ description:
+ - The full path of the file/object to get the facts of; both forward and
+ back slashes are accepted.
+ type: path
+ required: yes
+ aliases: [ dest, name ]
+ get_checksum:
+ description:
+    - Whether to return a checksum of the file (default sha1).
+ type: bool
+ default: yes
+ version_added: "2.1"
+ checksum_algorithm:
+ description:
+ - Algorithm to determine checksum of file.
+    - Will throw an error if the host is unable to use the specified algorithm.
+ type: str
+ default: sha1
+ choices: [ md5, sha1, sha256, sha384, sha512 ]
+ version_added: "2.3"
+ follow:
+ description:
+ - Whether to follow symlinks or junction points.
+    - If C(path) points to another link, that link will
+      be followed until no more links are found.
+ type: bool
+ default: no
+ version_added: "2.8"
+seealso:
+- module: stat
+- module: win_acl
+- module: win_file
+- module: win_owner
+author:
+- Chris Church (@cchurch)
+'''
+
+EXAMPLES = r'''
+- name: Obtain information about a file
+ win_stat:
+ path: C:\foo.ini
+ register: file_info
+
+- name: Obtain information about a folder
+ win_stat:
+ path: C:\bar
+ register: folder_info
+
+- name: Get MD5 checksum of a file
+ win_stat:
+ path: C:\foo.ini
+ get_checksum: yes
+ checksum_algorithm: md5
+ register: md5_checksum
+
+- debug:
+ var: md5_checksum.stat.checksum
+
+- name: Get SHA1 checksum of file
+ win_stat:
+ path: C:\foo.ini
+ get_checksum: yes
+ register: sha1_checksum
+
+- debug:
+ var: sha1_checksum.stat.checksum
+
+- name: Get SHA256 checksum of file
+ win_stat:
+ path: C:\foo.ini
+ get_checksum: yes
+ checksum_algorithm: sha256
+ register: sha256_checksum
+
+- debug:
+ var: sha256_checksum.stat.checksum
+'''
+
+RETURN = r'''
+changed:
+ description: Whether anything was changed
+ returned: always
+ type: bool
+ sample: true
+stat:
+ description: dictionary containing all the stat data
+ returned: success
+ type: complex
+ contains:
+ attributes:
+ description: Attributes of the file at path in raw form.
+ returned: success, path exists
+ type: str
+ sample: "Archive, Hidden"
+ checksum:
+ description: The checksum of a file based on checksum_algorithm specified.
+      returned: success, path exists, path is a file, get_checksum == True,
+        and the specified checksum_algorithm is supported
+ type: str
+ sample: 09cb79e8fc7453c84a07f644e441fd81623b7f98
+ creationtime:
+ description: The create time of the file represented in seconds since epoch.
+ returned: success, path exists
+ type: float
+ sample: 1477984205.15
+ exists:
+ description: If the path exists or not.
+ returned: success
+ type: bool
+ sample: true
+ extension:
+ description: The extension of the file at path.
+ returned: success, path exists, path is a file
+ type: str
+ sample: ".ps1"
+ filename:
+ description: The name of the file (without path).
+ returned: success, path exists, path is a file
+ type: str
+ sample: foo.ini
+ hlnk_targets:
+ description: List of other files pointing to the same file (hard links), excludes the current file.
+ returned: success, path exists
+ type: list
+ sample:
+ - C:\temp\file.txt
+ - C:\Windows\update.log
+ isarchive:
+ description: If the path is ready for archiving or not.
+ returned: success, path exists
+ type: bool
+ sample: true
+ isdir:
+ description: If the path is a directory or not.
+ returned: success, path exists
+ type: bool
+ sample: true
+ ishidden:
+ description: If the path is hidden or not.
+ returned: success, path exists
+ type: bool
+ sample: true
+ isjunction:
+ description: If the path is a junction point or not.
+ returned: success, path exists
+ type: bool
+ sample: true
+ islnk:
+ description: If the path is a symbolic link or not.
+ returned: success, path exists
+ type: bool
+ sample: true
+ isreadonly:
+ description: If the path is read only or not.
+ returned: success, path exists
+ type: bool
+ sample: true
+ isreg:
+ description: If the path is a regular file.
+ returned: success, path exists
+ type: bool
+ sample: true
+ isshared:
+ description: If the path is shared or not.
+ returned: success, path exists
+ type: bool
+ sample: true
+ lastaccesstime:
+ description: The last access time of the file represented in seconds since epoch.
+ returned: success, path exists
+ type: float
+ sample: 1477984205.15
+ lastwritetime:
+ description: The last modification time of the file represented in seconds since epoch.
+ returned: success, path exists
+ type: float
+ sample: 1477984205.15
+ lnk_source:
+ description: Target of the symlink normalized for the remote filesystem.
+ returned: success, path exists and the path is a symbolic link or junction point
+ type: str
+ sample: C:\temp\link
+ lnk_target:
+ description: Target of the symlink. Note that relative paths remain relative.
+ returned: success, path exists and the path is a symbolic link or junction point
+ type: str
+ sample: ..\link
+ nlink:
+ description: Number of links to the file (hard links).
+ returned: success, path exists
+ type: int
+ sample: 1
+ owner:
+ description: The owner of the file.
+ returned: success, path exists
+ type: str
+ sample: BUILTIN\Administrators
+ path:
+ description: The full absolute path to the file.
+ returned: success, path exists, file exists
+ type: str
+ sample: C:\foo.ini
+ sharename:
+ description: The name of share if folder is shared.
+ returned: success, path exists, file is a directory and isshared == True
+ type: str
+ sample: file-share
+ size:
+ description: The size in bytes of a file or folder.
+ returned: success, path exists, file is not a link
+ type: int
+ sample: 1024
+'''
diff --git a/test/support/windows-integration/plugins/modules/win_data_deduplication.ps1 b/test/support/windows-integration/plugins/modules/win_data_deduplication.ps1
deleted file mode 100644
index 593ee763..00000000
--- a/test/support/windows-integration/plugins/modules/win_data_deduplication.ps1
+++ /dev/null
@@ -1,129 +0,0 @@
-#!powershell
-
-# Copyright: 2019, rnsc(@rnsc) <github@rnsc.be>
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt
-
-#AnsibleRequires -CSharpUtil Ansible.Basic
-#AnsibleRequires -OSVersion 6.3
-
-$spec = @{
- options = @{
- drive_letter = @{ type = "str"; required = $true }
- state = @{ type = "str"; choices = "absent", "present"; default = "present"; }
- settings = @{
- type = "dict"
- required = $false
- options = @{
- minimum_file_size = @{ type = "int"; default = 32768 }
- minimum_file_age_days = @{ type = "int"; default = 2 }
- no_compress = @{ type = "bool"; required = $false; default = $false }
- optimize_in_use_files = @{ type = "bool"; required = $false; default = $false }
- verify = @{ type = "bool"; required = $false; default = $false }
- }
- }
- }
- supports_check_mode = $true
-}
-
-$module = [Ansible.Basic.AnsibleModule]::Create($args, $spec)
-
-$drive_letter = $module.Params.drive_letter
-$state = $module.Params.state
-$settings = $module.Params.settings
-
-$module.Result.changed = $false
-$module.Result.reboot_required = $false
-$module.Result.msg = ""
-
-function Set-DataDeduplication($volume, $state, $settings, $dedup_job) {
-
- $current_state = 'absent'
-
- try {
- $dedup_info = Get-DedupVolume -Volume "$($volume.DriveLetter):"
- } catch {
- $dedup_info = $null
- }
-
- if ($dedup_info.Enabled) {
- $current_state = 'present'
- }
-
- if ( $state -ne $current_state ) {
- if( -not $module.CheckMode) {
- if($state -eq 'present') {
- # Enable-DedupVolume -Volume <String>
- Enable-DedupVolume -Volume "$($volume.DriveLetter):"
- } elseif ($state -eq 'absent') {
- Disable-DedupVolume -Volume "$($volume.DriveLetter):"
- }
- }
- $module.Result.changed = $true
- }
-
- if ($state -eq 'present') {
- if ($null -ne $settings) {
- Set-DataDedupJobSettings -volume $volume -settings $settings
- }
- }
-}
-
-function Set-DataDedupJobSettings ($volume, $settings) {
-
- try {
- $dedup_info = Get-DedupVolume -Volume "$($volume.DriveLetter):"
- } catch {
- $dedup_info = $null
- }
-
- ForEach ($key in $settings.keys) {
-
- # See Microsoft documentation:
- # https://docs.microsoft.com/en-us/powershell/module/deduplication/set-dedupvolume?view=win10-ps
-
- $update_key = $key
- $update_value = $settings.$($key)
- # Transform Ansible style options to Powershell params
- $update_key = $update_key -replace('_', '')
-
- if ($update_key -eq "MinimumFileSize" -and $update_value -lt 32768) {
- $update_value = 32768
- }
-
- $current_value = ($dedup_info | Select-Object -ExpandProperty $update_key)
-
- if ($update_value -ne $current_value) {
- $command_param = @{
- $($update_key) = $update_value
- }
-
- # Set-DedupVolume -Volume <String>`
- # -NoCompress <bool> `
- # -MinimumFileAgeDays <UInt32> `
- # -MinimumFileSize <UInt32> (minimum 32768)
- if( -not $module.CheckMode ) {
- Set-DedupVolume -Volume "$($volume.DriveLetter):" @command_param
- }
-
- $module.Result.changed = $true
- }
- }
-
-}
-
-# Install required feature
-$feature_name = "FS-Data-Deduplication"
-if( -not $module.CheckMode) {
- $feature = Install-WindowsFeature -Name $feature_name
-
- if ($feature.RestartNeeded -eq 'Yes') {
- $module.Result.reboot_required = $true
- $module.FailJson("$feature_name was installed but requires Windows to be rebooted to work.")
- }
-}
-
-$volume = Get-Volume -DriveLetter $drive_letter
-
-Set-DataDeduplication -volume $volume -state $state -settings $settings -dedup_job $dedup_job
-
-$module.ExitJson()
diff --git a/test/support/windows-integration/plugins/modules/win_data_deduplication.py b/test/support/windows-integration/plugins/modules/win_data_deduplication.py
deleted file mode 100644
index d320b9f7..00000000
--- a/test/support/windows-integration/plugins/modules/win_data_deduplication.py
+++ /dev/null
@@ -1,87 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-
-# Copyright: 2019, rnsc(@rnsc) <github@rnsc.be>
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-DOCUMENTATION = r'''
----
-module: win_data_deduplication
-version_added: "2.10"
-short_description: Module to enable Data Deduplication on a volume.
-description:
-- This module can be used to enable Data Deduplication on a Windows volume.
-- The module will install the FS-Data-Deduplication feature (a reboot will be necessary).
-options:
- drive_letter:
- description:
- - Windows drive letter on which to enable data deduplication.
- required: yes
- type: str
- state:
- description:
-    - Whether to enable or disable data deduplication on the selected volume.
- default: present
- type: str
- choices: [ present, absent ]
- settings:
- description:
- - Dictionary of settings to pass to the Set-DedupVolume powershell command.
- type: dict
- suboptions:
- minimum_file_size:
- description:
- - Minimum file size you want to target for deduplication.
- - It will default to 32768 if not defined or if the value is less than 32768.
- type: int
- default: 32768
- minimum_file_age_days:
- description:
- - Minimum file age you want to target for deduplication.
- type: int
- default: 2
- no_compress:
- description:
-    - Whether you want to enable filesystem compression or not.
- type: bool
- default: no
- optimize_in_use_files:
- description:
- - Indicates that the server attempts to optimize currently open files.
- type: bool
- default: no
- verify:
- description:
- - Indicates whether the deduplication engine performs a byte-for-byte verification for each duplicate chunk
- that optimization creates, rather than relying on a cryptographically strong hash.
-    - This option is not recommended.
- - Setting this parameter to True can degrade optimization performance.
- type: bool
- default: no
-author:
-- rnsc (@rnsc)
-'''
-
-EXAMPLES = r'''
-- name: Enable Data Deduplication on D
- win_data_deduplication:
- drive_letter: 'D'
- state: present
-
-- name: Enable Data Deduplication on D
- win_data_deduplication:
- drive_letter: 'D'
- state: present
- settings:
- no_compress: true
- minimum_file_age_days: 1
- minimum_file_size: 0
-'''
-
-RETURN = r'''
-#
-'''
diff --git a/test/support/windows-integration/plugins/modules/win_dsc.ps1 b/test/support/windows-integration/plugins/modules/win_dsc.ps1
deleted file mode 100644
index 690f391a..00000000
--- a/test/support/windows-integration/plugins/modules/win_dsc.ps1
+++ /dev/null
@@ -1,398 +0,0 @@
-#!powershell
-
-# Copyright: (c) 2015, Trond Hindenes <trond@hindenes.com>, and others
-# Copyright: (c) 2017, Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-#AnsibleRequires -CSharpUtil Ansible.Basic
-#Requires -Version 5
-
-Function ConvertTo-ArgSpecType {
- <#
- .SYNOPSIS
- Converts the DSC parameter type to the arg spec type required for Ansible.
- #>
- param(
- [Parameter(Mandatory=$true)][String]$CimType
- )
-
- $arg_type = switch($CimType) {
- Boolean { "bool" }
- Char16 { [Func[[Object], [Char]]]{ [System.Char]::Parse($args[0].ToString()) } }
- DateTime { [Func[[Object], [DateTime]]]{ [System.DateTime]($args[0].ToString()) } }
- Instance { "dict" }
- Real32 { "float" }
- Real64 { [Func[[Object], [Double]]]{ [System.Double]::Parse($args[0].ToString()) } }
- Reference { "dict" }
- SInt16 { [Func[[Object], [Int16]]]{ [System.Int16]::Parse($args[0].ToString()) } }
- SInt32 { "int" }
- SInt64 { [Func[[Object], [Int64]]]{ [System.Int64]::Parse($args[0].ToString()) } }
- SInt8 { [Func[[Object], [SByte]]]{ [System.SByte]::Parse($args[0].ToString()) } }
- String { "str" }
- UInt16 { [Func[[Object], [UInt16]]]{ [System.UInt16]::Parse($args[0].ToString()) } }
- UInt32 { [Func[[Object], [UInt32]]]{ [System.UInt32]::Parse($args[0].ToString()) } }
- UInt64 { [Func[[Object], [UInt64]]]{ [System.UInt64]::Parse($args[0].ToString()) } }
- UInt8 { [Func[[Object], [Byte]]]{ [System.Byte]::Parse($args[0].ToString()) } }
- Unknown { "raw" }
- default { "raw" }
- }
- return $arg_type
-}
-
-Function Get-DscCimClassProperties {
- <#
- .SYNOPSIS
-    Gets a list of CimProperties of a CIM Class. It filters out any magic or
- read only properties that we don't need to know about.
- #>
- param([Parameter(Mandatory=$true)][String]$ClassName)
-
- $resource = Get-CimClass -ClassName $ClassName -Namespace root\Microsoft\Windows\DesiredStateConfiguration
-
- # Filter out any magic properties that are used internally on an OMI_BaseResource
- # https://github.com/PowerShell/PowerShell/blob/master/src/System.Management.Automation/DscSupport/CimDSCParser.cs#L1203
- $magic_properties = @("ResourceId", "SourceInfo", "ModuleName", "ModuleVersion", "ConfigurationName")
- $properties = $resource.CimClassProperties | Where-Object {
-
- ($resource.CimSuperClassName -ne "OMI_BaseResource" -or $_.Name -notin $magic_properties) -and
- -not $_.Flags.HasFlag([Microsoft.Management.Infrastructure.CimFlags]::ReadOnly)
- }
-
- return ,$properties
-}
-
-Function Add-PropertyOption {
- <#
- .SYNOPSIS
- Adds the spec for the property type to the existing module specification.
- #>
- param(
- [Parameter(Mandatory=$true)][Hashtable]$Spec,
- [Parameter(Mandatory=$true)]
- [Microsoft.Management.Infrastructure.CimPropertyDeclaration]$Property
- )
-
- $option = @{
- required = $false
- }
- $property_name = $Property.Name
- $property_type = $Property.CimType.ToString()
-
- if ($Property.Flags.HasFlag([Microsoft.Management.Infrastructure.CimFlags]::Key) -or
- $Property.Flags.HasFlag([Microsoft.Management.Infrastructure.CimFlags]::Required)) {
- $option.required = $true
- }
-
- if ($null -ne $Property.Qualifiers['Values']) {
- $option.choices = [System.Collections.Generic.List`1[Object]]$Property.Qualifiers['Values'].Value
- }
-
- if ($property_name -eq "Name") {
- # For backwards compatibility we support specifying the Name DSC property as item_name
- $option.aliases = @("item_name")
- } elseif ($property_name -ceq "key") {
- # There seems to be a bug in the CIM property parsing when the property name is 'Key'. The CIM instance will
- # think the name is 'key' when the MOF actually defines it as 'Key'. We set the proper casing so the module arg
- # validator won't fire a case sensitive warning
- $property_name = "Key"
- }
-
- if ($Property.ReferenceClassName -eq "MSFT_Credential") {
- # Special handling for the MSFT_Credential type (PSCredential), we handle this with having 2 options that
- # have the suffix _username and _password.
- $option_spec_pass = @{
- type = "str"
- required = $option.required
- no_log = $true
- }
- $Spec.options."$($property_name)_password" = $option_spec_pass
- $Spec.required_together.Add(@("$($property_name)_username", "$($property_name)_password")) > $null
-
- $property_name = "$($property_name)_username"
- $option.type = "str"
- } elseif ($Property.ReferenceClassName -eq "MSFT_KeyValuePair") {
- $option.type = "dict"
- } elseif ($property_type.EndsWith("Array")) {
- $option.type = "list"
- $option.elements = ConvertTo-ArgSpecType -CimType $property_type.Substring(0, $property_type.Length - 5)
- } else {
- $option.type = ConvertTo-ArgSpecType -CimType $property_type
- }
-
- if (($option.type -eq "dict" -or ($option.type -eq "list" -and $option.elements -eq "dict")) -and
- $Property.ReferenceClassName -ne "MSFT_KeyValuePair") {
- # Get the sub spec if the type is a Instance (CimInstance/dict)
- $sub_option_spec = Get-OptionSpec -ClassName $Property.ReferenceClassName
- $option += $sub_option_spec
- }
-
- $Spec.options.$property_name = $option
-}
-
-Function Get-OptionSpec {
- <#
- .SYNOPSIS
-    Generates the spec used in AnsibleModule for a CIM MOF resource name.
-
- .NOTES
- This won't be able to retrieve the default values for an option as that is not defined in the MOF for a resource.
- Default values are still preserved in the DSC engine if we don't pass in the property at all, we just can't report
- on what they are automatically.
- #>
- param(
- [Parameter(Mandatory=$true)][String]$ClassName
- )
-
- $spec = @{
- options = @{}
- required_together = [System.Collections.ArrayList]@()
- }
- $properties = Get-DscCimClassProperties -ClassName $ClassName
- foreach ($property in $properties) {
- Add-PropertyOption -Spec $spec -Property $property
- }
-
- return $spec
-}
-
-Function ConvertTo-CimInstance {
- <#
- .SYNOPSIS
- Converts a dict to a CimInstance of the specified Class. Also provides a
- better error message if this fails that contains the option name that failed.
- #>
- param(
- [Parameter(Mandatory=$true)][String]$Name,
- [Parameter(Mandatory=$true)][String]$ClassName,
- [Parameter(Mandatory=$true)][System.Collections.IDictionary]$Value,
- [Parameter(Mandatory=$true)][Ansible.Basic.AnsibleModule]$Module,
- [Switch]$Recurse
- )
-
- $properties = @{}
- foreach ($value_info in $Value.GetEnumerator()) {
- # Need to remove all null values from existing dict so the conversion works
- if ($null -eq $value_info.Value) {
- continue
- }
- $properties.($value_info.Key) = $value_info.Value
- }
-
- if ($Recurse) {
- # We want to validate and convert and values to what's required by DSC
- $properties = ConvertTo-DscProperty -ClassName $ClassName -Params $properties -Module $Module
- }
-
- try {
- return (New-CimInstance -ClassName $ClassName -Property $properties -ClientOnly)
- } catch {
- # New-CimInstance raises a poor error message, make sure we mention what option it is for
- $Module.FailJson("Failed to cast dict value for option '$Name' to a CimInstance: $($_.Exception.Message)", $_)
- }
-}
-
-Function ConvertTo-DscProperty {
- <#
- .SYNOPSIS
- Converts the input module parameters that have been validated and casted
- into the types expected by the DSC engine. This is mostly done to deal with
- types like PSCredential and Dictionaries.
- #>
- param(
- [Parameter(Mandatory=$true)][String]$ClassName,
- [Parameter(Mandatory=$true)][System.Collections.IDictionary]$Params,
- [Parameter(Mandatory=$true)][Ansible.Basic.AnsibleModule]$Module
- )
- $properties = Get-DscCimClassProperties -ClassName $ClassName
-
- $dsc_properties = @{}
- foreach ($property in $properties) {
- $property_name = $property.Name
- $property_type = $property.CimType.ToString()
-
- if ($property.ReferenceClassName -eq "MSFT_Credential") {
- $username = $Params."$($property_name)_username"
- $password = $Params."$($property_name)_password"
-
- # No user set == No option set in playbook, skip this property
- if ($null -eq $username) {
- continue
- }
- $sec_password = ConvertTo-SecureString -String $password -AsPlainText -Force
- $value = New-Object -TypeName System.Management.Automation.PSCredential -ArgumentList $username, $sec_password
- } else {
- $value = $Params.$property_name
-
- # The actual value wasn't set, skip adding this property
- if ($null -eq $value) {
- continue
- }
-
- if ($property.ReferenceClassName -eq "MSFT_KeyValuePair") {
- $key_value_pairs = [System.Collections.Generic.List`1[CimInstance]]@()
- foreach ($value_info in $value.GetEnumerator()) {
- $kvp = @{Key = $value_info.Key; Value = $value_info.Value.ToString()}
- $cim_instance = ConvertTo-CimInstance -Name $property_name -ClassName MSFT_KeyValuePair `
- -Value $kvp -Module $Module
- $key_value_pairs.Add($cim_instance) > $null
- }
- $value = $key_value_pairs.ToArray()
- } elseif ($null -ne $property.ReferenceClassName) {
- # Convert the dict to a CimInstance (or list of CimInstances)
- $convert_args = @{
- ClassName = $property.ReferenceClassName
- Module = $Module
- Name = $property_name
- Recurse = $true
- }
- if ($property_type.EndsWith("Array")) {
- $value = [System.Collections.Generic.List`1[CimInstance]]@()
- foreach ($raw in $Params.$property_name.GetEnumerator()) {
- $cim_instance = ConvertTo-CimInstance -Value $raw @convert_args
- $value.Add($cim_instance) > $null
- }
- $value = $value.ToArray() # Need to make sure we are dealing with an Array not a List
- } else {
- $value = ConvertTo-CimInstance -Value $value @convert_args
- }
- }
- }
- $dsc_properties.$property_name = $value
- }
-
- return $dsc_properties
-}
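-# Sketch of the PSCredential handling above, assuming a hypothetical class whose
-# 'Credential' property references MSFT_Credential:
-#   $params = @{ Credential_username = "user"; Credential_password = "pass" }
-#   $props = ConvertTo-DscProperty -ClassName MSFT_xService -Params $params -Module $module
-#   # $props.Credential is now a [System.Management.Automation.PSCredential]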
-
-Function Invoke-DscMethod {
- <#
- .SYNOPSIS
- Invokes the DSC Resource Method specified in another PS pipeline. This is
-    done so we can retrieve the Verbose stream and return it to the user
-    for further debugging.
- #>
- param(
- [Parameter(Mandatory=$true)][Ansible.Basic.AnsibleModule]$Module,
- [Parameter(Mandatory=$true)][String]$Method,
- [Parameter(Mandatory=$true)][Hashtable]$Arguments
- )
-
- # Invoke the DSC resource in a separate runspace so we can capture the Verbose output
- $ps = [PowerShell]::Create()
- $ps.AddCommand("Invoke-DscResource").AddParameter("Method", $Method) > $null
- $ps.AddParameters($Arguments) > $null
-
- $result = $ps.Invoke()
-
- # Pass the warnings through to the AnsibleModule return result
- foreach ($warning in $ps.Streams.Warning) {
- $Module.Warn($warning.Message)
- }
-
- # If running at a high enough verbosity, add the verbose output to the AnsibleModule return result
- if ($Module.Verbosity -ge 3) {
- $verbose_logs = [System.Collections.Generic.List`1[String]]@()
- foreach ($verbosity in $ps.Streams.Verbose) {
- $verbose_logs.Add($verbosity.Message) > $null
- }
- $Module.Result."verbose_$($Method.ToLower())" = $verbose_logs
- }
-
- if ($ps.HadErrors) {
- # Cannot pass in the ErrorRecord as it's a RemotingErrorRecord and doesn't contain the ScriptStackTrace
- # or other info that would be useful
- $Module.FailJson("Failed to invoke DSC $Method method: $($ps.Streams.Error[0].Exception.Message)")
- }
-
- return $result
-}
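-# Minimal standalone sketch of the runspace technique used above: running a
-# command in a child PowerShell instance lets us read its Verbose records back
-# from $ps.Streams.Verbose instead of losing them to the host.
-#   $ps = [PowerShell]::Create()
-#   $ps.AddScript('Write-Verbose -Message "hello" -Verbose') > $null
-#   $ps.Invoke() > $null
-#   $ps.Streams.Verbose.Message  # -> "hello"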
-
-# win_dsc is unique in that it builds the arg spec based on DSC Resource input. To get this info
-# we need to read the resource_name and module_version value which is done outside of Ansible.Basic
-if ($args.Length -gt 0) {
- $params = Get-Content -Path $args[0] | ConvertFrom-Json
-} else {
- $params = $complex_args
-}
-if (-not $params.ContainsKey("resource_name")) {
- $res = @{
- msg = "missing required argument: resource_name"
- failed = $true
- }
- Write-Output -InputObject (ConvertTo-Json -Compress -InputObject $res)
- exit 1
-}
-$resource_name = $params.resource_name
-
-if ($params.ContainsKey("module_version")) {
- $module_version = $params.module_version
-} else {
- $module_version = "latest"
-}
-
-$module_versions = (Get-DscResource -Name $resource_name -ErrorAction SilentlyContinue | Sort-Object -Property Version)
-$resource = $null
-if ($module_version -eq "latest" -and $null -ne $module_versions) {
- $resource = $module_versions[-1]
-} elseif ($module_version -ne "latest") {
- $resource = $module_versions | Where-Object { $_.Version -eq $module_version }
-}
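-# Example: with versions 1.0 and 2.1 of the resource installed, module_version
-# 'latest' picks 2.1 (the last entry after the Version sort above), while an
-# explicit '1.0' filters the sorted list down to that exact entry.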
-
-if (-not $resource) {
- if ($module_version -eq "latest") {
- $msg = "Resource '$resource_name' not found."
- } else {
- $msg = "Resource '$resource_name' with version '$module_version' not found."
- $msg += " Versions installed: '$($module_versions.Version -join "', '")'."
- }
-
- Write-Output -InputObject (ConvertTo-Json -Compress -InputObject @{ failed = $true; msg = $msg })
- exit 1
-}
-
-# Build the base args for the DSC Invocation based on the resource selected
-$dsc_args = @{
- Name = $resource.Name
-}
-
-# Binary resources do not work well with this approach; we need to guesstimate the module name/version
-$module_version = $null
-if ($resource.Module) {
- $dsc_args.ModuleName = @{
- ModuleName = $resource.Module.Name
- ModuleVersion = $resource.Module.Version
- }
- $module_version = $resource.Module.Version.ToString()
-} else {
- $dsc_args.ModuleName = "PSDesiredStateConfiguration"
-}
-
-# To ensure the class registered with CIM is the one based on our version, we want to run the Get method so the DSC
-# engine updates the metadata property. We don't care about any errors here
-try {
- Invoke-DscResource -Method Get -Property @{Fake="Fake"} @dsc_args > $null
-} catch {}
-
-# Dynamically build the option spec based on the resource_name specified and create the module object
-$spec = Get-OptionSpec -ClassName $resource.ResourceType
-$spec.supports_check_mode = $true
-$spec.options.module_version = @{ type = "str"; default = "latest" }
-$spec.options.resource_name = @{ type = "str"; required = $true }
-
-$module = [Ansible.Basic.AnsibleModule]::Create($args, $spec)
-$module.Result.reboot_required = $false
-$module.Result.module_version = $module_version
-
-# Build the DSC invocation arguments and invoke the resource
-$dsc_args.Property = ConvertTo-DscProperty -ClassName $resource.ResourceType -Module $module -Params $Module.Params
-$dsc_args.Verbose = $true
-
-$test_result = Invoke-DscMethod -Module $module -Method Test -Arguments $dsc_args
-if ($test_result.InDesiredState -ne $true) {
- if (-not $module.CheckMode) {
- $result = Invoke-DscMethod -Module $module -Method Set -Arguments $dsc_args
- $module.Result.reboot_required = $result.RebootRequired
- }
- $module.Result.changed = $true
-}
-
-$module.ExitJson()
diff --git a/test/support/windows-integration/plugins/modules/win_dsc.py b/test/support/windows-integration/plugins/modules/win_dsc.py
deleted file mode 100644
index 200d025e..00000000
--- a/test/support/windows-integration/plugins/modules/win_dsc.py
+++ /dev/null
@@ -1,183 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-
-# Copyright: (c) 2015, Trond Hindenes <trond@hindenes.com>, and others
-# Copyright: (c) 2017, Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-DOCUMENTATION = r'''
----
-module: win_dsc
-version_added: "2.4"
-short_description: Invokes a PowerShell DSC configuration
-description:
-- Configures a resource using PowerShell DSC.
-- Requires PowerShell version 5.0 or newer.
-- Most of the options for this module are dynamic and will vary depending on
- the DSC Resource specified in I(resource_name).
-- See :doc:`/user_guide/windows_dsc` for more information on how to use this module.
-options:
- resource_name:
- description:
- - The name of the DSC Resource to use.
- - Must be accessible to PowerShell using any of the default paths.
- type: str
- required: yes
- module_version:
- description:
- - Can be used to configure the exact version of the DSC resource to be
- invoked.
- - Useful if the target node has multiple versions installed of the module
- containing the DSC resource.
- - If not specified, the module will follow standard PowerShell convention
- and use the highest version available.
- type: str
- default: latest
- free_form:
- description:
- - The M(win_dsc) module takes in multiple free form options based on the
- DSC resource being invoked by I(resource_name).
-  - There is no option actually named C(free_form); see the examples.
-  - This module will try to convert the option to the correct type required
-    by the DSC resource and throw a warning if it fails.
- - If the type of the DSC resource option is a C(CimInstance) or
- C(CimInstance[]), this means the value should be a dictionary or list
- of dictionaries based on the values required by that option.
- - If the type of the DSC resource option is a C(PSCredential) then there
- needs to be 2 options set in the Ansible task definition suffixed with
- C(_username) and C(_password).
- - If the type of the DSC resource option is an array, then a list should be
-    provided, but a comma separated string also works. Use a list where
-    possible as no escaping is required and it works with more complex types
-    like C(CimInstance[]).
- - If the type of the DSC resource option is a C(DateTime), you should use
-    a string in the form of an ISO 8601 string to ensure the exact date is
- used.
- - Since Ansible 2.8, Ansible will now validate the input fields against the
- DSC resource definition automatically. Older versions will silently
- ignore invalid fields.
- type: str
- required: true
-notes:
-- By default there are a few builtin resources that come with PowerShell 5.0,
- see U(https://docs.microsoft.com/en-us/powershell/scripting/dsc/resources/resources) for
- more information on these resources.
-- Custom DSC resources can be installed with M(win_psmodule) using the I(name)
- option.
-- The DSC engine runs each task as the SYSTEM account; any resources that need
- to be accessed with a different account need to have C(PsDscRunAsCredential)
- set.
-- To see the valid options for a DSC resource, run the module with C(-vvv) to
- show the possible module invocation. Default values are not shown in this
- output but are applied within the DSC engine.
-author:
-- Trond Hindenes (@trondhindenes)
-'''
-
-EXAMPLES = r'''
-- name: Extract zip file
- win_dsc:
- resource_name: Archive
- Ensure: Present
- Path: C:\Temp\zipfile.zip
- Destination: C:\Temp\Temp2
-
-- name: Install a Windows feature with the WindowsFeature resource
- win_dsc:
- resource_name: WindowsFeature
- Name: telnet-client
-
-- name: Edit HKCU reg key under specific user
- win_dsc:
- resource_name: Registry
- Ensure: Present
- Key: HKEY_CURRENT_USER\ExampleKey
- ValueName: TestValue
- ValueData: TestData
- PsDscRunAsCredential_username: '{{ansible_user}}'
- PsDscRunAsCredential_password: '{{ansible_password}}'
- no_log: true
-
-- name: Create file with multiple attributes
- win_dsc:
- resource_name: File
- DestinationPath: C:\ansible\dsc
- Attributes: # can also be a comma separated string, e.g. 'Hidden, System'
- - Hidden
- - System
- Ensure: Present
- Type: Directory
-
-- name: Call DSC resource with DateTime option
- win_dsc:
- resource_name: DateTimeResource
- DateTimeOption: '2019-02-22T13:57:31.2311892+00:00'
-
-# more complex example using custom DSC resource and dict values
-- name: Setup the xWebAdministration module
- win_psmodule:
- name: xWebAdministration
- state: present
-
-- name: Create IIS Website with Binding and Authentication options
- win_dsc:
- resource_name: xWebsite
- Ensure: Present
- Name: DSC Website
- State: Started
- PhysicalPath: C:\inetpub\wwwroot
- BindingInfo: # Example of a CimInstance[] DSC parameter (list of dicts)
- - Protocol: https
- Port: 1234
- CertificateStoreName: MY
- CertificateThumbprint: C676A89018C4D5902353545343634F35E6B3A659
- HostName: DSCTest
- IPAddress: '*'
- SSLFlags: '1'
- - Protocol: http
- Port: 4321
- IPAddress: '*'
- AuthenticationInfo: # Example of a CimInstance DSC parameter (dict)
- Anonymous: no
- Basic: true
- Digest: false
- Windows: yes
-'''
-
-RETURN = r'''
-module_version:
- description: The version of the dsc resource/module used.
- returned: always
- type: str
- sample: "1.0.1"
-reboot_required:
- description: Flag returned from the DSC engine indicating whether or not
- the machine requires a reboot for the invoked changes to take effect.
- returned: always
- type: bool
- sample: true
-verbose_test:
- description: The verbose output as a list from executing the DSC test
- method.
- returned: Ansible verbosity is -vvv or greater
- type: list
- sample: [
- "Perform operation 'Invoke CimMethod' with the following parameters, ",
- "[SERVER]: LCM: [Start Test ] [[File]DirectResourceAccess]",
- "Operation 'Invoke CimMethod' complete."
- ]
-verbose_set:
- description: The verbose output as a list from executing the DSC Set
- method.
- returned: Ansible verbosity is -vvv or greater and a change occurred
- type: list
- sample: [
- "Perform operation 'Invoke CimMethod' with the following parameters, ",
- "[SERVER]: LCM: [Start Set ] [[File]DirectResourceAccess]",
- "Operation 'Invoke CimMethod' complete."
- ]
-'''
diff --git a/test/support/windows-integration/plugins/modules/win_feature.ps1 b/test/support/windows-integration/plugins/modules/win_feature.ps1
deleted file mode 100644
index 9a7e1c30..00000000
--- a/test/support/windows-integration/plugins/modules/win_feature.ps1
+++ /dev/null
@@ -1,111 +0,0 @@
-#!powershell
-
-# Copyright: (c) 2014, Paul Durivage <paul.durivage@rackspace.com>
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-#Requires -Module Ansible.ModuleUtils.Legacy
-
-Import-Module -Name ServerManager
-
-$result = @{
- changed = $false
-}
-
-$params = Parse-Args $args -supports_check_mode $true
-$check_mode = Get-AnsibleParam -obj $params -name "_ansible_check_mode" -type "bool" -default $false
-
-$name = Get-AnsibleParam -obj $params -name "name" -type "list" -failifempty $true
-$state = Get-AnsibleParam -obj $params -name "state" -type "str" -default "present" -validateset "present","absent"
-
-$include_sub_features = Get-AnsibleParam -obj $params -name "include_sub_features" -type "bool" -default $false
-$include_management_tools = Get-AnsibleParam -obj $params -name "include_management_tools" -type "bool" -default $false
-$source = Get-AnsibleParam -obj $params -name "source" -type "str"
-
-$install_cmdlet = $false
-if (Get-Command -Name Install-WindowsFeature -ErrorAction SilentlyContinue) {
- Set-Alias -Name Install-AnsibleWindowsFeature -Value Install-WindowsFeature
- Set-Alias -Name Uninstall-AnsibleWindowsFeature -Value Uninstall-WindowsFeature
- $install_cmdlet = $true
-} elseif (Get-Command -Name Add-WindowsFeature -ErrorAction SilentlyContinue) {
- Set-Alias -Name Install-AnsibleWindowsFeature -Value Add-WindowsFeature
- Set-Alias -Name Uninstall-AnsibleWindowsFeature -Value Remove-WindowsFeature
-} else {
- Fail-Json -obj $result -message "This version of Windows does not support the cmdlets Install-WindowsFeature or Add-WindowsFeature"
-}
-
-if ($state -eq "present") {
- $install_args = @{
- Name = $name
- IncludeAllSubFeature = $include_sub_features
- Restart = $false
- WhatIf = $check_mode
- ErrorAction = "Stop"
- }
-
- if ($install_cmdlet) {
- $install_args.IncludeManagementTools = $include_management_tools
- $install_args.Confirm = $false
- if ($source) {
- if (-not (Test-Path -Path $source)) {
- Fail-Json -obj $result -message "Failed to find source path $source for feature install"
- }
- $install_args.Source = $source
- }
- }
-
- try {
- $action_results = Install-AnsibleWindowsFeature @install_args
- } catch {
- Fail-Json -obj $result -message "Failed to install Windows Feature: $($_.Exception.Message)"
- }
-} else {
- $uninstall_args = @{
- Name = $name
- Restart = $false
- WhatIf = $check_mode
- ErrorAction = "Stop"
- }
- if ($install_cmdlet) {
- $uninstall_args.IncludeManagementTools = $include_management_tools
- }
-
- try {
- $action_results = Uninstall-AnsibleWindowsFeature @uninstall_args
- } catch {
- Fail-Json -obj $result -message "Failed to uninstall Windows Feature: $($_.Exception.Message)"
- }
-}
-
-# Loop through results and create a hash containing details about
-# each role/feature that is installed/removed
-# $action_results.FeatureResult is not empty if anything was changed
-$feature_results = @()
-foreach ($action_result in $action_results.FeatureResult) {
- $message = @()
- foreach ($msg in $action_result.Message) {
- $message += @{
- message_type = $msg.MessageType.ToString()
- error_code = $msg.ErrorCode
- text = $msg.Text
- }
- }
-
- $feature_results += @{
- id = $action_result.Id
- display_name = $action_result.DisplayName
- message = $message
- reboot_required = ConvertTo-Bool -obj $action_result.RestartNeeded
- skip_reason = $action_result.SkipReason.ToString()
- success = ConvertTo-Bool -obj $action_result.Success
- restart_needed = ConvertTo-Bool -obj $action_result.RestartNeeded
- }
- $result.changed = $true
-}
-$result.feature_result = $feature_results
-$result.success = ConvertTo-Bool -obj $action_results.Success
-$result.exitcode = $action_results.ExitCode.ToString()
-$result.reboot_required = ConvertTo-Bool -obj $action_results.RestartNeeded
-# controls whether Ansible will fail or not
-$result.failed = (-not $action_results.Success)
-
-Exit-Json -obj $result
diff --git a/test/support/windows-integration/plugins/modules/win_feature.py b/test/support/windows-integration/plugins/modules/win_feature.py
deleted file mode 100644
index 62e310b2..00000000
--- a/test/support/windows-integration/plugins/modules/win_feature.py
+++ /dev/null
@@ -1,149 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-
-# Copyright: (c) 2014, Paul Durivage <paul.durivage@rackspace.com>
-# Copyright: (c) 2014, Trond Hindenes <trond@hindenes.com>
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-# this is a windows documentation stub. actual code lives in the .ps1
-# file of the same name
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-DOCUMENTATION = r'''
----
-module: win_feature
-version_added: "1.7"
-short_description: Installs and uninstalls Windows Features on Windows Server
-description:
- - Installs or uninstalls Windows Roles or Features on Windows Server.
- - This module uses the Add/Remove-WindowsFeature Cmdlets on Windows 2008 R2
-    and Install/Uninstall-WindowsFeature Cmdlets on Windows 2012, which are not available on client OS machines.
-options:
- name:
- description:
- - Names of roles or features to install as a single feature or a comma-separated list of features.
- - To list all available features use the PowerShell command C(Get-WindowsFeature).
- type: list
- required: yes
- state:
- description:
- - State of the features or roles on the system.
- type: str
- choices: [ absent, present ]
- default: present
- include_sub_features:
- description:
- - Adds all subfeatures of the specified feature.
- type: bool
- default: no
- include_management_tools:
- description:
- - Adds the corresponding management tools to the specified feature.
- - Not supported in Windows 2008 R2 and will be ignored.
- type: bool
- default: no
- source:
- description:
- - Specify a source to install the feature from.
- - Not supported in Windows 2008 R2 and will be ignored.
- - Can either be C({driveletter}:\sources\sxs) or C(\\{IP}\share\sources\sxs).
- type: str
- version_added: "2.1"
-seealso:
-- module: win_chocolatey
-- module: win_package
-author:
- - Paul Durivage (@angstwad)
- - Trond Hindenes (@trondhindenes)
-'''
-
-EXAMPLES = r'''
-- name: Install IIS (Web-Server only)
- win_feature:
- name: Web-Server
- state: present
-
-- name: Install IIS (Web-Server and Web-Common-Http)
- win_feature:
- name:
- - Web-Server
- - Web-Common-Http
- state: present
-
-- name: Install NET-Framework-Core from file
- win_feature:
- name: NET-Framework-Core
- source: C:\Temp\iso\sources\sxs
- state: present
-
-- name: Install IIS Web-Server with sub features and management tools
- win_feature:
- name: Web-Server
- state: present
- include_sub_features: yes
- include_management_tools: yes
- register: win_feature
-
-- name: Reboot if installing Web-Server feature requires it
- win_reboot:
- when: win_feature.reboot_required
-'''
-
-RETURN = r'''
-exitcode:
- description: The stringified exit code from the feature installation/removal command.
- returned: always
- type: str
- sample: Success
-feature_result:
- description: List of features that were installed or removed.
- returned: success
- type: complex
- sample:
- contains:
- display_name:
- description: Feature display name.
- returned: always
- type: str
- sample: "Telnet Client"
- id:
- description: A list of KB article IDs that apply to the update.
- returned: always
- type: int
- sample: 44
- message:
- description: Any messages returned from the feature subsystem that occurred during installation or removal of this feature.
- returned: always
- type: list
- elements: str
- sample: []
- reboot_required:
- description: True when the target server requires a reboot as a result of installing or removing this feature.
- returned: always
- type: bool
- sample: true
- restart_needed:
- description: DEPRECATED in Ansible 2.4 (refer to C(reboot_required) instead). True when the target server requires a reboot as a
- result of installing or removing this feature.
- returned: always
- type: bool
- sample: true
- skip_reason:
- description: The reason a feature installation or removal was skipped.
- returned: always
- type: str
- sample: NotSkipped
- success:
- description: If the feature installation or removal was successful.
- returned: always
- type: bool
- sample: true
-reboot_required:
- description: True when the target server requires a reboot to complete updates (no further updates can be installed until after a reboot).
- returned: success
- type: bool
- sample: true
-'''
diff --git a/test/support/windows-integration/plugins/modules/win_find.ps1 b/test/support/windows-integration/plugins/modules/win_find.ps1
deleted file mode 100644
index bc57c5ff..00000000
--- a/test/support/windows-integration/plugins/modules/win_find.ps1
+++ /dev/null
@@ -1,416 +0,0 @@
-#!powershell
-
-# Copyright: (c) 2016, Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-#AnsibleRequires -CSharpUtil Ansible.Basic
-#Requires -Module Ansible.ModuleUtils.LinkUtil
-
-$spec = @{
- options = @{
- paths = @{ type = "list"; elements = "str"; required = $true }
- age = @{ type = "str" }
- age_stamp = @{ type = "str"; default = "mtime"; choices = "mtime", "ctime", "atime" }
- file_type = @{ type = "str"; default = "file"; choices = "file", "directory" }
- follow = @{ type = "bool"; default = $false }
- hidden = @{ type = "bool"; default = $false }
- patterns = @{ type = "list"; elements = "str"; aliases = "regex", "regexp" }
- recurse = @{ type = "bool"; default = $false }
- size = @{ type = "str" }
- use_regex = @{ type = "bool"; default = $false }
- get_checksum = @{ type = "bool"; default = $true }
- checksum_algorithm = @{ type = "str"; default = "sha1"; choices = "md5", "sha1", "sha256", "sha384", "sha512" }
- }
- supports_check_mode = $true
-}
-
-$module = [Ansible.Basic.AnsibleModule]::Create($args, $spec)
-
-$paths = $module.Params.paths
-$age = $module.Params.age
-$age_stamp = $module.Params.age_stamp
-$file_type = $module.Params.file_type
-$follow = $module.Params.follow
-$hidden = $module.Params.hidden
-$patterns = $module.Params.patterns
-$recurse = $module.Params.recurse
-$size = $module.Params.size
-$use_regex = $module.Params.use_regex
-$get_checksum = $module.Params.get_checksum
-$checksum_algorithm = $module.Params.checksum_algorithm
-
-$module.Result.examined = 0
-$module.Result.files = @()
-$module.Result.matched = 0
-
-Load-LinkUtils
-
-Function Assert-Age {
- Param (
- [System.IO.FileSystemInfo]$File,
- [System.Int64]$Age,
- [System.String]$AgeStamp
- )
-
- $actual_age = switch ($AgeStamp) {
- mtime { $File.LastWriteTime.Ticks }
- ctime { $File.CreationTime.Ticks }
- atime { $File.LastAccessTime.Ticks }
- }
-
- if ($Age -ge 0) {
- return $Age -ge $actual_age
- } else {
- return ($Age * -1) -le $actual_age
- }
-}
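-# Worked example: a positive $Age is "at least this old" ($Age ticks mark the
-# newest acceptable timestamp), while a negative $Age means "no older than";
-# ($Age * -1) recovers the boundary ticks and the file passes when its
-# timestamp is at or after that instant.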
-
-Function Assert-FileType {
- Param (
- [System.IO.FileSystemInfo]$File,
- [System.String]$FileType
- )
-
- $is_dir = $File.Attributes.HasFlag([System.IO.FileAttributes]::Directory)
- return ($FileType -eq 'directory' -and $is_dir) -or ($FileType -eq 'file' -and -not $is_dir)
-}
-
-Function Assert-FileHidden {
- Param (
- [System.IO.FileSystemInfo]$File,
- [Switch]$IsHidden
- )
-
- $file_is_hidden = $File.Attributes.HasFlag([System.IO.FileAttributes]::Hidden)
- return $IsHidden.IsPresent -eq $file_is_hidden
-}
-
-
-Function Assert-FileNamePattern {
- Param (
- [System.IO.FileSystemInfo]$File,
- [System.String[]]$Patterns,
- [Switch]$UseRegex
- )
-
- $valid_match = $false
- foreach ($pattern in $Patterns) {
- if ($UseRegex) {
- if ($File.Name -match $pattern) {
- $valid_match = $true
- break
- }
- } else {
- if ($File.Name -like $pattern) {
- $valid_match = $true
- break
- }
- }
- }
- return $valid_match
-}
-
-Function Assert-FileSize {
- Param (
- [System.IO.FileSystemInfo]$File,
- [System.Int64]$Size
- )
-
- if ($Size -ge 0) {
- return $File.Length -ge $Size
- } else {
- return $File.Length -le ($Size * -1)
- }
-}
-
-Function Get-FileChecksum {
- Param (
- [System.String]$Path,
- [System.String]$Algorithm
- )
-
- $sp = switch ($algorithm) {
- 'md5' { New-Object -TypeName System.Security.Cryptography.MD5CryptoServiceProvider }
- 'sha1' { New-Object -TypeName System.Security.Cryptography.SHA1CryptoServiceProvider }
- 'sha256' { New-Object -TypeName System.Security.Cryptography.SHA256CryptoServiceProvider }
- 'sha384' { New-Object -TypeName System.Security.Cryptography.SHA384CryptoServiceProvider }
- 'sha512' { New-Object -TypeName System.Security.Cryptography.SHA512CryptoServiceProvider }
- }
-
- $fp = [System.IO.File]::Open($Path, [System.IO.Filemode]::Open, [System.IO.FileAccess]::Read, [System.IO.FileShare]::ReadWrite)
- try {
- $hash = [System.BitConverter]::ToString($sp.ComputeHash($fp)).Replace("-", "").ToLower()
- } finally {
- $fp.Dispose()
- }
-
- return $hash
-}
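-# Example usage (sketch): returns the lowercase hex digest of the file contents.
-#   Get-FileChecksum -Path C:\Windows\notepad.exe -Algorithm sha1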
-
-Function Search-Path {
- [CmdletBinding()]
- Param (
- [Parameter(Mandatory=$true)]
- [System.String]
- $Path,
-
- [Parameter(Mandatory=$true)]
- [AllowEmptyCollection()]
- [System.Collections.Generic.HashSet`1[System.String]]
- $CheckedPaths,
-
- [Parameter(Mandatory=$true)]
- [Object]
- $Module,
-
- [System.Int64]
- $Age,
-
- [System.String]
- $AgeStamp,
-
- [System.String]
- $FileType,
-
- [Switch]
- $Follow,
-
- [Switch]
- $GetChecksum,
-
- [Switch]
- $IsHidden,
-
- [System.String[]]
- $Patterns,
-
- [Switch]
- $Recurse,
-
- [System.Int64]
- $Size,
-
- [Switch]
- $UseRegex
- )
-
- $dir_obj = New-Object -TypeName System.IO.DirectoryInfo -ArgumentList $Path
- if ([Int32]$dir_obj.Attributes -eq -1) {
- $Module.Warn("Argument path '$Path' does not exist, skipping")
- return
- } elseif (-not $dir_obj.Attributes.HasFlag([System.IO.FileAttributes]::Directory)) {
- $Module.Warn("Argument path '$Path' is a file not a directory, skipping")
- return
- }
-
- $dir_files = @()
- try {
- $dir_files = $dir_obj.EnumerateFileSystemInfos("*", [System.IO.SearchOption]::TopDirectoryOnly)
- } catch [System.IO.DirectoryNotFoundException] { # Broken ReparsePoint/Symlink, cannot enumerate
- } catch [System.UnauthorizedAccessException] {} # No ListDirectory permissions, Get-ChildItem ignored this
-
- foreach ($dir_child in $dir_files) {
- if ($dir_child.Attributes.HasFlag([System.IO.FileAttributes]::Directory) -and $Recurse) {
- if ($Follow -or -not $dir_child.Attributes.HasFlag([System.IO.FileAttributes]::ReparsePoint)) {
- $PSBoundParameters.Remove('Path') > $null
- Search-Path -Path $dir_child.FullName @PSBoundParameters
- }
- }
-
- # Check to see if we've already encountered this path and skip if we have.
- if (-not $CheckedPaths.Add($dir_child.FullName.ToLowerInvariant())) {
- continue
- }
-
- $Module.Result.examined++
-
- if ($PSBoundParameters.ContainsKey('Age')) {
- $age_match = Assert-Age -File $dir_child -Age $Age -AgeStamp $AgeStamp
- } else {
- $age_match = $true
- }
-
- $file_type_match = Assert-FileType -File $dir_child -FileType $FileType
- $hidden_match = Assert-FileHidden -File $dir_child -IsHidden:$IsHidden
-
- if ($PSBoundParameters.ContainsKey('Patterns')) {
- $pattern_match = Assert-FileNamePattern -File $dir_child -Patterns $Patterns -UseRegex:$UseRegex.IsPresent
- } else {
- $pattern_match = $true
- }
-
- if ($PSBoundParameters.ContainsKey('Size')) {
- $size_match = Assert-FileSize -File $dir_child -Size $Size
- } else {
- $size_match = $true
- }
-
- if (-not ($age_match -and $file_type_match -and $hidden_match -and $pattern_match -and $size_match)) {
- continue
- }
-
- # It passed all our filters so add it
- $module.Result.matched++
-
- # TODO: Make this generic so it can be shared with win_find and win_stat.
- $epoch = New-Object -Type System.DateTime -ArgumentList 1970, 1, 1, 0, 0, 0, 0
- $file_info = @{
- attributes = $dir_child.Attributes.ToString()
- checksum = $null
- creationtime = (New-TimeSpan -Start $epoch -End $dir_child.CreationTime).TotalSeconds
- exists = $true
- extension = $null
- filename = $dir_child.Name
- isarchive = $dir_child.Attributes.HasFlag([System.IO.FileAttributes]::Archive)
- isdir = $dir_child.Attributes.HasFlag([System.IO.FileAttributes]::Directory)
- ishidden = $dir_child.Attributes.HasFlag([System.IO.FileAttributes]::Hidden)
- isreadonly = $dir_child.Attributes.HasFlag([System.IO.FileAttributes]::ReadOnly)
- isreg = $false
- isshared = $false
- lastaccesstime = (New-TimeSpan -Start $epoch -End $dir_child.LastAccessTime).TotalSeconds
- lastwritetime = (New-TimeSpan -Start $epoch -End $dir_child.LastWriteTime).TotalSeconds
- owner = $null
- path = $dir_child.FullName
- sharename = $null
- size = $null
- }
-
- try {
- $file_info.owner = $dir_child.GetAccessControl().Owner
- } catch {} # May not have rights to get the Owner, historical behaviour is to ignore.
-
- if ($dir_child.Attributes.HasFlag([System.IO.FileAttributes]::Directory)) {
- $share_info = Get-CimInstance -ClassName Win32_Share -Filter "Path='$($dir_child.FullName -replace '\\', '\\')'"
- if ($null -ne $share_info) {
- $file_info.isshared = $true
- $file_info.sharename = $share_info.Name
- }
- } else {
- $file_info.extension = $dir_child.Extension
- $file_info.isreg = $true
- $file_info.size = $dir_child.Length
-
- if ($GetChecksum) {
- try {
- $file_info.checksum = Get-FileChecksum -Path $dir_child.FullName -Algorithm $checksum_algorithm
- } catch {} # Just keep the checksum as $null in the case of a failure.
- }
- }
-
- # Append the link information if the path is a link
- $link_info = @{
- isjunction = $false
- islnk = $false
- nlink = 1
- lnk_source = $null
- lnk_target = $null
- hlnk_targets = @()
- }
- $link_stat = Get-Link -link_path $dir_child.FullName
- if ($null -ne $link_stat) {
- switch ($link_stat.Type) {
- "SymbolicLink" {
- $link_info.islnk = $true
- $link_info.isreg = $false
- $link_info.lnk_source = $link_stat.AbsolutePath
- $link_info.lnk_target = $link_stat.TargetPath
- break
- }
- "JunctionPoint" {
- $link_info.isjunction = $true
- $link_info.isreg = $false
- $link_info.lnk_source = $link_stat.AbsolutePath
- $link_info.lnk_target = $link_stat.TargetPath
- break
- }
- "HardLink" {
- $link_info.nlink = $link_stat.HardTargets.Count
-
- # remove current path from the targets
-                $hlnk_targets = $link_stat.HardTargets | Where-Object { $_ -ne $dir_child.FullName }
- $link_info.hlnk_targets = @($hlnk_targets)
- break
- }
- }
- }
- foreach ($kv in $link_info.GetEnumerator()) {
- $file_info.$($kv.Key) = $kv.Value
- }
-
- # Output the file_info object
- $file_info
- }
-}
-
-$search_params = @{
- CheckedPaths = [System.Collections.Generic.HashSet`1[System.String]]@()
- GetChecksum = $get_checksum
- Module = $module
- FileType = $file_type
- Follow = $follow
- IsHidden = $hidden
- Recurse = $recurse
-}
-
-if ($null -ne $age) {
- $seconds_per_unit = @{'s'=1; 'm'=60; 'h'=3600; 'd'=86400; 'w'=604800}
- $seconds_pattern = '^(-?\d+)(s|m|h|d|w)?$'
- $match = $age -match $seconds_pattern
- if ($Match) {
- $specified_seconds = [Int64]$Matches[1]
- if ($null -eq $Matches[2]) {
- $chosen_unit = 's'
- } else {
- $chosen_unit = $Matches[2]
- }
-
- $total_seconds = $specified_seconds * ($seconds_per_unit.$chosen_unit)
-
- if ($total_seconds -ge 0) {
- $search_params.Age = (Get-Date).AddSeconds($total_seconds * -1).Ticks
- } else {
- # Make sure we add the positive value of seconds to current time then make it negative for later comparisons.
- $age = (Get-Date).AddSeconds($total_seconds).Ticks
- $search_params.Age = $age * -1
- }
- $search_params.AgeStamp = $age_stamp
- } else {
- $module.FailJson("Invalid age pattern specified")
- }
-}
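-# Worked example of the parsing above: age '-1h' matches with $Matches[1] = '-1'
-# and $Matches[2] = 'h', so $total_seconds = -3600; the negative branch then sets
-# Age to the negated tick count of (now - 1 hour), i.e. "newer than 1 hour".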
-
-if ($null -ne $patterns) {
- $search_params.Patterns = $patterns
- $search_params.UseRegex = $use_regex
-}
-
-if ($null -ne $size) {
- $bytes_per_unit = @{'b'=1; 'k'=1KB; 'm'=1MB; 'g'=1GB;'t'=1TB}
- $size_pattern = '^(-?\d+)(b|k|m|g|t)?$'
- $match = $size -match $size_pattern
- if ($Match) {
- $specified_size = [Int64]$Matches[1]
- if ($null -eq $Matches[2]) {
- $chosen_byte = 'b'
- } else {
- $chosen_byte = $Matches[2]
- }
-
- $search_params.Size = $specified_size * ($bytes_per_unit.$chosen_byte)
- } else {
- $module.FailJson("Invalid size pattern specified")
- }
-}
-
-$matched_files = foreach ($path in $paths) {
- # Ensure we pass in an absolute path. We use the ExecutionContext as this is based on the PSProvider path not the
- # process location which can be different.
- $abs_path = $ExecutionContext.SessionState.Path.GetUnresolvedProviderPathFromPSPath($path)
- Search-Path -Path $abs_path @search_params
-}
-
-# Make sure we sort the files in alphabetical order.
-$module.Result.files = @() + ($matched_files | Sort-Object -Property {$_.path})
-
-$module.ExitJson()
-
diff --git a/test/support/windows-integration/plugins/modules/win_find.py b/test/support/windows-integration/plugins/modules/win_find.py
deleted file mode 100644
index f506f956..00000000
--- a/test/support/windows-integration/plugins/modules/win_find.py
+++ /dev/null
@@ -1,345 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-
-# Copyright: (c) 2016, Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-# this is a windows documentation stub. actual code lives in the .ps1
-# file of the same name
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-DOCUMENTATION = r'''
----
-module: win_find
-version_added: "2.3"
-short_description: Return a list of files based on specific criteria
-description:
- - Return a list of files based on specified criteria.
- - Multiple criteria are AND'd together.
- - For non-Windows targets, use the M(find) module instead.
-options:
- age:
- description:
- - Select files or folders whose age is equal to or greater than
- the specified time.
- - Use a negative age to find files equal to or less than
- the specified time.
- - You can choose seconds, minutes, hours, days or weeks
-          by specifying the first letter of any of
-          those words (e.g., "2s", "10d", "1w").
- type: str
- age_stamp:
- description:
- - Choose the file property against which we compare C(age).
- - The default attribute we compare with is the last modification time.
- type: str
- choices: [ atime, ctime, mtime ]
- default: mtime
- checksum_algorithm:
- description:
- - Algorithm to determine the checksum of a file.
- - Will throw an error if the host is unable to use specified algorithm.
- type: str
- choices: [ md5, sha1, sha256, sha384, sha512 ]
- default: sha1
- file_type:
- description: Type of file to search for.
- type: str
- choices: [ directory, file ]
- default: file
- follow:
- description:
- - Set this to C(yes) to follow symlinks in the path.
- - This needs to be used in conjunction with C(recurse).
- type: bool
- default: no
- get_checksum:
- description:
- - Whether to return a checksum of the file in the return info (default sha1),
- use C(checksum_algorithm) to change from the default.
- type: bool
- default: yes
- hidden:
- description: Set this to include hidden files or folders.
- type: bool
- default: no
- paths:
- description:
- - List of paths of directories to search for files or folders in.
- - This can be supplied as a single path or a list of paths.
- type: list
- required: yes
- patterns:
- description:
- - One or more (powershell or regex) patterns to compare filenames with.
- - The type of pattern matching is controlled by C(use_regex) option.
- - The patterns restrict the list of files or folders to be returned based on the filenames.
- - For a file to be matched it only has to match with one pattern in a list provided.
- type: list
- aliases: [ "regex", "regexp" ]
- recurse:
- description:
- - Will recursively descend into the directory looking for files or folders.
- type: bool
- default: no
- size:
- description:
- - Select files or folders whose size is equal to or greater than the specified size.
- - Use a negative value to find files equal to or less than the specified size.
-        - You can specify the size with a suffix of the byte type, e.g. kilo = k, mega = m, and so on.
- - Size is not evaluated for symbolic links.
- type: str
- use_regex:
- description:
- - Will set patterns to run as a regex check if set to C(yes).
- type: bool
- default: no
-author:
-- Jordan Borean (@jborean93)
-'''
-
-EXAMPLES = r'''
-- name: Find files in path
- win_find:
- paths: D:\Temp
-
-- name: Find hidden files in path
- win_find:
- paths: D:\Temp
- hidden: yes
-
-- name: Find files in multiple paths
- win_find:
- paths:
- - C:\Temp
- - D:\Temp
-
-- name: Find files in directory while searching recursively
- win_find:
- paths: D:\Temp
- recurse: yes
-
-- name: Find files in directory while following symlinks
- win_find:
- paths: D:\Temp
- recurse: yes
- follow: yes
-
-- name: Find files with .log and .out extension using powershell wildcards
- win_find:
- paths: D:\Temp
- patterns: [ '*.log', '*.out' ]
-
-- name: Find files in path based on regex pattern
- win_find:
- paths: D:\Temp
- patterns: out_\d{8}-\d{6}.log
-
-- name: Find files older than 1 day
- win_find:
- paths: D:\Temp
- age: 86400
-
-- name: Find files older than 1 day based on create time
- win_find:
- paths: D:\Temp
- age: 86400
- age_stamp: ctime
-
-- name: Find files older than 1 day with unit syntax
- win_find:
- paths: D:\Temp
- age: 1d
-
-- name: Find files newer than 1 hour
- win_find:
- paths: D:\Temp
- age: -3600
-
-- name: Find files newer than 1 hour with unit syntax
- win_find:
- paths: D:\Temp
- age: -1h
-
-- name: Find files larger than 1MB
- win_find:
- paths: D:\Temp
- size: 1048576
-
-- name: Find files larger than 1GB with unit syntax
- win_find:
- paths: D:\Temp
- size: 1g
-
-- name: Find files smaller than 1MB
- win_find:
- paths: D:\Temp
- size: -1048576
-
-- name: Find files smaller than 1GB with unit syntax
- win_find:
- paths: D:\Temp
- size: -1g
-
-- name: Find folders/symlinks in multiple paths
- win_find:
- paths:
- - C:\Temp
- - D:\Temp
- file_type: directory
-
-- name: Find files and return SHA256 checksum of files found
- win_find:
- paths: C:\Temp
- get_checksum: yes
- checksum_algorithm: sha256
-
-- name: Find files and do not return the checksum
- win_find:
- paths: C:\Temp
- get_checksum: no
-'''
-
-RETURN = r'''
-examined:
-    description: The number of files/folders that were checked.
- returned: always
- type: int
- sample: 10
-matched:
- description: The number of files/folders that match the criteria.
- returned: always
- type: int
- sample: 2
-files:
-    description: Information on the files/folders that match the criteria, returned as a list of dictionary elements
- for each file matched. The entries are sorted by the path value alphabetically.
- returned: success
- type: complex
- contains:
- attributes:
- description: attributes of the file at path in raw form.
- returned: success, path exists
- type: str
- sample: "Archive, Hidden"
- checksum:
- description: The checksum of a file based on checksum_algorithm specified.
- returned: success, path exists, path is a file, get_checksum == True
- type: str
- sample: 09cb79e8fc7453c84a07f644e441fd81623b7f98
- creationtime:
- description: The create time of the file represented in seconds since epoch.
- returned: success, path exists
- type: float
- sample: 1477984205.15
- exists:
-        description: Whether the file exists; will always be true for M(win_find).
- returned: success, path exists
- type: bool
- sample: true
- extension:
- description: The extension of the file at path.
- returned: success, path exists, path is a file
- type: str
- sample: ".ps1"
- filename:
- description: The name of the file.
- returned: success, path exists
- type: str
- sample: temp
- hlnk_targets:
- description: List of other files pointing to the same file (hard links), excludes the current file.
- returned: success, path exists
- type: list
- sample:
- - C:\temp\file.txt
- - C:\Windows\update.log
- isarchive:
- description: If the path is ready for archiving or not.
- returned: success, path exists
- type: bool
- sample: true
- isdir:
- description: If the path is a directory or not.
- returned: success, path exists
- type: bool
- sample: true
- ishidden:
- description: If the path is hidden or not.
- returned: success, path exists
- type: bool
- sample: true
- isjunction:
- description: If the path is a junction point.
- returned: success, path exists
- type: bool
- sample: true
- islnk:
- description: If the path is a symbolic link.
- returned: success, path exists
- type: bool
- sample: true
- isreadonly:
- description: If the path is read only or not.
- returned: success, path exists
- type: bool
- sample: true
- isreg:
- description: If the path is a regular file or not.
- returned: success, path exists
- type: bool
- sample: true
- isshared:
- description: If the path is shared or not.
- returned: success, path exists
- type: bool
- sample: true
- lastaccesstime:
- description: The last access time of the file represented in seconds since epoch.
- returned: success, path exists
- type: float
- sample: 1477984205.15
- lastwritetime:
- description: The last modification time of the file represented in seconds since epoch.
- returned: success, path exists
- type: float
- sample: 1477984205.15
- lnk_source:
- description: The target of the symlink normalized for the remote filesystem.
- returned: success, path exists, path is a symbolic link or junction point
- type: str
- sample: C:\temp
- lnk_target:
-        description: The target of the symlink. Note that relative paths remain relative; will return null if not a link.
- returned: success, path exists, path is a symbolic link or junction point
- type: str
- sample: temp
- nlink:
- description: Number of links to the file (hard links)
- returned: success, path exists
- type: int
- sample: 1
- owner:
- description: The owner of the file.
- returned: success, path exists
- type: str
- sample: BUILTIN\Administrators
- path:
- description: The full absolute path to the file.
- returned: success, path exists
- type: str
-            sample: C:\Temp\file.txt
- sharename:
- description: The name of share if folder is shared.
- returned: success, path exists, path is a directory and isshared == True
- type: str
- sample: file-share
- size:
- description: The size in bytes of the file.
- returned: success, path exists, path is a file
- type: int
- sample: 1024
-'''
diff --git a/test/support/windows-integration/plugins/modules/win_format.ps1 b/test/support/windows-integration/plugins/modules/win_format.ps1
deleted file mode 100644
index b5fd3ae0..00000000
--- a/test/support/windows-integration/plugins/modules/win_format.ps1
+++ /dev/null
@@ -1,200 +0,0 @@
-#!powershell
-
-# Copyright: (c) 2019, Varun Chopra (@chopraaa) <v@chopraaa.com>
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-#AnsibleRequires -CSharpUtil Ansible.Basic
-#AnsibleRequires -OSVersion 6.2
-
-Set-StrictMode -Version 2
-
-$ErrorActionPreference = "Stop"
-
-$spec = @{
- options = @{
- drive_letter = @{ type = "str" }
- path = @{ type = "str" }
- label = @{ type = "str" }
- new_label = @{ type = "str" }
- file_system = @{ type = "str"; choices = "ntfs", "refs", "exfat", "fat32", "fat" }
- allocation_unit_size = @{ type = "int" }
- large_frs = @{ type = "bool" }
- full = @{ type = "bool"; default = $false }
- compress = @{ type = "bool" }
- integrity_streams = @{ type = "bool" }
- force = @{ type = "bool"; default = $false }
- }
- mutually_exclusive = @(
- ,@('drive_letter', 'path', 'label')
- )
- required_one_of = @(
- ,@('drive_letter', 'path', 'label')
- )
- supports_check_mode = $true
-}
-
-$module = [Ansible.Basic.AnsibleModule]::Create($args, $spec)
-
-$drive_letter = $module.Params.drive_letter
-$path = $module.Params.path
-$label = $module.Params.label
-$new_label = $module.Params.new_label
-$file_system = $module.Params.file_system
-$allocation_unit_size = $module.Params.allocation_unit_size
-$large_frs = $module.Params.large_frs
-$full_format = $module.Params.full
-$compress_volume = $module.Params.compress
-$integrity_streams = $module.Params.integrity_streams
-$force_format = $module.Params.force
-
-# Some pre-checks
-if ($null -ne $drive_letter -and $drive_letter -notmatch "^[a-zA-Z]$") {
- $module.FailJson("The parameter drive_letter should be a single character A-Z")
-}
-if ($integrity_streams -eq $true -and $file_system -ne "refs") {
- $module.FailJson("Integrity streams can be enabled only on ReFS volumes. You specified: $($file_system)")
-}
-if ($compress_volume -eq $true) {
- if ($file_system -eq "ntfs") {
- if ($null -ne $allocation_unit_size -and $allocation_unit_size -gt 4096) {
- $module.FailJson("NTFS compression is not supported for allocation unit sizes above 4096")
- }
- }
- else {
- $module.FailJson("Compression can be enabled only on NTFS volumes. You specified: $($file_system)")
- }
-}
-
-function Get-AnsibleVolume {
- param(
- $DriveLetter,
- $Path,
- $Label
- )
-
- if ($null -ne $DriveLetter) {
- try {
- $volume = Get-Volume -DriveLetter $DriveLetter
- } catch {
- $module.FailJson("There was an error retrieving the volume using drive_letter $($DriveLetter): $($_.Exception.Message)", $_)
- }
- }
- elseif ($null -ne $Path) {
- try {
- $volume = Get-Volume -Path $Path
- } catch {
- $module.FailJson("There was an error retrieving the volume using path $($Path): $($_.Exception.Message)", $_)
- }
- }
- elseif ($null -ne $Label) {
- try {
- $volume = Get-Volume -FileSystemLabel $Label
- } catch {
- $module.FailJson("There was an error retrieving the volume using label $($Label): $($_.Exception.Message)", $_)
- }
- }
- else {
- $module.FailJson("Unable to locate volume: drive_letter, path and label were not specified")
- }
-
- return $volume
-}
-
-function Format-AnsibleVolume {
- param(
- $Path,
- $Label,
- $FileSystem,
- $Full,
- $UseLargeFRS,
- $Compress,
- $SetIntegrityStreams,
- $AllocationUnitSize
- )
- $parameters = @{
- Path = $Path
- Full = $Full
- }
- if ($null -ne $UseLargeFRS) {
- $parameters.Add("UseLargeFRS", $UseLargeFRS)
- }
- if ($null -ne $SetIntegrityStreams) {
- $parameters.Add("SetIntegrityStreams", $SetIntegrityStreams)
- }
- if ($null -ne $Compress){
- $parameters.Add("Compress", $Compress)
- }
- if ($null -ne $Label) {
- $parameters.Add("NewFileSystemLabel", $Label)
- }
- if ($null -ne $FileSystem) {
- $parameters.Add("FileSystem", $FileSystem)
- }
- if ($null -ne $AllocationUnitSize) {
- $parameters.Add("AllocationUnitSize", $AllocationUnitSize)
- }
-
- Format-Volume @parameters -Confirm:$false | Out-Null
-
-}
-
-$ansible_volume = Get-AnsibleVolume -DriveLetter $drive_letter -Path $path -Label $label
-$ansible_file_system = $ansible_volume.FileSystem
-$ansible_volume_size = $ansible_volume.Size
-$ansible_volume_alu = (Get-CimInstance -ClassName Win32_Volume -Filter "DeviceId = '$($ansible_volume.path.replace('\','\\'))'" -Property BlockSize).BlockSize
-
-$ansible_partition = Get-Partition -Volume $ansible_volume
-
-if (-not $force_format -and $null -ne $allocation_unit_size -and $ansible_volume_alu -ne 0 -and $null -ne $ansible_volume_alu -and $allocation_unit_size -ne $ansible_volume_alu) {
- $module.FailJson("Force format must be specified since target allocation unit size: $($allocation_unit_size) is different from the current allocation unit size of the volume: $($ansible_volume_alu)")
-}
-
-# Initialise $pristine up front; with Set-StrictMode enabled a later read of an
-# unset variable would throw if no access path below assigned it
-$pristine = $false
-foreach ($access_path in $ansible_partition.AccessPaths) {
- if ($access_path -ne $Path) {
- if ($null -ne $file_system -and
- -not [string]::IsNullOrEmpty($ansible_file_system) -and
- $file_system -ne $ansible_file_system)
- {
- if (-not $force_format)
- {
- $no_files_in_volume = (Get-ChildItem -LiteralPath $access_path -ErrorAction SilentlyContinue | Measure-Object).Count -eq 0
- if($no_files_in_volume)
- {
- $module.FailJson("Force format must be specified since target file system: $($file_system) is different from the current file system of the volume: $($ansible_file_system.ToLower())")
- }
- else
- {
- $module.FailJson("Force format must be specified to format non-pristine volumes")
- }
- }
- }
- else
- {
- $pristine = -not $force_format
- }
- }
-}
-
-if ($force_format) {
- if (-not $module.CheckMode) {
- Format-AnsibleVolume -Path $ansible_volume.Path -Full $full_format -Label $new_label -FileSystem $file_system -SetIntegrityStreams $integrity_streams -UseLargeFRS $large_frs -Compress $compress_volume -AllocationUnitSize $allocation_unit_size
- }
- $module.Result.changed = $true
-}
-else {
- if ($pristine) {
- if ($null -eq $new_label) {
- $new_label = $ansible_volume.FileSystemLabel
- }
- # Conditions for formatting
- if ($ansible_volume_size -eq 0 -or
- $ansible_volume.FileSystemLabel -ne $new_label) {
- if (-not $module.CheckMode) {
- Format-AnsibleVolume -Path $ansible_volume.Path -Full $full_format -Label $new_label -FileSystem $file_system -SetIntegrityStreams $integrity_streams -UseLargeFRS $large_frs -Compress $compress_volume -AllocationUnitSize $allocation_unit_size
- }
- $module.Result.changed = $true
- }
- }
-}
-
-$module.ExitJson()
diff --git a/test/support/windows-integration/plugins/modules/win_format.py b/test/support/windows-integration/plugins/modules/win_format.py
deleted file mode 100644
index f8f18ed7..00000000
--- a/test/support/windows-integration/plugins/modules/win_format.py
+++ /dev/null
@@ -1,103 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-
-# Copyright: (c) 2019, Varun Chopra (@chopraaa) <v@chopraaa.com>
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-ANSIBLE_METADATA = {
- 'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'
-}
-
-DOCUMENTATION = r'''
-module: win_format
-version_added: '2.8'
-short_description: Formats an existing volume or a new volume on an existing partition on Windows
-description:
- - The M(win_format) module formats an existing volume or a new volume on an existing partition on Windows
-options:
- drive_letter:
- description:
- - Used to specify the drive letter of the volume to be formatted.
- type: str
- path:
- description:
- - Used to specify the path to the volume to be formatted.
- type: str
- label:
- description:
- - Used to specify the label of the volume to be formatted.
- type: str
- new_label:
- description:
- - Used to specify the new file system label of the formatted volume.
- type: str
- file_system:
- description:
- - Used to specify the file system to be used when formatting the target volume.
- type: str
- choices: [ ntfs, refs, exfat, fat32, fat ]
- allocation_unit_size:
- description:
- - Specifies the cluster size to use when formatting the volume.
- - If no cluster size is specified when you format a partition, defaults are selected based on
- the size of the partition.
- - This value must be a multiple of the physical sector size of the disk.
- type: int
- large_frs:
- description:
- - Specifies that large File Record System (FRS) should be used.
- type: bool
- compress:
- description:
- - Enable compression on the resulting NTFS volume.
- - NTFS compression is not supported where I(allocation_unit_size) is more than 4096.
- type: bool
- integrity_streams:
- description:
- - Enable integrity streams on the resulting ReFS volume.
- type: bool
- full:
- description:
- - A full format writes to every sector of the disk, takes much longer to perform than the
- default (quick) format, and is not recommended on storage that is thinly provisioned.
- - Specify C(true) for full format.
- type: bool
- force:
- description:
- - Specify if formatting should be forced for volumes that are not created from new partitions
- or if the source and target file system are different.
- type: bool
-notes:
- - Microsoft Windows Server 2012 or Microsoft Windows 8 or newer is required to use this module. To check if your system is compatible, see
- U(https://docs.microsoft.com/en-us/windows/desktop/sysinfo/operating-system-version).
-  - One of the three parameters (I(drive_letter), I(path) and I(label)) is mandatory to identify the target
-    volume, but more than one cannot be specified at the same time.
- - This module is idempotent if I(force) is not specified and file system labels remain preserved.
- - For more information, see U(https://docs.microsoft.com/en-us/previous-versions/windows/desktop/stormgmt/format-msft-volume)
-seealso:
- - module: win_disk_facts
- - module: win_partition
-author:
- - Varun Chopra (@chopraaa) <v@chopraaa.com>
-'''
-
-EXAMPLES = r'''
-- name: Create a partition with drive letter D and size 5 GiB
- win_partition:
- drive_letter: D
- partition_size: 5 GiB
- disk_number: 1
-
-- name: Full format the newly created partition as NTFS and label it
- win_format:
- drive_letter: D
- file_system: NTFS
- new_label: Formatted
- full: True
-'''
-
-RETURN = r'''
-#
-'''
diff --git a/test/support/windows-integration/plugins/modules/win_path.ps1 b/test/support/windows-integration/plugins/modules/win_path.ps1
deleted file mode 100644
index 04eb41a3..00000000
--- a/test/support/windows-integration/plugins/modules/win_path.ps1
+++ /dev/null
@@ -1,145 +0,0 @@
-#!powershell
-
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-#Requires -Module Ansible.ModuleUtils.Legacy
-
-Set-StrictMode -Version 2
-$ErrorActionPreference = "Stop"
-
-$system_path = "System\CurrentControlSet\Control\Session Manager\Environment"
-$user_path = "Environment"
-
-# list/arraylist methods don't allow IEqualityComparer override for case/backslash/quote-insensitivity; roll our own search
-Function Get-IndexOfPathElement ($list, [string]$value) {
- $idx = 0
- $value = $value.Trim('"').Trim('\')
- ForEach($el in $list) {
- If ([string]$el.Trim('"').Trim('\') -ieq $value) {
- return $idx
- }
-
- $idx++
- }
-
- return -1
-}
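-# Example: quoting, trailing backslashes and case are all ignored, so each of
-# these lookups returns 0 against a list whose first element is 'C:\bin':
-#   Get-IndexOfPathElement @('C:\bin') 'c:\bin\'
-#   Get-IndexOfPathElement @('C:\bin') '"C:\BIN"'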
-
-# alters list in place, returns true if at least one element was added
-Function Add-Elements ($existing_elements, $elements_to_add) {
- $last_idx = -1
- $changed = $false
-
- ForEach($el in $elements_to_add) {
- $idx = Get-IndexOfPathElement $existing_elements $el
-
- # add missing elements at the end
- If ($idx -eq -1) {
- $last_idx = $existing_elements.Add($el)
- $changed = $true
- }
- ElseIf ($idx -lt $last_idx) {
- $existing_elements.RemoveAt($idx) | Out-Null
- $existing_elements.Add($el) | Out-Null
- $last_idx = $existing_elements.Count - 1
- $changed = $true
- }
- Else {
- $last_idx = $idx
- }
- }
-
- return $changed
-}
-
-# alters list in place, returns true if at least one element was removed
-Function Remove-Elements ($existing_elements, $elements_to_remove) {
- $count = $existing_elements.Count
-
- ForEach($el in $elements_to_remove) {
- $idx = Get-IndexOfPathElement $existing_elements $el
- $result.removed_idx = $idx
- If ($idx -gt -1) {
- $existing_elements.RemoveAt($idx)
- }
- }
-
- return $count -ne $existing_elements.Count
-}
-
-# PS registry provider doesn't allow access to unexpanded REG_EXPAND_SZ; fall back to .NET
-Function Get-RawPathVar ($scope) {
- If ($scope -eq "user") {
- $env_key = [Microsoft.Win32.Registry]::CurrentUser.OpenSubKey($user_path)
- }
- ElseIf ($scope -eq "machine") {
- $env_key = [Microsoft.Win32.Registry]::LocalMachine.OpenSubKey($system_path)
- }
-
- return $env_key.GetValue($var_name, "", [Microsoft.Win32.RegistryValueOptions]::DoNotExpandEnvironmentNames)
-}
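-# e.g. a REG_EXPAND_SZ value stored as %SystemRoot%\system32 comes back verbatim
-# here, where the registry provider would have expanded it to C:\Windows\system32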
-
-Function Set-RawPathVar($path_value, $scope) {
- If ($scope -eq "user") {
- $var_path = "HKCU:\" + $user_path
- }
- ElseIf ($scope -eq "machine") {
- $var_path = "HKLM:\" + $system_path
- }
-
- Set-ItemProperty $var_path -Name $var_name -Value $path_value -Type ExpandString | Out-Null
-
- return $path_value
-}
-
-$parsed_args = Parse-Args $args -supports_check_mode $true
-
-$result = @{changed=$false}
-
-$var_name = Get-AnsibleParam $parsed_args "name" -Default "PATH"
-$elements = Get-AnsibleParam $parsed_args "elements" -FailIfEmpty $true -resultobj $result
-$state = Get-AnsibleParam $parsed_args "state" -Default "present" -ValidateSet "present","absent"
-$scope = Get-AnsibleParam $parsed_args "scope" -Default "machine" -ValidateSet "machine","user"
-
-$check_mode = Get-AnsibleParam $parsed_args "_ansible_check_mode" -Default $false
-
-If ($elements -is [string]) {
- $elements = @($elements)
-}
-
-If ($elements -isnot [Array]) {
- Fail-Json $result "elements must be a string or list of path strings"
-}
-
-$current_value = Get-RawPathVar $scope
-$result.path_value = $current_value
-
-# TODO: test case-canonicalization on wacky unicode values (eg turkish i)
-# TODO: detect and warn/fail on unparseable path? (eg, unbalanced quotes, invalid path chars)
-# TODO: detect and warn/fail if system path and Powershell isn't on it?
-
-$existing_elements = New-Object System.Collections.ArrayList
-
-# split on semicolons, accounting for quoted values with embedded semicolons (which may or may not be wrapped in whitespace)
-$pathsplit_re = [regex] '((?<q>\s*"[^"]+"\s*)|(?<q>[^;]+))(;$|$|;)'
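-# e.g. 'C:\Windows;"C:\Program Files;x";C:\Tools' yields three elements, keeping
-# the quotes (and the embedded semicolon) of the middle element intact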
-
-ForEach ($m in $pathsplit_re.Matches($current_value)) {
- $existing_elements.Add($m.Groups['q'].Value) | Out-Null
-}
-
-If ($state -eq "absent") {
- $result.changed = Remove-Elements $existing_elements $elements
-}
-ElseIf ($state -eq "present") {
- $result.changed = Add-Elements $existing_elements $elements
-}
-
-# calculate the new path value from the existing elements
-$path_value = [String]::Join(";", $existing_elements.ToArray())
-$result.path_value = $path_value
-
-If ($result.changed -and -not $check_mode) {
- Set-RawPathVar $path_value $scope | Out-Null
-}
-
-Exit-Json $result
diff --git a/test/support/windows-integration/plugins/modules/win_path.py b/test/support/windows-integration/plugins/modules/win_path.py
deleted file mode 100644
index 6404504f..00000000
--- a/test/support/windows-integration/plugins/modules/win_path.py
+++ /dev/null
@@ -1,79 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-
-# Copyright: (c) 2016, Red Hat | Ansible
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-# This is a windows documentation stub. Actual code lives in the .ps1
-# file of the same name
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'core'}
-
-DOCUMENTATION = r'''
----
-module: win_path
-version_added: "2.3"
-short_description: Manage Windows path environment variables
-description:
- - Allows element-based ordering, addition, and removal of Windows path environment variables.
-options:
- name:
- description:
- - Target path environment variable name.
- type: str
- default: PATH
- elements:
- description:
- - A single path element, or a list of path elements (i.e. directories) to add or remove.
- - When multiple elements are included in the list (and C(state) is C(present)), the elements are guaranteed to appear in the same relative order
- in the resultant path value.
- - Variable expansions (e.g. C(%VARNAME%)) are allowed, and are stored unexpanded in the target path element.
- - Any existing path elements not mentioned in C(elements) are always preserved in their current order.
- - New path elements are appended to the path, and existing path elements may be moved closer to the end to satisfy the requested ordering.
- - Paths are compared in a case-insensitive fashion, and trailing backslashes are ignored for comparison purposes. However, note that trailing
- backslashes in YAML require quotes.
- type: list
- required: yes
- state:
- description:
- - Whether the path elements specified in C(elements) should be present or absent.
- type: str
- choices: [ absent, present ]
- scope:
- description:
- - The level at which the environment variable specified by C(name) should be managed (either for the current user or global machine scope).
- type: str
- choices: [ machine, user ]
- default: machine
-notes:
- - This module is for modifying individual elements of path-like
- environment variables. For general-purpose management of other
- environment variables, use the M(win_environment) module.
- - This module does not broadcast change events.
- This means that the minority of Windows applications that can have
- their environment changed without restarting will not be notified and
- will therefore need restarting to pick up new environment settings.
- - User-level environment variables require an interactive user to
- log out and back in before they become available.
-seealso:
-- module: win_environment
-author:
-- Matt Davis (@nitzmahone)
-'''
-
-EXAMPLES = r'''
-- name: Ensure that system32 and Powershell are present on the global system path, and in the specified order
- win_path:
- elements:
- - '%SystemRoot%\system32'
- - '%SystemRoot%\system32\WindowsPowerShell\v1.0'
-
-- name: Ensure that C:\Program Files\MyJavaThing is not on the current user's CLASSPATH
- win_path:
- name: CLASSPATH
- elements: C:\Program Files\MyJavaThing
- scope: user
- state: absent
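-
-# A sketch, not part of the original examples: variable references such as
-# %SystemRoot% are stored unexpanded; the Tools subdirectory is hypothetical
-- name: Ensure an unexpanded %SystemRoot% element is present on the user's PATH
- win_path:
- elements: '%SystemRoot%\Tools'
- scope: user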
-'''
diff --git a/test/support/windows-integration/plugins/modules/win_tempfile.py b/test/support/windows-integration/plugins/modules/win_tempfile.py
deleted file mode 100644
index 58dd6501..00000000
--- a/test/support/windows-integration/plugins/modules/win_tempfile.py
+++ /dev/null
@@ -1,67 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-
-# Copyright: (c) 2017, Dag Wieers <dag@wieers.com>
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-DOCUMENTATION = r'''
----
-module: win_tempfile
-version_added: "2.3"
-short_description: Creates temporary files and directories
-description:
- - Creates temporary files and directories.
- - For non-Windows targets, please use the M(tempfile) module instead.
-options:
- state:
- description:
- - Whether to create file or directory.
- type: str
- choices: [ directory, file ]
- default: file
- path:
- description:
- - Location where temporary file or directory should be created.
- - If C(path) is not specified, the default system temporary directory (C(%TEMP%)) will be used.
- type: path
- default: '%TEMP%'
- aliases: [ dest ]
- prefix:
- description:
- - Prefix of the file/directory name created by the module.
- type: str
- default: ansible.
- suffix:
- description:
- - Suffix of the file/directory name created by the module.
- type: str
- default: ''
-seealso:
-- module: tempfile
-author:
-- Dag Wieers (@dagwieers)
-'''
-
-EXAMPLES = r"""
-- name: Create temporary build directory
- win_tempfile:
- state: directory
- suffix: build
-
-- name: Create temporary file
- win_tempfile:
- state: file
- suffix: temp
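-
-# A sketch, not part of the original examples: C:\Temp and the prefix/suffix
-# values are hypothetical; tmp.path then holds the absolute path (see RETURN below)
-- name: Create temporary log file under a custom path with prefix and suffix
- win_tempfile:
- path: C:\Temp
- prefix: build_
- suffix: .log
- register: tmp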
-"""
-
-RETURN = r'''
-path:
- description: The absolute path to the created file or directory.
- returned: success
- type: str
- sample: C:\Users\Administrator\AppData\Local\Temp\ansible.bMlvdk
-'''
diff --git a/test/support/windows-integration/plugins/modules/win_template.py b/test/support/windows-integration/plugins/modules/win_template.py
deleted file mode 100644
index bd8b2492..00000000
--- a/test/support/windows-integration/plugins/modules/win_template.py
+++ /dev/null
@@ -1,66 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-# this is a virtual module that is entirely implemented server side
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['stableinterface'],
- 'supported_by': 'core'}
-
-DOCUMENTATION = r'''
----
-module: win_template
-version_added: "1.9.2"
-short_description: Template a file out to a remote server
-options:
- backup:
- description:
- - Determine whether a backup should be created.
- - When set to C(yes), create a backup file including the timestamp information
- so you can get the original file back if you somehow clobbered it incorrectly.
- type: bool
- default: no
- version_added: '2.8'
- newline_sequence:
- default: '\r\n'
- force:
- version_added: '2.4'
-notes:
-- Beware fetching files from Windows machines when creating templates because certain tools, such as PowerShell ISE
- and regedit's export facility, add a Byte Order Mark as the first character of the file, which can cause tracebacks.
-- You can use the M(win_copy) module with the C(content:) option if you prefer to define the template inline, as part of the
- playbook.
-- For Linux targets you can use M(template), which uses C(\n) as C(newline_sequence) by default.
-seealso:
-- module: win_copy
-- module: copy
-- module: template
-author:
-- Jon Hawkesworth (@jhawkesworth)
-extends_documentation_fragment:
-- template_common
-'''
-
-EXAMPLES = r'''
-- name: Create a file from a Jinja2 template
- win_template:
- src: /mytemplates/file.conf.j2
- dest: C:\Temp\file.conf
-
-- name: Create a Unix-style file from a Jinja2 template
- win_template:
- src: unix/config.conf.j2
- dest: C:\share\unix\config.conf
- newline_sequence: '\n'
- backup: yes
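-
-# A sketch, not part of the original examples: capture the backup_file return
-# value when backup=yes replaces an existing file (the registered name tpl is arbitrary)
-- name: Template a file and record any backup that was taken
- win_template:
- src: /mytemplates/file.conf.j2
- dest: C:\Temp\file.conf
- backup: yes
- register: tpl
-
-- name: Show the backup file location, if one was created
- debug:
- var: tpl.backup_file
- when: tpl.backup_file is defined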
-'''
-
-RETURN = r'''
-backup_file:
- description: Name of the backup file that was created.
- returned: if backup=yes
- type: str
- sample: C:\Path\To\File.txt.11540.20150212-220915.bak
-'''