# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
# Copyright: (c) 2017, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

# Make coding more python3-ish
|
from __future__ import (absolute_import, division, print_function)
|
|
|
|
__metaclass__ = type
|
|
|
|
|
|
|
|
import copy
|
2017-09-10 01:40:07 +00:00
|
|
|
import os
|
|
|
|
import os.path
|
2017-07-03 19:27:53 +00:00
|
|
|
import re
|
2016-01-22 16:23:10 +00:00
|
|
|
import tempfile
|
2017-09-10 01:40:07 +00:00
|
|
|
|
2018-04-10 21:14:38 +00:00
|
|
|
from ansible import constants as C
|
2017-02-21 21:45:57 +00:00
|
|
|
from ansible.errors import AnsibleFileNotFound, AnsibleParserError
|
2016-09-07 05:54:17 +00:00
|
|
|
from ansible.module_utils.basic import is_executable
|
2018-01-20 19:56:18 +00:00
|
|
|
from ansible.module_utils.six import binary_type, text_type
|
2016-09-07 05:54:17 +00:00
|
|
|
from ansible.module_utils._text import to_bytes, to_native, to_text
|
2015-10-26 21:23:09 +00:00
|
|
|
from ansible.parsing.quoting import unquote
|
2018-01-16 22:12:23 +00:00
|
|
|
from ansible.parsing.utils.yaml import from_yaml
|
2017-09-10 01:40:07 +00:00
|
|
|
from ansible.parsing.vault import VaultLib, b_HEADER, is_encrypted, is_encrypted_file, parse_vaulttext_envelope
|
2015-10-26 21:23:09 +00:00
|
|
|
from ansible.utils.path import unfrackpath
|
2018-11-20 23:06:51 +00:00
|
|
|
from ansible.utils.display import Display
|
2015-10-26 21:23:09 +00:00
|
|
|
|
2018-11-20 23:06:51 +00:00
|
|
|
display = Display()
|
2016-06-28 21:23:30 +00:00
|
|
|
|
2017-09-10 01:40:07 +00:00
|
|
|
|
2017-07-03 19:27:53 +00:00
|
|
|
# Tries to determine if a path is inside a role, last dir must be 'tasks'
|
|
|
|
# this is not perfect but people should really avoid 'tasks' dirs outside roles when using Ansible.
|
|
|
|
RE_TASKS = re.compile(u'(?:^|%s)+tasks%s?$' % (os.path.sep, os.path.sep))
|
|
|
|
|
2016-09-07 05:54:17 +00:00
|
|
|
|
2017-03-23 20:35:05 +00:00
|
|
|
class DataLoader:
|
2015-10-26 21:23:09 +00:00
|
|
|
|
|
|
|
'''
|
|
|
|
The DataLoader class is used to load and parse YAML or JSON content,
|
|
|
|
either from a given file name or from a string that was previously
|
|
|
|
read in through other means. A Vault password can be specified, and
|
|
|
|
any vault-encrypted files will be decrypted.
|
|
|
|
|
|
|
|
Data read from files will also be cached, so the file will never be
|
|
|
|
read from disk more than once.
|
|
|
|
|
|
|
|
Usage:
|
|
|
|
|
|
|
|
dl = DataLoader()
|
2015-11-18 10:20:34 +00:00
|
|
|
# optionally: dl.set_vault_password('foo')
|
2015-10-26 21:23:09 +00:00
|
|
|
ds = dl.load('...')
|
|
|
|
ds = dl.load_from_file('/path/to/file')
|
|
|
|
'''
|
|
|
|
|
|
|
|
def __init__(self):
|
|
|
|
self._basedir = '.'
|
|
|
|
self._FILE_CACHE = dict()
|
2016-01-22 16:23:10 +00:00
|
|
|
self._tempfiles = set()
|
2015-10-26 21:23:09 +00:00
|
|
|
|
|
|
|
# initialize the vault stuff with an empty password
|
Support multiple vault passwords (#22756)
Fixes #13243
** Add --vault-id to name/identify multiple vault passwords
Use --vault-id to indicate id and path/type
--vault-id=prompt # prompt for default vault id password
--vault-id=myorg@prompt # prompt for a vault_id named 'myorg'
--vault-id=a_password_file # load ./a_password_file for default id
--vault-id=myorg@a_password_file # load file for 'myorg' vault id
vault_id's are created implicitly for existing --vault-password-file
and --ask-vault-pass options.
Vault ids are just for UX purposes and bookkeeping. Only the vault
payload and the password bytestring is needed to decrypt a
vault blob.
Replace passing password around everywhere with
a VaultSecrets object.
If we specify a vault_id, mention that in password prompts
Specifying multiple -vault-password-files will
now try each until one works
** Rev vault format in a backwards compatible way
The 1.2 vault format adds the vault_id to the header line
of the vault text. This is backwards compatible with older
versions of ansible. Old versions will just ignore it and
treat it as the default (and only) vault id.
Note: only 2.4+ supports multiple vault passwords, so while
earlier ansible versions can read the vault-1.2 format, it
does not make them magically support multiple vault passwords.
use 1.1 format for 'default' vault_id
Vaulted items that need to include a vault_id will be
written in 1.2 format.
If we set a new DEFAULT_VAULT_IDENTITY, then the default will
use version 1.2
vault will only use a vault_id if one is specified. So if none
is specified and C.DEFAULT_VAULT_IDENTITY is 'default'
we use the old format.
** Changes/refactors needed to implement multiple vault passwords
raise exceptions on decrypt fail, check vault id early
split out parsing the vault plaintext envelope (with the
sha/original plaintext) to _split_plaintext_envelope()
some cli fixups for specifying multiple paths in
the unfrack_paths optparse callback
fix py3 dict.keys() 'dict_keys object is not indexable' error
pluralize cli.options.vault_password_file -> vault_password_files
pluralize cli.options.new_vault_password_file -> new_vault_password_files
pluralize cli.options.vault_id -> cli.options.vault_ids
** Add a config option (vault_id_match) to force vault id matching.
With 'vault_id_match=True' and an ansible
vault that provides a vault_id, then decryption will require
that a matching vault_id is required. (via
--vault-id=my_vault_id@password_file, for ex).
In other words, if the config option is true, then only
the vault secrets with matching vault ids are candidates for
decrypting a vault. If option is false (the default), then
all of the provided vault secrets will be selected.
If a user doesn't want all vault secrets to be tried to
decrypt any vault content, they can enable this option.
Note: The vault id used for the match is not encrypted or
cryptographically signed. It is just a label/id/nickname used
for referencing a specific vault secret.
2017-07-28 19:20:58 +00:00
|
|
|
# TODO: replace with a ref to something that can get the password
|
|
|
|
# a creds/auth provider
|
|
|
|
# self.set_vault_password(None)
|
|
|
|
self._vaults = {}
|
|
|
|
self._vault = VaultLib()
|
|
|
|
self.set_vault_secrets(None)
|
2015-10-26 21:23:09 +00:00
|
|
|
|
Support multiple vault passwords (#22756)
Fixes #13243
** Add --vault-id to name/identify multiple vault passwords
Use --vault-id to indicate id and path/type
--vault-id=prompt # prompt for default vault id password
--vault-id=myorg@prompt # prompt for a vault_id named 'myorg'
--vault-id=a_password_file # load ./a_password_file for default id
--vault-id=myorg@a_password_file # load file for 'myorg' vault id
vault_id's are created implicitly for existing --vault-password-file
and --ask-vault-pass options.
Vault ids are just for UX purposes and bookkeeping. Only the vault
payload and the password bytestring is needed to decrypt a
vault blob.
Replace passing password around everywhere with
a VaultSecrets object.
If we specify a vault_id, mention that in password prompts
Specifying multiple -vault-password-files will
now try each until one works
** Rev vault format in a backwards compatible way
The 1.2 vault format adds the vault_id to the header line
of the vault text. This is backwards compatible with older
versions of ansible. Old versions will just ignore it and
treat it as the default (and only) vault id.
Note: only 2.4+ supports multiple vault passwords, so while
earlier ansible versions can read the vault-1.2 format, it
does not make them magically support multiple vault passwords.
use 1.1 format for 'default' vault_id
Vaulted items that need to include a vault_id will be
written in 1.2 format.
If we set a new DEFAULT_VAULT_IDENTITY, then the default will
use version 1.2
vault will only use a vault_id if one is specified. So if none
is specified and C.DEFAULT_VAULT_IDENTITY is 'default'
we use the old format.
** Changes/refactors needed to implement multiple vault passwords
raise exceptions on decrypt fail, check vault id early
split out parsing the vault plaintext envelope (with the
sha/original plaintext) to _split_plaintext_envelope()
some cli fixups for specifying multiple paths in
the unfrack_paths optparse callback
fix py3 dict.keys() 'dict_keys object is not indexable' error
pluralize cli.options.vault_password_file -> vault_password_files
pluralize cli.options.new_vault_password_file -> new_vault_password_files
pluralize cli.options.vault_id -> cli.options.vault_ids
** Add a config option (vault_id_match) to force vault id matching.
With 'vault_id_match=True' and an ansible
vault that provides a vault_id, then decryption will require
that a matching vault_id is required. (via
--vault-id=my_vault_id@password_file, for ex).
In other words, if the config option is true, then only
the vault secrets with matching vault ids are candidates for
decrypting a vault. If option is false (the default), then
all of the provided vault secrets will be selected.
If a user doesn't want all vault secrets to be tried to
decrypt any vault content, they can enable this option.
Note: The vault id used for the match is not encrypted or
cryptographically signed. It is just a label/id/nickname used
for referencing a specific vault secret.
2017-07-28 19:20:58 +00:00
|
|
|
# TODO: since we can query vault_secrets late, we could provide this to DataLoader init
|
|
|
|
def set_vault_secrets(self, vault_secrets):
|
|
|
|
self._vault.secrets = vault_secrets
|
2015-10-26 21:23:09 +00:00
|
|
|
|
|
|
|
def load(self, data, file_name='<string>', show_content=True):
|
2018-01-16 22:12:23 +00:00
|
|
|
'''Backwards compat for now'''
|
|
|
|
return from_yaml(data, file_name, show_content, self._vault.secrets)
|
2015-10-26 21:23:09 +00:00
|
|
|
|
2017-05-23 21:16:49 +00:00
|
|
|
def load_from_file(self, file_name, cache=True, unsafe=False):
|
2015-10-26 21:23:09 +00:00
|
|
|
''' Loads data from a file, which can contain either JSON or YAML. '''
|
|
|
|
|
|
|
|
file_name = self.path_dwim(file_name)
|
2017-05-23 21:16:49 +00:00
|
|
|
display.debug("Loading data from %s" % file_name)
|
2015-10-26 21:23:09 +00:00
|
|
|
|
|
|
|
# if the file has already been read in and cached, we'll
|
|
|
|
# return those results to avoid more file/vault operations
|
2017-05-23 21:16:49 +00:00
|
|
|
if cache and file_name in self._FILE_CACHE:
|
2015-10-26 21:23:09 +00:00
|
|
|
parsed_data = self._FILE_CACHE[file_name]
|
|
|
|
else:
|
|
|
|
# read the file contents and load the data structure from them
|
2016-11-07 15:07:26 +00:00
|
|
|
(b_file_data, show_content) = self._get_file_contents(file_name)
|
|
|
|
|
|
|
|
file_data = to_text(b_file_data, errors='surrogate_or_strict')
|
2015-10-26 21:23:09 +00:00
|
|
|
parsed_data = self.load(data=file_data, file_name=file_name, show_content=show_content)
|
|
|
|
|
|
|
|
# cache the file contents for next time
|
|
|
|
self._FILE_CACHE[file_name] = parsed_data
|
|
|
|
|
2017-05-23 21:16:49 +00:00
|
|
|
if unsafe:
|
|
|
|
return parsed_data
|
|
|
|
else:
|
|
|
|
# return a deep copy here, so the cache is not affected
|
|
|
|
return copy.deepcopy(parsed_data)
|
2015-10-26 21:23:09 +00:00
|
|
|
|
|
|
|
def path_exists(self, path):
|
|
|
|
path = self.path_dwim(path)
|
2016-09-07 05:54:17 +00:00
|
|
|
return os.path.exists(to_bytes(path, errors='surrogate_or_strict'))
|
2015-10-26 21:23:09 +00:00
|
|
|
|
|
|
|
def is_file(self, path):
|
|
|
|
path = self.path_dwim(path)
|
2016-09-07 05:54:17 +00:00
|
|
|
return os.path.isfile(to_bytes(path, errors='surrogate_or_strict')) or path == os.devnull
|
2015-10-26 21:23:09 +00:00
|
|
|
|
|
|
|
def is_directory(self, path):
|
|
|
|
path = self.path_dwim(path)
|
2016-09-07 05:54:17 +00:00
|
|
|
return os.path.isdir(to_bytes(path, errors='surrogate_or_strict'))
|
2015-10-26 21:23:09 +00:00
|
|
|
|
|
|
|
def list_directory(self, path):
|
|
|
|
path = self.path_dwim(path)
|
|
|
|
return os.listdir(path)
|
|
|
|
|
|
|
|
def is_executable(self, path):
|
|
|
|
'''is the given path executable?'''
|
|
|
|
path = self.path_dwim(path)
|
|
|
|
return is_executable(path)
|
|
|
|
|
2018-01-20 19:56:18 +00:00
|
|
|
def _decrypt_if_vault_data(self, b_vault_data, b_file_name=None):
|
|
|
|
'''Decrypt b_vault_data if encrypted and return b_data and the show_content flag'''
|
|
|
|
|
|
|
|
if not is_encrypted(b_vault_data):
|
|
|
|
show_content = True
|
|
|
|
return b_vault_data, show_content
|
|
|
|
|
|
|
|
b_ciphertext, b_version, cipher_name, vault_id = parse_vaulttext_envelope(b_vault_data)
|
|
|
|
b_data = self._vault.decrypt(b_vault_data, filename=b_file_name)
|
|
|
|
|
|
|
|
show_content = False
|
|
|
|
return b_data, show_content
|
|
|
|
|
2017-07-28 16:14:08 +00:00
|
|
|
def _get_file_contents(self, file_name):
|
2015-10-26 21:23:09 +00:00
|
|
|
'''
|
2017-07-28 16:14:08 +00:00
|
|
|
Reads the file contents from the given file name
|
|
|
|
|
|
|
|
If the contents are vault-encrypted, it will decrypt them and return
|
|
|
|
the decrypted data
|
|
|
|
|
|
|
|
:arg file_name: The name of the file to read. If this is a relative
|
|
|
|
path, it will be expanded relative to the basedir
|
2019-01-15 11:37:00 +00:00
|
|
|
:raises AnsibleFileNotFound: if the file_name does not refer to a file
|
2017-07-28 16:14:08 +00:00
|
|
|
:raises AnsibleParserError: if we were unable to read the file
|
|
|
|
:return: Returns a byte string of the file contents
|
2015-10-26 21:23:09 +00:00
|
|
|
'''
|
2017-05-09 05:19:54 +00:00
|
|
|
if not file_name or not isinstance(file_name, (binary_type, text_type)):
|
2018-05-24 02:29:37 +00:00
|
|
|
raise AnsibleParserError("Invalid filename: '%s'" % to_native(file_name))
|
2015-10-26 21:23:09 +00:00
|
|
|
|
2017-07-28 16:14:08 +00:00
|
|
|
b_file_name = to_bytes(self.path_dwim(file_name))
|
|
|
|
# This is what we really want but have to fix unittests to make it pass
|
|
|
|
# if not os.path.exists(b_file_name) or not os.path.isfile(b_file_name):
|
2018-05-24 02:29:37 +00:00
|
|
|
if not self.path_exists(b_file_name):
|
2017-07-03 19:27:53 +00:00
|
|
|
raise AnsibleFileNotFound("Unable to retrieve file contents", file_name=file_name)
|
2015-10-26 21:23:09 +00:00
|
|
|
|
|
|
|
try:
|
2016-04-30 05:19:12 +00:00
|
|
|
with open(b_file_name, 'rb') as f:
|
2017-07-28 16:14:08 +00:00
|
|
|
data = f.read()
|
2018-01-20 19:56:18 +00:00
|
|
|
return self._decrypt_if_vault_data(data, b_file_name)
|
2015-10-26 21:23:09 +00:00
|
|
|
except (IOError, OSError) as e:
|
2018-05-24 02:29:37 +00:00
|
|
|
raise AnsibleParserError("an error occurred while trying to read the file '%s': %s" % (file_name, to_native(e)), orig_exc=e)
|
2015-10-26 21:23:09 +00:00
|
|
|
|
|
|
|
def get_basedir(self):
|
|
|
|
''' returns the current basedir '''
|
|
|
|
return self._basedir
|
|
|
|
|
|
|
|
def set_basedir(self, basedir):
|
|
|
|
''' sets the base directory, used to find files when a relative path is given '''
|
|
|
|
|
|
|
|
if basedir is not None:
|
2016-09-07 05:54:17 +00:00
|
|
|
self._basedir = to_text(basedir)
|
2015-10-26 21:23:09 +00:00
|
|
|
|
|
|
|
def path_dwim(self, given):
|
|
|
|
'''
|
|
|
|
make relative paths work like folks expect.
|
|
|
|
'''
|
|
|
|
|
|
|
|
given = unquote(given)
|
2016-09-07 05:54:17 +00:00
|
|
|
given = to_text(given, errors='surrogate_or_strict')
|
2015-10-26 21:23:09 +00:00
|
|
|
|
2017-07-03 19:27:53 +00:00
|
|
|
if given.startswith(to_text(os.path.sep)) or given.startswith(u'~'):
|
|
|
|
path = given
|
2015-10-26 21:23:09 +00:00
|
|
|
else:
|
2016-09-07 05:54:17 +00:00
|
|
|
basedir = to_text(self._basedir, errors='surrogate_or_strict')
|
2017-07-03 19:27:53 +00:00
|
|
|
path = os.path.join(basedir, given)
|
|
|
|
|
|
|
|
return unfrackpath(path, follow=False)
|
2015-10-26 21:23:09 +00:00
|
|
|
|
2017-03-15 20:01:04 +00:00
|
|
|
def _is_role(self, path):
|
2017-07-03 19:27:53 +00:00
|
|
|
''' imperfect role detection, roles are still valid w/o tasks|meta/main.yml|yaml|etc '''
|
2017-03-15 20:01:04 +00:00
|
|
|
|
|
|
|
b_path = to_bytes(path, errors='surrogate_or_strict')
|
2017-07-03 19:27:53 +00:00
|
|
|
b_upath = to_bytes(unfrackpath(path, follow=False), errors='surrogate_or_strict')
|
|
|
|
|
2017-09-10 01:40:07 +00:00
|
|
|
for b_finddir in (b'meta', b'tasks'):
|
|
|
|
for b_suffix in (b'.yml', b'.yaml', b''):
|
|
|
|
b_main = b'main%s' % (b_suffix)
|
|
|
|
b_tasked = os.path.join(b_finddir, b_main)
|
2017-07-03 19:27:53 +00:00
|
|
|
|
|
|
|
if (
|
|
|
|
RE_TASKS.search(path) and
|
|
|
|
os.path.exists(os.path.join(b_path, b_main)) or
|
|
|
|
os.path.exists(os.path.join(b_upath, b_tasked)) or
|
|
|
|
os.path.exists(os.path.join(os.path.dirname(b_path), b_tasked))
|
|
|
|
):
|
|
|
|
return True
|
|
|
|
return False
|
2017-03-15 20:01:04 +00:00
|
|
|
|
2017-02-21 21:45:57 +00:00
|
|
|
def path_dwim_relative(self, path, dirname, source, is_role=False):
|
2015-10-26 21:23:09 +00:00
|
|
|
'''
|
|
|
|
find one file in either a role or playbook dir with or without
|
|
|
|
explicitly named dirname subdirs
|
|
|
|
|
|
|
|
Used in action plugins and lookups to find supplemental files that
|
|
|
|
could be in either place.
|
|
|
|
'''
|
|
|
|
|
|
|
|
search = []
|
2017-07-03 19:27:53 +00:00
|
|
|
source = to_text(source, errors='surrogate_or_strict')
|
2015-10-26 21:23:09 +00:00
|
|
|
|
|
|
|
# I have full path, nothing else needs to be looked at
|
2017-07-03 19:27:53 +00:00
|
|
|
if source.startswith(to_text(os.path.sep)) or source.startswith(u'~'):
|
|
|
|
search.append(unfrackpath(source, follow=False))
|
2015-10-26 21:23:09 +00:00
|
|
|
else:
|
|
|
|
# base role/play path + templates/files/vars + relative filename
|
|
|
|
search.append(os.path.join(path, dirname, source))
|
2017-07-03 19:27:53 +00:00
|
|
|
basedir = unfrackpath(path, follow=False)
|
2015-10-26 21:23:09 +00:00
|
|
|
|
2017-03-15 20:01:04 +00:00
|
|
|
# not told if role, but detect if it is a role and if so make sure you get correct base path
|
|
|
|
if not is_role:
|
|
|
|
is_role = self._is_role(path)
|
2017-02-21 21:45:57 +00:00
|
|
|
|
2017-07-03 19:27:53 +00:00
|
|
|
if is_role and RE_TASKS.search(path):
|
|
|
|
basedir = unfrackpath(os.path.dirname(path), follow=False)
|
2015-10-26 21:23:09 +00:00
|
|
|
|
|
|
|
cur_basedir = self._basedir
|
|
|
|
self.set_basedir(basedir)
|
|
|
|
# resolved base role/play path + templates/files/vars + relative filename
|
2017-07-03 19:27:53 +00:00
|
|
|
search.append(unfrackpath(os.path.join(basedir, dirname, source), follow=False))
|
2015-10-26 21:23:09 +00:00
|
|
|
self.set_basedir(cur_basedir)
|
|
|
|
|
2017-03-15 20:01:04 +00:00
|
|
|
if is_role and not source.endswith(dirname):
|
2015-10-26 21:23:09 +00:00
|
|
|
# look in role's tasks dir w/o dirname
|
2017-07-03 19:27:53 +00:00
|
|
|
search.append(unfrackpath(os.path.join(basedir, 'tasks', source), follow=False))
|
2015-10-26 21:23:09 +00:00
|
|
|
|
|
|
|
# try to create absolute path for loader basedir + templates/files/vars + filename
|
2017-07-03 19:27:53 +00:00
|
|
|
search.append(unfrackpath(os.path.join(dirname, source), follow=False))
|
2017-07-20 15:06:48 +00:00
|
|
|
|
|
|
|
# try to create absolute path for loader basedir
|
|
|
|
search.append(unfrackpath(os.path.join(basedir, source), follow=False))
|
|
|
|
|
|
|
|
# try to create absolute path for dirname + filename
|
2017-07-05 23:45:09 +00:00
|
|
|
search.append(self.path_dwim(os.path.join(dirname, source)))
|
2015-10-26 21:23:09 +00:00
|
|
|
|
2017-07-20 15:06:48 +00:00
|
|
|
# try to create absolute path for filename
|
2015-10-26 21:23:09 +00:00
|
|
|
search.append(self.path_dwim(source))
|
|
|
|
|
|
|
|
for candidate in search:
|
2016-09-07 05:54:17 +00:00
|
|
|
if os.path.exists(to_bytes(candidate, errors='surrogate_or_strict')):
|
2015-10-26 21:23:09 +00:00
|
|
|
break
|
|
|
|
|
|
|
|
return candidate
|
|
|
|
|
2017-02-21 21:45:57 +00:00
|
|
|
def path_dwim_relative_stack(self, paths, dirname, source, is_role=False):
|
2016-06-28 21:23:30 +00:00
|
|
|
'''
|
|
|
|
find one file in first path in stack taking roles into account and adding play basedir as fallback
|
2016-08-23 04:55:30 +00:00
|
|
|
|
|
|
|
:arg paths: A list of text strings which are the paths to look for the filename in.
|
|
|
|
:arg dirname: A text string representing a directory. The directory
|
|
|
|
is prepended to the source to form the path to search for.
|
|
|
|
:arg source: A text string which is the filename to search for
|
|
|
|
:rtype: A text string
|
2017-07-03 19:27:53 +00:00
|
|
|
:returns: An absolute path to the filename ``source`` if found
|
|
|
|
:raises: An AnsibleFileNotFound Exception if the file is found to exist in the search paths
|
2016-06-28 21:23:30 +00:00
|
|
|
'''
|
2016-08-23 04:55:30 +00:00
|
|
|
b_dirname = to_bytes(dirname)
|
|
|
|
b_source = to_bytes(source)
|
|
|
|
|
2016-06-28 21:23:30 +00:00
|
|
|
result = None
|
2017-07-03 19:27:53 +00:00
|
|
|
search = []
|
2016-09-21 20:52:26 +00:00
|
|
|
if source is None:
|
|
|
|
display.warning('Invalid request to find a file that matches a "null" value')
|
|
|
|
elif source and (source.startswith('~') or source.startswith(os.path.sep)):
|
2016-06-28 21:23:30 +00:00
|
|
|
# path is absolute, no relative needed, check existence and return source
|
2017-07-03 19:27:53 +00:00
|
|
|
test_path = unfrackpath(b_source, follow=False)
|
2016-09-07 05:54:17 +00:00
|
|
|
if os.path.exists(to_bytes(test_path, errors='surrogate_or_strict')):
|
2016-06-28 21:23:30 +00:00
|
|
|
result = test_path
|
|
|
|
else:
|
2016-12-09 09:18:15 +00:00
|
|
|
display.debug(u'evaluation_path:\n\t%s' % '\n\t'.join(paths))
|
2016-06-28 21:23:30 +00:00
|
|
|
for path in paths:
|
2017-07-03 19:27:53 +00:00
|
|
|
upath = unfrackpath(path, follow=False)
|
2016-09-07 05:54:17 +00:00
|
|
|
b_upath = to_bytes(upath, errors='surrogate_or_strict')
|
2016-08-23 04:55:30 +00:00
|
|
|
b_mydir = os.path.dirname(b_upath)
|
2016-06-28 21:23:30 +00:00
|
|
|
|
|
|
|
# if path is in role and 'tasks' not there already, add it into the search
|
2017-08-26 02:00:07 +00:00
|
|
|
if (is_role or self._is_role(path)) and b_mydir.endswith(b'tasks'):
|
2019-02-14 02:39:26 +00:00
|
|
|
search.append(os.path.join(os.path.dirname(b_mydir), b_dirname, b_source))
|
|
|
|
search.append(os.path.join(b_mydir, b_source))
|
2017-08-26 02:00:07 +00:00
|
|
|
else:
|
2016-06-28 21:23:30 +00:00
|
|
|
# don't add dirname if user already is using it in source
|
2016-12-09 09:18:15 +00:00
|
|
|
if b_source.split(b'/')[0] != dirname:
|
|
|
|
search.append(os.path.join(b_upath, b_dirname, b_source))
|
2016-08-23 04:55:30 +00:00
|
|
|
search.append(os.path.join(b_upath, b_source))
|
2016-06-28 21:23:30 +00:00
|
|
|
|
|
|
|
# always append basedir as last resort
|
2016-12-09 09:18:15 +00:00
|
|
|
# don't add dirname if user already is using it in source
|
|
|
|
if b_source.split(b'/')[0] != dirname:
|
|
|
|
search.append(os.path.join(to_bytes(self.get_basedir()), b_dirname, b_source))
|
2016-08-23 04:55:30 +00:00
|
|
|
search.append(os.path.join(to_bytes(self.get_basedir()), b_source))
|
|
|
|
|
2016-09-07 05:54:17 +00:00
|
|
|
display.debug(u'search_path:\n\t%s' % to_text(b'\n\t'.join(search)))
|
2016-08-23 04:55:30 +00:00
|
|
|
for b_candidate in search:
|
2016-09-07 05:54:17 +00:00
|
|
|
display.vvvvv(u'looking for "%s" at "%s"' % (source, to_text(b_candidate)))
|
2016-08-23 04:55:30 +00:00
|
|
|
if os.path.exists(b_candidate):
|
2016-09-07 05:54:17 +00:00
|
|
|
result = to_text(b_candidate)
|
2016-06-28 21:23:30 +00:00
|
|
|
break
|
|
|
|
|
2017-07-03 19:27:53 +00:00
|
|
|
if result is None:
|
2017-05-09 05:19:54 +00:00
|
|
|
raise AnsibleFileNotFound(file_name=source, paths=[to_text(p) for p in search])
|
2017-07-03 19:27:53 +00:00
|
|
|
|
2016-06-28 21:23:30 +00:00
|
|
|
return result
|
|
|
|
|
2016-01-22 16:23:10 +00:00
|
|
|
def _create_content_tempfile(self, content):
|
|
|
|
''' Create a tempfile containing defined content '''
|
|
|
|
fd, content_tempfile = tempfile.mkstemp()
|
|
|
|
f = os.fdopen(fd, 'wb')
|
|
|
|
content = to_bytes(content)
|
|
|
|
try:
|
|
|
|
f.write(content)
|
|
|
|
except Exception as err:
|
|
|
|
os.remove(content_tempfile)
|
|
|
|
raise Exception(err)
|
|
|
|
finally:
|
|
|
|
f.close()
|
|
|
|
return content_tempfile
|
|
|
|
|
2017-03-24 19:39:25 +00:00
|
|
|
def get_real_file(self, file_path, decrypt=True):
|
2016-01-22 16:23:10 +00:00
|
|
|
"""
|
|
|
|
If the file is vault encrypted return a path to a temporary decrypted file
|
|
|
|
If the file is not encrypted then the path is returned
|
|
|
|
Temporary files are cleanup in the destructor
|
|
|
|
"""
|
|
|
|
|
2017-05-09 05:19:54 +00:00
|
|
|
if not file_path or not isinstance(file_path, (binary_type, text_type)):
|
2016-09-07 05:54:17 +00:00
|
|
|
raise AnsibleParserError("Invalid filename: '%s'" % to_native(file_path))
|
2016-01-22 16:23:10 +00:00
|
|
|
|
2016-09-07 05:54:17 +00:00
|
|
|
b_file_path = to_bytes(file_path, errors='surrogate_or_strict')
|
2016-08-23 04:55:30 +00:00
|
|
|
if not self.path_exists(b_file_path) or not self.is_file(b_file_path):
|
2017-07-03 19:27:53 +00:00
|
|
|
raise AnsibleFileNotFound(file_name=file_path)
|
2016-01-22 16:23:10 +00:00
|
|
|
|
|
|
|
real_path = self.path_dwim(file_path)
|
|
|
|
|
|
|
|
try:
|
2017-03-24 19:39:25 +00:00
|
|
|
if decrypt:
|
|
|
|
with open(to_bytes(real_path), 'rb') as f:
|
|
|
|
# Limit how much of the file is read since we do not know
|
|
|
|
# whether this is a vault file and therefore it could be very
|
|
|
|
# large.
|
|
|
|
if is_encrypted_file(f, count=len(b_HEADER)):
|
|
|
|
# if the file is encrypted and no password was specified,
|
|
|
|
# the decrypt call would throw an error, but we check first
|
|
|
|
# since the decrypt function doesn't know the file name
|
|
|
|
data = f.read()
|
Support multiple vault passwords (#22756)
Fixes #13243
** Add --vault-id to name/identify multiple vault passwords
Use --vault-id to indicate id and path/type
--vault-id=prompt # prompt for default vault id password
--vault-id=myorg@prompt # prompt for a vault_id named 'myorg'
--vault-id=a_password_file # load ./a_password_file for default id
--vault-id=myorg@a_password_file # load file for 'myorg' vault id
vault_id's are created implicitly for existing --vault-password-file
and --ask-vault-pass options.
Vault ids are just for UX purposes and bookkeeping. Only the vault
payload and the password bytestring is needed to decrypt a
vault blob.
Replace passing password around everywhere with
a VaultSecrets object.
If we specify a vault_id, mention that in password prompts
Specifying multiple -vault-password-files will
now try each until one works
** Rev vault format in a backwards compatible way
The 1.2 vault format adds the vault_id to the header line
of the vault text. This is backwards compatible with older
versions of ansible. Old versions will just ignore it and
treat it as the default (and only) vault id.
Note: only 2.4+ supports multiple vault passwords, so while
earlier ansible versions can read the vault-1.2 format, it
does not make them magically support multiple vault passwords.
use 1.1 format for 'default' vault_id
Vaulted items that need to include a vault_id will be
written in 1.2 format.
If we set a new DEFAULT_VAULT_IDENTITY, then the default will
use version 1.2
vault will only use a vault_id if one is specified. So if none
is specified and C.DEFAULT_VAULT_IDENTITY is 'default'
we use the old format.
** Changes/refactors needed to implement multiple vault passwords
raise exceptions on decrypt fail, check vault id early
split out parsing the vault plaintext envelope (with the
sha/original plaintext) to _split_plaintext_envelope()
some cli fixups for specifying multiple paths in
the unfrack_paths optparse callback
fix py3 dict.keys() 'dict_keys object is not indexable' error
pluralize cli.options.vault_password_file -> vault_password_files
pluralize cli.options.new_vault_password_file -> new_vault_password_files
pluralize cli.options.vault_id -> cli.options.vault_ids
** Add a config option (vault_id_match) to force vault id matching.
With 'vault_id_match=True' and an ansible
vault that provides a vault_id, then decryption will require
that a matching vault_id is required. (via
--vault-id=my_vault_id@password_file, for ex).
In other words, if the config option is true, then only
the vault secrets with matching vault ids are candidates for
decrypting a vault. If option is false (the default), then
all of the provided vault secrets will be selected.
If a user doesn't want all vault secrets to be tried to
decrypt any vault content, they can enable this option.
Note: The vault id used for the match is not encrypted or
cryptographically signed. It is just a label/id/nickname used
for referencing a specific vault secret.
2017-07-28 19:20:58 +00:00
|
|
|
if not self._vault.secrets:
|
|
|
|
raise AnsibleParserError("A vault password or secret must be specified to decrypt %s" % to_native(file_path))
|
2017-03-24 19:39:25 +00:00
|
|
|
|
|
|
|
data = self._vault.decrypt(data, filename=real_path)
|
|
|
|
# Make a temp file
|
|
|
|
real_path = self._create_content_tempfile(data)
|
|
|
|
self._tempfiles.add(real_path)
|
2016-01-22 16:23:10 +00:00
|
|
|
|
|
|
|
return real_path
|
|
|
|
|
|
|
|
except (IOError, OSError) as e:
|
2017-06-09 17:13:15 +00:00
|
|
|
raise AnsibleParserError("an error occurred while trying to read the file '%s': %s" % (to_native(real_path), to_native(e)), orig_exc=e)
|
2016-01-22 16:23:10 +00:00
|
|
|
|
2016-04-14 14:31:39 +00:00
|
|
|
def cleanup_tmp_file(self, file_path):
|
2016-01-22 16:23:10 +00:00
|
|
|
"""
|
|
|
|
Removes any temporary files created from a previous call to
|
|
|
|
get_real_file. file_path must be the path returned from a
|
|
|
|
previous call to get_real_file.
|
|
|
|
"""
|
|
|
|
if file_path in self._tempfiles:
|
|
|
|
os.unlink(file_path)
|
2016-09-07 05:54:17 +00:00
|
|
|
self._tempfiles.remove(file_path)
|
2016-04-14 14:31:39 +00:00
|
|
|
|
|
|
|
def cleanup_all_tmp_files(self):
|
|
|
|
for f in self._tempfiles:
|
|
|
|
try:
|
|
|
|
self.cleanup_tmp_file(f)
|
2017-05-23 21:16:49 +00:00
|
|
|
except Exception as e:
|
|
|
|
display.warning("Unable to cleanup temp files: %s" % to_native(e))
|
2018-04-10 21:14:38 +00:00
|
|
|
|
|
|
|
def find_vars_files(self, path, name, extensions=None, allow_dir=True):
|
|
|
|
"""
|
|
|
|
Find vars files in a given path with specified name. This will find
|
|
|
|
files in a dir named <name>/ or a file called <name> ending in known
|
|
|
|
extensions.
|
|
|
|
"""
|
|
|
|
|
|
|
|
b_path = to_bytes(os.path.join(path, name))
|
|
|
|
found = []
|
|
|
|
|
|
|
|
if extensions is None:
|
|
|
|
# Look for file with no extension first to find dir before file
|
|
|
|
extensions = [''] + C.YAML_FILENAME_EXTENSIONS
|
|
|
|
# add valid extensions to name
|
|
|
|
for ext in extensions:
|
|
|
|
|
|
|
|
if '.' in ext:
|
|
|
|
full_path = b_path + to_bytes(ext)
|
|
|
|
elif ext:
|
|
|
|
full_path = b'.'.join([b_path, to_bytes(ext)])
|
|
|
|
else:
|
|
|
|
full_path = b_path
|
|
|
|
|
|
|
|
if self.path_exists(full_path):
|
|
|
|
if self.is_directory(full_path):
|
|
|
|
if allow_dir:
|
|
|
|
found.extend(self._get_dir_vars_files(to_text(full_path), extensions))
|
|
|
|
else:
|
2018-06-07 14:18:07 +00:00
|
|
|
continue
|
2018-04-10 21:14:38 +00:00
|
|
|
else:
|
|
|
|
found.append(full_path)
|
|
|
|
break
|
|
|
|
return found
|
|
|
|
|
|
|
|
def _get_dir_vars_files(self, path, extensions):
|
|
|
|
found = []
|
|
|
|
for spath in sorted(self.list_directory(path)):
|
|
|
|
if not spath.startswith(u'.') and not spath.endswith(u'~'): # skip hidden and backups
|
|
|
|
|
|
|
|
ext = os.path.splitext(spath)[-1]
|
|
|
|
full_spath = os.path.join(path, spath)
|
|
|
|
|
|
|
|
if self.is_directory(full_spath) and not ext: # recursive search if dir
|
|
|
|
found.extend(self._get_dir_vars_files(full_spath, extensions))
|
|
|
|
elif self.is_file(full_spath) and (not ext or to_text(ext) in extensions):
|
|
|
|
# only consider files with valid extensions or no extension
|
|
|
|
found.append(full_spath)
|
|
|
|
|
|
|
|
return found
|