2014-03-16 20:02:37 +00:00
|
|
|
# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
|
|
|
|
#
|
|
|
|
# This file is part of Ansible
|
|
|
|
#
|
|
|
|
# Ansible is free software: you can redistribute it and/or modify
|
|
|
|
# it under the terms of the GNU General Public License as published by
|
|
|
|
# the Free Software Foundation, either version 3 of the License, or
|
|
|
|
# (at your option) any later version.
|
|
|
|
#
|
|
|
|
# Ansible is distributed in the hope that it will be useful,
|
|
|
|
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
|
|
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
|
|
# GNU General Public License for more details.
|
|
|
|
#
|
|
|
|
# You should have received a copy of the GNU General Public License
|
|
|
|
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
|
|
|
|
|
|
|
import os
|
2015-07-20 19:33:07 +00:00
|
|
|
import sys
|
2014-05-26 02:55:32 +00:00
|
|
|
import stat
|
2015-11-02 20:02:26 +00:00
|
|
|
import time
|
|
|
|
import shlex
|
2014-04-08 18:21:42 +00:00
|
|
|
import errno
|
2014-03-16 20:02:37 +00:00
|
|
|
import fnmatch
|
|
|
|
import glob
|
|
|
|
import platform
|
|
|
|
import re
|
2014-04-08 18:21:42 +00:00
|
|
|
import signal
|
2014-03-16 20:02:37 +00:00
|
|
|
import socket
|
|
|
|
import struct
|
|
|
|
import datetime
|
|
|
|
import getpass
|
2014-09-24 21:05:31 +00:00
|
|
|
import pwd
|
2016-04-27 07:15:01 +00:00
|
|
|
|
2016-08-18 13:36:03 +00:00
|
|
|
from ansible.module_utils.basic import get_all_subclasses
|
|
|
|
from ansible.module_utils.six import PY3, iteritems
|
|
|
|
|
|
|
|
# py2 vs py3; replace with six via ansiballz
|
2016-04-27 07:15:01 +00:00
|
|
|
try:
|
|
|
|
# python2
|
|
|
|
import ConfigParser as configparser
|
|
|
|
except ImportError:
|
|
|
|
# python3
|
|
|
|
import configparser
|
2016-08-18 00:58:51 +00:00
|
|
|
|
2016-02-27 00:42:18 +00:00
|
|
|
try:
|
2016-04-27 07:15:01 +00:00
|
|
|
# python2
|
2016-02-27 00:42:18 +00:00
|
|
|
from StringIO import StringIO
|
|
|
|
except ImportError:
|
2016-04-27 07:15:01 +00:00
|
|
|
# python3
|
2016-02-27 00:42:18 +00:00
|
|
|
from io import StringIO
|
2014-03-16 20:02:37 +00:00
|
|
|
|
2016-04-27 07:15:01 +00:00
|
|
|
try:
|
|
|
|
# python2
|
|
|
|
from string import maketrans
|
|
|
|
except ImportError:
|
|
|
|
# python3
|
|
|
|
maketrans = str.maketrans # TODO: is this really identical?
|
|
|
|
|
2016-05-23 14:30:06 +00:00
|
|
|
try:
|
|
|
|
# Python 2
|
|
|
|
long
|
|
|
|
except NameError:
|
|
|
|
# Python 3
|
|
|
|
long = int
|
2014-06-30 21:23:55 +00:00
|
|
|
|
2014-03-16 20:02:37 +00:00
|
|
|
try:
|
|
|
|
import selinux
|
|
|
|
HAVE_SELINUX=True
|
|
|
|
except ImportError:
|
|
|
|
HAVE_SELINUX=False
|
|
|
|
|
2016-02-23 17:28:04 +00:00
|
|
|
try:
|
|
|
|
# Check if we have SSLContext support
|
|
|
|
from ssl import create_default_context, SSLContext
|
|
|
|
del create_default_context
|
|
|
|
del SSLContext
|
|
|
|
HAS_SSLCONTEXT = True
|
|
|
|
except ImportError:
|
|
|
|
HAS_SSLCONTEXT = False
|
|
|
|
|
2014-03-16 20:02:37 +00:00
|
|
|
try:
|
|
|
|
import json
|
2015-07-20 19:33:07 +00:00
|
|
|
# Detect python-json which is incompatible and fallback to simplejson in
|
|
|
|
# that case
|
|
|
|
try:
|
|
|
|
json.loads
|
|
|
|
json.dumps
|
|
|
|
except AttributeError:
|
|
|
|
raise ImportError
|
2014-03-16 20:02:37 +00:00
|
|
|
except ImportError:
|
|
|
|
import simplejson as json
|
|
|
|
|
2015-11-03 17:51:21 +00:00
|
|
|
# The distutils module is not shipped with SUNWPython on Solaris.
|
|
|
|
# It's in the SUNWPython-devel package which also contains development files
|
|
|
|
# that don't belong on production boxes. Since our Solaris code doesn't
|
|
|
|
# depend on LooseVersion, do not import it on Solaris.
|
|
|
|
if platform.system() != 'SunOS':
|
|
|
|
from distutils.version import LooseVersion
|
|
|
|
|
2015-07-20 19:33:07 +00:00
|
|
|
|
2014-04-08 18:21:42 +00:00
|
|
|
# --------------------------------------------------------------
|
2015-01-06 23:00:19 +00:00
|
|
|
# timeout function to make sure some fact gathering
|
2014-04-08 18:21:42 +00:00
|
|
|
# steps do not exceed a time limit
|
|
|
|
|
2016-07-08 21:46:41 +00:00
|
|
|
# Module-level override for the @timeout decorator.  When set to a truthy
# value at runtime, it replaces each decorated function's default timeout.
GATHER_TIMEOUT=None
|
|
|
|
|
2014-04-08 18:21:42 +00:00
|
|
|
class TimeoutError(Exception):
    """Raised by the @timeout decorator when a fact-gathering step runs too long.

    NOTE(review): this name shadows Python 3's builtin TimeoutError; it is
    kept as a module-local class so existing except clauses keep working.
    """
    pass
|
|
|
|
|
2014-04-20 01:42:56 +00:00
|
|
|
def timeout(seconds=10, error_message="Timer expired"):
    """Decorator factory that aborts the wrapped call after a time limit.

    Uses SIGALRM, so it only works on the main thread of a Unix process.

    :param seconds: default time limit; overridden at call time by the
        module-level GATHER_TIMEOUT when that is set to a truthy value.
    :param error_message: message carried by the raised TimeoutError.
    :raises TimeoutError: (this module's class) when the alarm fires.
    """
    def decorator(func):
        def _handle_timeout(signum, frame):
            raise TimeoutError(error_message)

        def wrapper(*args, **kwargs):
            # BUGFIX: the original assigned `seconds = GATHER_TIMEOUT` here,
            # which made `seconds` local to wrapper().  Whenever the override
            # branch was NOT taken, `signal.alarm(seconds)` then raised
            # UnboundLocalError.  Use a separate local so the closed-over
            # default is always available.
            timeout_secs = seconds
            gather_timeout = globals().get('GATHER_TIMEOUT')
            if gather_timeout:
                timeout_secs = gather_timeout
            signal.signal(signal.SIGALRM, _handle_timeout)
            signal.alarm(timeout_secs)
            try:
                result = func(*args, **kwargs)
            finally:
                # Always cancel the pending alarm, even if func raised.
                signal.alarm(0)
            return result

        return wrapper

    return decorator
|
|
|
|
|
|
|
|
# --------------------------------------------------------------
|
|
|
|
|
2014-03-16 20:02:37 +00:00
|
|
|
class Facts(object):
    """
    This class should only attempt to populate those facts that
    are mostly generic to all systems.  This includes platform facts,
    service facts (e.g. ssh keys or selinux), and distribution facts.
    Anything that requires extensive code or may have more than one
    possible implementation to establish facts for a given topic should
    subclass Facts.
    """

    # i86pc is a Solaris and derivatives-ism
    _I386RE = re.compile(r'i([3456]86|86pc)')

    # For the most part, we assume that platform.dist() will tell the truth.
    # This is the fallback to handle unknowns or exceptions
    # Maps selinux_getenforcemode()/security_getenforce() return codes
    # to human-readable mode names.
    SELINUX_MODE_DICT = { 1: 'enforcing', 0: 'permissive', -1: 'disabled' }

    # A list of dicts.  If there is a platform with more than one
    # package manager, put the preferred one last.  If there is an
    # ansible module, use that as the value for the 'name' key.
    PKG_MGRS = [ { 'path' : '/usr/bin/yum',         'name' : 'yum' },
                 { 'path' : '/usr/bin/dnf',         'name' : 'dnf' },
                 { 'path' : '/usr/bin/apt-get',     'name' : 'apt' },
                 { 'path' : '/usr/bin/zypper',      'name' : 'zypper' },
                 { 'path' : '/usr/sbin/urpmi',      'name' : 'urpmi' },
                 { 'path' : '/usr/bin/pacman',      'name' : 'pacman' },
                 { 'path' : '/bin/opkg',            'name' : 'opkg' },
                 { 'path' : '/usr/pkg/bin/pkgin',   'name' : 'pkgin' },
                 { 'path' : '/opt/local/bin/pkgin', 'name' : 'pkgin' },
                 { 'path' : '/opt/local/bin/port',  'name' : 'macports' },
                 { 'path' : '/usr/local/bin/brew',  'name' : 'homebrew' },
                 { 'path' : '/sbin/apk',            'name' : 'apk' },
                 { 'path' : '/usr/sbin/pkg',        'name' : 'pkgng' },
                 { 'path' : '/usr/sbin/swlist',     'name' : 'SD-UX' },
                 { 'path' : '/usr/bin/emerge',      'name' : 'portage' },
                 { 'path' : '/usr/sbin/pkgadd',     'name' : 'svr4pkg' },
                 { 'path' : '/usr/bin/pkg',         'name' : 'pkg' },
                 { 'path' : '/usr/bin/xbps-install','name' : 'xbps' },
                 { 'path' : '/usr/local/sbin/pkg',  'name' : 'pkgng' },
               ]
|
2014-03-16 20:02:37 +00:00
|
|
|
|
2016-05-20 02:34:19 +00:00
|
|
|
    def __init__(self, module, load_on_init=True, cached_facts=None):
        """Gather the generic facts.

        :param module: the AnsibleModule instance; used for run_command,
            get_bin_path and access to module params.
        :param load_on_init: when True (the default), run every collector
            immediately; populate() then just returns the accumulated dict.
        :param cached_facts: optional dict of previously gathered facts used
            to seed self.facts instead of starting empty.
        """
        self.module = module
        if not cached_facts:
            self.facts = {}
        else:
            self.facts = cached_facts
        ### TODO: Eventually, these should all get moved to populate().  But
        # some of the values are currently being used by other subclasses (for
        # instance, os_family and distribution).  Have to sort out what to do
        # about those first.
        if load_on_init:
            self.get_platform_facts()
            # Distribution facts are delegated to the Distribution class and
            # merged into this instance's dict.
            self.facts.update(Distribution(module).populate())
            self.get_cmdline()
            self.get_public_ssh_host_keys()
            self.get_selinux_facts()
            self.get_caps_facts()
            self.get_fips_facts()
            self.get_pkg_mgr_facts()
            self.get_service_mgr_facts()
            self.get_lsb_facts()
            self.get_date_time_facts()
            self.get_user_facts()
            self.get_local_facts()
            self.get_env_facts()
            self.get_dns_facts()
            self.get_python_facts()
|
2014-03-16 20:02:37 +00:00
|
|
|
|
2016-04-13 18:13:45 +00:00
|
|
|
|
2014-03-16 20:02:37 +00:00
|
|
|
    def populate(self):
        # Facts are gathered eagerly in __init__ (see load_on_init);
        # populate() only hands back the accumulated dict.
        return self.facts
|
|
|
|
|
|
|
|
# Platform
|
|
|
|
# platform.system() can be Linux, Darwin, Java, or Windows
|
|
|
|
    # Platform
    # platform.system() can be Linux, Darwin, Java, or Windows
    def get_platform_facts(self):
        """Collect basic platform facts: system, kernel, machine,
        architecture/userspace bits, hostname/fqdn/domain and machine-id.
        """
        self.facts['system'] = platform.system()
        self.facts['kernel'] = platform.release()
        self.facts['machine'] = platform.machine()
        self.facts['python_version'] = platform.python_version()
        self.facts['fqdn'] = socket.getfqdn()
        # hostname is the short name; nodename keeps the full node string.
        self.facts['hostname'] = platform.node().split('.')[0]
        self.facts['nodename'] = platform.node()
        self.facts['domain'] = '.'.join(self.facts['fqdn'].split('.')[1:])
        # platform.architecture()[0] is e.g. '64bit'; strip the suffix to
        # get the userspace word size as a string ('64'/'32').
        arch_bits = platform.architecture()[0]
        self.facts['userspace_bits'] = arch_bits.replace('bit', '')
        if self.facts['machine'] == 'x86_64':
            self.facts['architecture'] = self.facts['machine']
            if self.facts['userspace_bits'] == '64':
                self.facts['userspace_architecture'] = 'x86_64'
            elif self.facts['userspace_bits'] == '32':
                # 32-bit userland on a 64-bit kernel
                self.facts['userspace_architecture'] = 'i386'
        elif Facts._I386RE.search(self.facts['machine']):
            self.facts['architecture'] = 'i386'
            if self.facts['userspace_bits'] == '64':
                self.facts['userspace_architecture'] = 'x86_64'
            elif self.facts['userspace_bits'] == '32':
                self.facts['userspace_architecture'] = 'i386'
        else:
            self.facts['architecture'] = self.facts['machine']
        if self.facts['system'] == 'AIX':
            # Attempt to use getconf to figure out architecture
            # fall back to bootinfo if needed
            getconf_bin = self.module.get_bin_path('getconf')
            if getconf_bin:
                rc, out, err = self.module.run_command([getconf_bin, 'MACHINE_ARCHITECTURE'])
                data = out.split('\n')
                self.facts['architecture'] = data[0]
            else:
                bootinfo_bin = self.module.get_bin_path('bootinfo')
                rc, out, err = self.module.run_command([bootinfo_bin, '-p'])
                data = out.split('\n')
                self.facts['architecture'] = data[0]
        elif self.facts['system'] == 'OpenBSD':
            self.facts['architecture'] = platform.uname()[5]
        # dbus machine-id is preferred; systemd's /etc/machine-id is the
        # fallback.  Only the first line is kept.
        machine_id = get_file_content("/var/lib/dbus/machine-id") or get_file_content("/etc/machine-id")
        if machine_id:
            machine_id = machine_id.split('\n')[0]
            self.facts["machine_id"] = machine_id
|
2014-03-16 20:02:37 +00:00
|
|
|
|
|
|
|
    def get_local_facts(self):
        """Load user-supplied '*.fact' files from the configured fact_path.

        Executable files are run and their stdout parsed; non-executable
        files are read directly.  Content is tried as JSON first, then as
        an ini file via ConfigParser; on both failures an error string is
        stored instead of parsed data.
        """
        fact_path = self.module.params.get('fact_path', None)
        if not fact_path or not os.path.exists(fact_path):
            return

        local = {}
        for fn in sorted(glob.glob(fact_path + '/*.fact')):
            # where it will sit under local facts
            fact_base = os.path.basename(fn).replace('.fact','')
            if stat.S_IXUSR & os.stat(fn)[stat.ST_MODE]:
                # run it
                # try to read it as json first
                # if that fails read it with ConfigParser
                # if that fails, skip it
                rc, out, err = self.module.run_command(fn)
                try:
                    out = out.decode('utf-8', 'strict')
                except UnicodeError:
                    # Non-UTF-8 output: record the error for this fact and
                    # stop processing further fact files entirely.
                    fact = 'error loading fact - output of running %s was not utf-8' % fn
                    local[fact_base] = fact
                    self.facts['local'] = local
                    return
            else:
                out = get_file_content(fn, default='')

            # load raw json
            fact = 'loading %s' % fact_base
            try:
                fact = json.loads(out)
            except ValueError:
                # load raw ini
                cp = configparser.ConfigParser()
                try:
                    cp.readfp(StringIO(out))
                except configparser.Error:
                    fact = "error loading fact - please check content"
                else:
                    # Convert the parsed ini into a nested dict of
                    # {section: {option: value}}.
                    fact = {}
                    for sect in cp.sections():
                        if sect not in fact:
                            fact[sect] = {}
                        for opt in cp.options(sect):
                            val = cp.get(sect, opt)
                            fact[sect][opt]=val

            local[fact_base] = fact
        if not local:
            return
        self.facts['local'] = local
|
|
|
|
|
|
|
|
def get_cmdline(self):
|
|
|
|
data = get_file_content('/proc/cmdline')
|
|
|
|
if data:
|
|
|
|
self.facts['cmdline'] = {}
|
2014-07-30 19:47:50 +00:00
|
|
|
try:
|
|
|
|
for piece in shlex.split(data):
|
|
|
|
item = piece.split('=', 1)
|
|
|
|
if len(item) == 1:
|
|
|
|
self.facts['cmdline'][item[0]] = True
|
|
|
|
else:
|
|
|
|
self.facts['cmdline'][item[0]] = item[1]
|
2015-11-03 17:51:21 +00:00
|
|
|
except ValueError:
|
2014-07-30 19:47:50 +00:00
|
|
|
pass
|
2014-03-16 20:02:37 +00:00
|
|
|
|
|
|
|
    def get_public_ssh_host_keys(self):
        """Record the public SSH host keys as ssh_host_key_<type>_public.

        Only the base64 key material (the second whitespace-separated field
        of the .pub file) is stored, not the type prefix or comment.
        """
        keytypes = ('dsa', 'rsa', 'ecdsa', 'ed25519')

        # list of directories to check for ssh keys
        # used in the order listed here, the first one with keys is used
        keydirs = ['/etc/ssh', '/etc/openssh', '/etc']

        for keydir in keydirs:
            for type_ in keytypes:
                factname = 'ssh_host_key_%s_public' % type_
                if factname in self.facts:
                    # a previous keydir was already successful, stop looking
                    # for keys
                    return
                key_filename = '%s/ssh_host_%s_key.pub' % (keydir, type_)
                keydata = get_file_content(key_filename)
                if keydata is not None:
                    self.facts[factname] = keydata.split()[1]
|
2014-03-16 20:02:37 +00:00
|
|
|
|
|
|
|
def get_pkg_mgr_facts(self):
|
|
|
|
if self.facts['system'] == 'OpenBSD':
|
|
|
|
self.facts['pkg_mgr'] = 'openbsd_pkg'
|
2016-06-03 13:18:20 +00:00
|
|
|
else:
|
|
|
|
self.facts['pkg_mgr'] = 'unknown'
|
|
|
|
for pkg in Facts.PKG_MGRS:
|
|
|
|
if os.path.exists(pkg['path']):
|
|
|
|
self.facts['pkg_mgr'] = pkg['name']
|
2014-03-16 20:02:37 +00:00
|
|
|
|
2015-08-30 17:04:30 +00:00
|
|
|
    def get_service_mgr_facts(self):
        """Detect the service manager / init system and store it as
        'service_mgr' (e.g. systemd, upstart, launchd, bsdinit, ...).
        """
        #TODO: detect more custom init setups like bootscripts, dmd, s6, Epoch, runit, etc
        # also other OSs other than linux might need to check across several possible candidates

        # try various forms of querying pid 1
        proc_1 = get_file_content('/proc/1/comm')
        if proc_1 is None:
            # no /proc/1/comm (non-Linux or restricted); fall back to ps
            rc, proc_1, err = self.module.run_command("ps -p 1 -o comm|tail -n 1", use_unsafe_shell=True)
        else:
            proc_1 = os.path.basename(proc_1)

        if proc_1 is not None:
            proc_1 = proc_1.strip()

        if proc_1 == 'init' or proc_1.endswith('sh'):
            # many systems return init, so this cannot be trusted; if it ends
            # in 'sh' it probably is a shell in a container
            proc_1 = None

        # if not init/None it should be an identifiable or custom init, so we are done!
        if proc_1 is not None:
            self.facts['service_mgr'] = proc_1

        # start with the easy ones
        elif self.facts['distribution'] == 'MacOSX':
            #FIXME: find way to query executable, version matching is not ideal
            if LooseVersion(platform.mac_ver()[0]) >= LooseVersion('10.4'):
                self.facts['service_mgr'] = 'launchd'
            else:
                self.facts['service_mgr'] = 'systemstarter'
        elif 'BSD' in self.facts['system'] or self.facts['system'] in ['Bitrig', 'DragonFly']:
            #FIXME: we might want to break out to individual BSDs
            self.facts['service_mgr'] = 'bsdinit'
        elif self.facts['system'] == 'AIX':
            self.facts['service_mgr'] = 'src'
        elif self.facts['system'] == 'SunOS':
            #FIXME: smf?
            self.facts['service_mgr'] = 'svcs'
        elif self.facts['system'] == 'Linux':
            if self.is_systemd_managed():
                self.facts['service_mgr'] = 'systemd'
            elif self.module.get_bin_path('initctl') and os.path.exists("/etc/init/"):
                self.facts['service_mgr'] = 'upstart'
            elif os.path.realpath('/sbin/rc') == '/sbin/openrc':
                self.facts['service_mgr'] = 'openrc'
            elif os.path.exists('/etc/init.d/'):
                self.facts['service_mgr'] = 'sysvinit'

        if not self.facts.get('service_mgr', False):
            # if we cannot detect, fallback to generic 'service'
            self.facts['service_mgr'] = 'service'
|
|
|
|
|
2014-03-16 20:02:37 +00:00
|
|
|
    def get_lsb_facts(self):
        """Populate the 'lsb' fact from `lsb_release -a` output, falling
        back to parsing /etc/lsb-release when the binary is absent.

        NOTE(review): 'LSB Version:' writes into the 'release' key, which a
        later 'Release:' line overwrites — preserved here as-is.
        """
        lsb_path = self.module.get_bin_path('lsb_release')
        if lsb_path:
            rc, out, err = self.module.run_command([lsb_path, "-a"])
            if rc == 0:
                out = out.decode('utf-8', 'replace')
                self.facts['lsb'] = {}
                for line in out.split('\n'):
                    if len(line) < 1 or ':' not in line:
                        continue
                    value = line.split(':', 1)[1].strip()
                    if 'LSB Version:' in line:
                        self.facts['lsb']['release'] = value
                    elif 'Distributor ID:' in line:
                        self.facts['lsb']['id'] = value
                    elif 'Description:' in line:
                        self.facts['lsb']['description'] = value
                    elif 'Release:' in line:
                        self.facts['lsb']['release'] = value
                    elif 'Codename:' in line:
                        self.facts['lsb']['codename'] = value
        elif lsb_path is None and os.path.exists('/etc/lsb-release'):
            self.facts['lsb'] = {}
            for line in get_file_lines('/etc/lsb-release'):
                value = line.split('=',1)[1].strip()
                if 'DISTRIB_ID' in line:
                    self.facts['lsb']['id'] = value
                elif 'DISTRIB_RELEASE' in line:
                    self.facts['lsb']['release'] = value
                elif 'DISTRIB_DESCRIPTION' in line:
                    self.facts['lsb']['description'] = value
                elif 'DISTRIB_CODENAME' in line:
                    self.facts['lsb']['codename'] = value

        # Derive the major release number from the dotted release string.
        if 'lsb' in self.facts and 'release' in self.facts['lsb']:
            self.facts['lsb']['major_release'] = self.facts['lsb']['release'].split('.')[0]
|
|
|
|
|
|
|
|
    def get_selinux_facts(self):
        """Populate the 'selinux' fact from the libselinux bindings.

        When the selinux python bindings are not importable the fact is
        simply False.  Each sub-query degrades to 'unknown' on OSError.
        """
        if not HAVE_SELINUX:
            self.facts['selinux'] = False
            return
        self.facts['selinux'] = {}
        if not selinux.is_selinux_enabled():
            self.facts['selinux']['status'] = 'disabled'
        else:
            self.facts['selinux']['status'] = 'enabled'
            try:
                self.facts['selinux']['policyvers'] = selinux.security_policyvers()
            except OSError:
                self.facts['selinux']['policyvers'] = 'unknown'
            try:
                # configured (on-disk) enforce mode
                (rc, configmode) = selinux.selinux_getenforcemode()
                if rc == 0:
                    self.facts['selinux']['config_mode'] = Facts.SELINUX_MODE_DICT.get(configmode, 'unknown')
                else:
                    self.facts['selinux']['config_mode'] = 'unknown'
            except OSError:
                self.facts['selinux']['config_mode'] = 'unknown'
            try:
                # currently active enforce mode
                mode = selinux.security_getenforce()
                self.facts['selinux']['mode'] = Facts.SELINUX_MODE_DICT.get(mode, 'unknown')
            except OSError:
                self.facts['selinux']['mode'] = 'unknown'
            try:
                (rc, policytype) = selinux.selinux_getpolicytype()
                if rc == 0:
                    self.facts['selinux']['type'] = policytype
                else:
                    self.facts['selinux']['type'] = 'unknown'
            except OSError:
                self.facts['selinux']['type'] = 'unknown'
|
|
|
|
|
2016-04-13 18:13:45 +00:00
|
|
|
    def get_caps_facts(self):
        """Record process capability facts from `capsh --print`.

        'system_capabilities_enforced' is the string 'True'/'False' (or 'NA'
        when capsh is absent or prints no 'Current:' line); when enforced,
        'system_capabilities' lists the individual capability names.
        """
        capsh_path = self.module.get_bin_path('capsh')
        if capsh_path:
            rc, out, err = self.module.run_command([capsh_path, "--print"])
            out = out.decode('utf-8', 'replace')
            enforced_caps = []
            enforced = 'NA'
            for line in out.split('\n'):
                if len(line) < 1:
                    continue
                if line.startswith('Current:'):
                    # 'Current: =ep' means all capabilities (nothing dropped),
                    # i.e. no capability restriction is being enforced.
                    if line.split(':')[1].strip() == '=ep':
                        enforced = 'False'
                    else:
                        enforced = 'True'
                        enforced_caps = [i.strip() for i in line.split('=')[1].split(',')]

            self.facts['system_capabilities_enforced'] = enforced
            self.facts['system_capabilities'] = enforced_caps
|
|
|
|
|
2014-03-16 20:02:37 +00:00
|
|
|
|
2014-11-12 17:28:27 +00:00
|
|
|
def get_fips_facts(self):
|
|
|
|
self.facts['fips'] = False
|
|
|
|
data = get_file_content('/proc/sys/crypto/fips_enabled')
|
|
|
|
if data and data == '1':
|
|
|
|
self.facts['fips'] = True
|
|
|
|
|
|
|
|
|
2014-03-16 20:02:37 +00:00
|
|
|
def get_date_time_facts(self):
|
|
|
|
self.facts['date_time'] = {}
|
|
|
|
|
|
|
|
now = datetime.datetime.now()
|
|
|
|
self.facts['date_time']['year'] = now.strftime('%Y')
|
|
|
|
self.facts['date_time']['month'] = now.strftime('%m')
|
2014-03-24 17:43:00 +00:00
|
|
|
self.facts['date_time']['weekday'] = now.strftime('%A')
|
2015-09-24 13:05:44 +00:00
|
|
|
self.facts['date_time']['weekday_number'] = now.strftime('%w')
|
|
|
|
self.facts['date_time']['weeknumber'] = now.strftime('%W')
|
2014-03-16 20:02:37 +00:00
|
|
|
self.facts['date_time']['day'] = now.strftime('%d')
|
|
|
|
self.facts['date_time']['hour'] = now.strftime('%H')
|
|
|
|
self.facts['date_time']['minute'] = now.strftime('%M')
|
|
|
|
self.facts['date_time']['second'] = now.strftime('%S')
|
|
|
|
self.facts['date_time']['epoch'] = now.strftime('%s')
|
|
|
|
if self.facts['date_time']['epoch'] == '' or self.facts['date_time']['epoch'][0] == '%':
|
|
|
|
self.facts['date_time']['epoch'] = str(int(time.time()))
|
|
|
|
self.facts['date_time']['date'] = now.strftime('%Y-%m-%d')
|
|
|
|
self.facts['date_time']['time'] = now.strftime('%H:%M:%S')
|
|
|
|
self.facts['date_time']['iso8601_micro'] = now.utcnow().strftime("%Y-%m-%dT%H:%M:%S.%fZ")
|
|
|
|
self.facts['date_time']['iso8601'] = now.utcnow().strftime("%Y-%m-%dT%H:%M:%SZ")
|
2015-07-05 16:23:22 +00:00
|
|
|
self.facts['date_time']['iso8601_basic'] = now.strftime("%Y%m%dT%H%M%S%f")
|
|
|
|
self.facts['date_time']['iso8601_basic_short'] = now.strftime("%Y%m%dT%H%M%S")
|
2014-03-16 20:02:37 +00:00
|
|
|
self.facts['date_time']['tz'] = time.strftime("%Z")
|
|
|
|
self.facts['date_time']['tz_offset'] = time.strftime("%z")
|
|
|
|
|
2016-05-23 19:08:56 +00:00
|
|
|
def is_systemd_managed(self):
|
2015-11-02 20:23:23 +00:00
|
|
|
# tools must be installed
|
2016-03-14 16:45:28 +00:00
|
|
|
if self.module.get_bin_path('systemctl'):
|
2015-11-02 20:23:23 +00:00
|
|
|
|
2015-11-03 16:43:50 +00:00
|
|
|
# this should show if systemd is the boot init system, if checking init faild to mark as systemd
|
2015-11-02 20:23:23 +00:00
|
|
|
# these mirror systemd's own sd_boot test http://www.freedesktop.org/software/systemd/man/sd_booted.html
|
|
|
|
for canary in ["/run/systemd/system/", "/dev/.run/systemd/", "/dev/.systemd/"]:
|
|
|
|
if os.path.exists(canary):
|
|
|
|
return True
|
|
|
|
return False
|
2014-03-16 20:02:37 +00:00
|
|
|
|
|
|
|
# User
|
|
|
|
def get_user_facts(self):
|
|
|
|
self.facts['user_id'] = getpass.getuser()
|
2014-09-24 21:05:31 +00:00
|
|
|
pwent = pwd.getpwnam(getpass.getuser())
|
|
|
|
self.facts['user_uid'] = pwent.pw_uid
|
|
|
|
self.facts['user_gid'] = pwent.pw_gid
|
|
|
|
self.facts['user_gecos'] = pwent.pw_gecos
|
|
|
|
self.facts['user_dir'] = pwent.pw_dir
|
|
|
|
self.facts['user_shell'] = pwent.pw_shell
|
2014-03-16 20:02:37 +00:00
|
|
|
|
|
|
|
def get_env_facts(self):
|
|
|
|
self.facts['env'] = {}
|
2016-04-27 07:15:01 +00:00
|
|
|
for k,v in iteritems(os.environ):
|
2014-03-16 20:02:37 +00:00
|
|
|
self.facts['env'][k] = v
|
|
|
|
|
2014-09-29 00:24:16 +00:00
|
|
|
    def get_dns_facts(self):
        """Parse /etc/resolv.conf into the 'dns' fact.

        Collects nameservers (accumulated across lines), domain, search and
        sortlist lists, and options as a dict where a value-less option is
        stored as True.
        """
        self.facts['dns'] = {}
        for line in get_file_content('/etc/resolv.conf', '').splitlines():
            # skip comments ('#' or ';') and blank lines
            if line.startswith('#') or line.startswith(';') or line.strip() == '':
                continue
            tokens = line.split()
            if len(tokens) == 0:
                continue
            if tokens[0] == 'nameserver':
                if not 'nameservers' in self.facts['dns']:
                    self.facts['dns']['nameservers'] = []
                for nameserver in tokens[1:]:
                    self.facts['dns']['nameservers'].append(nameserver)
            elif tokens[0] == 'domain':
                if len(tokens) > 1:
                    self.facts['dns']['domain'] = tokens[1]
            elif tokens[0] == 'search':
                # a later 'search' line replaces, not extends, an earlier one
                self.facts['dns']['search'] = []
                for suffix in tokens[1:]:
                    self.facts['dns']['search'].append(suffix)
            elif tokens[0] == 'sortlist':
                self.facts['dns']['sortlist'] = []
                for address in tokens[1:]:
                    self.facts['dns']['sortlist'].append(address)
            elif tokens[0] == 'options':
                self.facts['dns']['options'] = {}
                if len(tokens) > 1:
                    for option in tokens[1:]:
                        option_tokens = option.split(':', 1)
                        if len(option_tokens) == 0:
                            continue
                        # 'opt:val' stores the value; bare 'opt' stores True
                        val = len(option_tokens) == 2 and option_tokens[1] or True
                        self.facts['dns']['options'][option_tokens[0]] = val
|
2014-09-29 00:24:16 +00:00
|
|
|
|
2016-02-20 19:33:53 +00:00
|
|
|
def _get_mount_size_facts(self, mountpoint):
|
|
|
|
size_total = None
|
|
|
|
size_available = None
|
|
|
|
try:
|
|
|
|
statvfs_result = os.statvfs(mountpoint)
|
|
|
|
size_total = statvfs_result.f_bsize * statvfs_result.f_blocks
|
|
|
|
size_available = statvfs_result.f_bsize * (statvfs_result.f_bavail)
|
|
|
|
except OSError:
|
|
|
|
pass
|
|
|
|
return size_total, size_available
|
|
|
|
|
2016-02-19 18:59:58 +00:00
|
|
|
def get_python_facts(self):
|
|
|
|
self.facts['python'] = {
|
|
|
|
'version': {
|
|
|
|
'major': sys.version_info[0],
|
|
|
|
'minor': sys.version_info[1],
|
|
|
|
'micro': sys.version_info[2],
|
|
|
|
'releaselevel': sys.version_info[3],
|
|
|
|
'serial': sys.version_info[4]
|
|
|
|
},
|
|
|
|
'version_info': list(sys.version_info),
|
|
|
|
'executable': sys.executable,
|
2016-02-23 17:28:04 +00:00
|
|
|
'has_sslcontext': HAS_SSLCONTEXT
|
2016-02-19 18:59:58 +00:00
|
|
|
}
|
2016-02-29 15:05:48 +00:00
|
|
|
try:
|
|
|
|
self.facts['python']['type'] = sys.subversion[0]
|
|
|
|
except AttributeError:
|
|
|
|
self.facts['python']['type'] = None
|
2016-02-19 18:59:58 +00:00
|
|
|
|
|
|
|
|
2016-04-27 07:15:01 +00:00
|
|
|
class Distribution(object):
    """
    This subclass of Facts fills the distribution, distribution_version and distribution_release variables

    To do so it checks the existence and content of typical files in /etc containing distribution information

    This is unit tested. Please extend the tests to cover all distributions if you have them available.
    """

    # every distribution name mentioned here, must have one of
    #  - allowempty == True
    #  - be listed in SEARCH_STRING
    #  - have a function get_distribution_DISTNAME implemented
    # Checked in order; the same path may appear under several names.
    OSDIST_LIST = (
        {'path': '/etc/oracle-release', 'name': 'OracleLinux'},
        {'path': '/etc/slackware-version', 'name': 'Slackware'},
        {'path': '/etc/redhat-release', 'name': 'RedHat'},
        {'path': '/etc/vmware-release', 'name': 'VMwareESX', 'allowempty': True},
        {'path': '/etc/openwrt_release', 'name': 'OpenWrt'},
        {'path': '/etc/system-release', 'name': 'Amazon'},
        {'path': '/etc/alpine-release', 'name': 'Alpine'},
        {'path': '/etc/arch-release', 'name': 'Archlinux', 'allowempty': True},
        {'path': '/etc/os-release', 'name': 'SuSE'},
        {'path': '/etc/SuSE-release', 'name': 'SuSE'},
        {'path': '/etc/gentoo-release', 'name': 'Gentoo'},
        {'path': '/etc/os-release', 'name': 'Debian'},
        {'path': '/etc/lsb-release', 'name': 'Mandriva'},
        {'path': '/etc/altlinux-release', 'name': 'Altlinux'},
        {'path': '/etc/os-release', 'name': 'NA'},
        {'path': '/etc/coreos/update.conf', 'name': 'Coreos'},
    )

    # Substrings that must appear in the release file's content for the
    # corresponding OSDIST_LIST name to match.
    SEARCH_STRING = {
        'OracleLinux': 'Oracle Linux',
        'RedHat': 'Red Hat',
        'Altlinux': 'ALT Linux',
    }

    # A list with OS Family members
    OS_FAMILY = dict(
        RedHat = 'RedHat', Fedora = 'RedHat', CentOS = 'RedHat', Scientific = 'RedHat',
        SLC = 'RedHat', Ascendos = 'RedHat', CloudLinux = 'RedHat', PSBM = 'RedHat',
        OracleLinux = 'RedHat', OVS = 'RedHat', OEL = 'RedHat', Amazon = 'RedHat',
        XenServer = 'RedHat', Ubuntu = 'Debian', Debian = 'Debian', Raspbian = 'Debian', Slackware = 'Slackware', SLES = 'Suse',
        SLED = 'Suse', openSUSE = 'Suse', SuSE = 'Suse', SLES_SAP = 'Suse', Gentoo = 'Gentoo', Funtoo = 'Gentoo',
        Archlinux = 'Archlinux', Manjaro = 'Archlinux', Mandriva = 'Mandrake', Mandrake = 'Mandrake', Altlinux = 'Altlinux',
        Solaris = 'Solaris', Nexenta = 'Solaris', OmniOS = 'Solaris', OpenIndiana = 'Solaris',
        SmartOS = 'Solaris', AIX = 'AIX', Alpine = 'Alpine', MacOSX = 'Darwin',
        FreeBSD = 'FreeBSD', HPUX = 'HP-UX', openSUSE_Leap = 'Suse'
    )
|
|
|
|
|
2016-05-09 13:59:26 +00:00
|
|
|
def __init__(self, module):
|
2016-04-27 07:15:01 +00:00
|
|
|
self.system = platform.system()
|
|
|
|
self.facts = {}
|
2016-05-09 13:59:26 +00:00
|
|
|
self.module = module
|
2016-04-27 07:15:01 +00:00
|
|
|
|
|
|
|
def populate(self):
|
2016-05-09 14:55:28 +00:00
|
|
|
self.get_distribution_facts()
|
2016-04-27 07:15:01 +00:00
|
|
|
return self.facts
|
|
|
|
|
|
|
|
    def get_distribution_facts(self):
        """Detect distribution, version and release, then derive os_family.

        Starts from the platform module's baseline, dispatches to a
        per-system parser for the non-Linux systems listed in
        systems_implemented, and for Linux walks OSDIST_LIST release files
        to correct what platform.dist() reported.
        """
        # The platform module provides information about the running
        # system/distribution. Use this as a baseline and fix buggy systems
        # afterwards
        self.facts['distribution'] = self.system
        self.facts['distribution_release'] = platform.release()
        self.facts['distribution_version'] = platform.version()
        systems_implemented = ('AIX', 'HP-UX', 'Darwin', 'FreeBSD', 'OpenBSD', 'SunOS')

        self.facts['distribution'] = self.system

        if self.system in systems_implemented:
            # 'HP-UX' -> method name get_distribution_HPUX, etc.
            cleanedname = self.system.replace('-','')
            distfunc = getattr(self, 'get_distribution_'+cleanedname)
            distfunc()
        elif self.system == 'Linux':
            # try to find out which linux distribution this is
            dist = platform.dist()
            self.facts['distribution'] = dist[0].capitalize() or 'NA'
            self.facts['distribution_version'] = dist[1] or 'NA'
            self.facts['distribution_major_version'] = dist[1].split('.')[0] or 'NA'
            self.facts['distribution_release'] = dist[2] or 'NA'
            # Try to handle the exceptions now ...
            # self.facts['distribution_debug'] = []
            # First matching release file wins; OSDIST_LIST order matters.
            for ddict in self.OSDIST_LIST:
                name = ddict['name']
                path = ddict['path']

                if not os.path.exists(path):
                    continue
                # if allowempty is set, we only check for file existance but not content
                if 'allowempty' in ddict and ddict['allowempty']:
                    self.facts['distribution'] = name
                    break
                if os.path.getsize(path) == 0:
                    continue

                data = get_file_content(path)
                if name in self.SEARCH_STRING:
                    # look for the distribution string in the data and replace according to RELEASE_NAME_MAP
                    # only the distribution name is set, the version is assumed to be correct from platform.dist()
                    if self.SEARCH_STRING[name] in data:
                        # this sets distribution=RedHat if 'Red Hat' shows up in data
                        self.facts['distribution'] = name
                    else:
                        # this sets distribution to what's in the data, e.g. CentOS, Scientific, ...
                        self.facts['distribution'] = data.split()[0]
                    break
                else:
                    # call a dedicated function for parsing the file content
                    try:
                        distfunc = getattr(self, 'get_distribution_' + name)
                        parsed = distfunc(name, data, path)
                        if parsed is None or parsed:
                            # distfunc return False if parsing failed
                            # break only if parsing was succesful
                            # otherwise continue with other distributions
                            break
                    except AttributeError:
                        # this should never happen, but if it does fail quitely and not with a traceback
                        pass

                # to debug multiple matching release files, one can use:
                # self.facts['distribution_debug'].append({path + ' ' + name:
                #                                          (parsed,
                #                                           self.facts['distribution'],
                #                                           self.facts['distribution_version'],
                #                                           self.facts['distribution_release'],
                #                                           )})

        # Map the detected distribution onto its family; unknown
        # distributions are their own family.
        self.facts['os_family'] = self.facts['distribution']
        distro = self.facts['distribution'].replace(' ', '_')
        if distro in self.OS_FAMILY:
            self.facts['os_family'] = self.OS_FAMILY[distro]
|
|
|
|
|
|
|
|
def get_distribution_AIX(self):
|
|
|
|
rc, out, err = self.module.run_command("/usr/bin/oslevel")
|
|
|
|
data = out.split('.')
|
|
|
|
self.facts['distribution_version'] = data[0]
|
|
|
|
self.facts['distribution_release'] = data[1]
|
|
|
|
|
|
|
|
def get_distribution_HPUX(self):
|
|
|
|
rc, out, err = self.module.run_command("/usr/sbin/swlist |egrep 'HPUX.*OE.*[AB].[0-9]+\.[0-9]+'", use_unsafe_shell=True)
|
|
|
|
data = re.search('HPUX.*OE.*([AB].[0-9]+\.[0-9]+)\.([0-9]+).*', out)
|
|
|
|
if data:
|
|
|
|
self.facts['distribution_version'] = data.groups()[0]
|
|
|
|
self.facts['distribution_release'] = data.groups()[1]
|
|
|
|
|
|
|
|
def get_distribution_Darwin(self):
|
|
|
|
self.facts['distribution'] = 'MacOSX'
|
|
|
|
rc, out, err = self.module.run_command("/usr/bin/sw_vers -productVersion")
|
|
|
|
data = out.split()[-1]
|
|
|
|
self.facts['distribution_version'] = data
|
|
|
|
|
2016-06-14 21:58:23 +00:00
|
|
|
def get_distribution_FreeBSD(self):
|
|
|
|
self.facts['distribution_release'] = platform.release()
|
|
|
|
data = re.search('(\d+)\.(\d+)-RELEASE.*', self.facts['distribution_release'])
|
|
|
|
if data:
|
|
|
|
self.facts['distribution_major_version'] = data.group(1)
|
|
|
|
self.facts['distribution_version'] = '%s.%s' % (data.group(1), data.group(2))
|
|
|
|
|
2016-04-27 07:15:01 +00:00
|
|
|
def get_distribution_OpenBSD(self):
|
|
|
|
rc, out, err = self.module.run_command("/sbin/sysctl -n kern.version")
|
|
|
|
match = re.match('OpenBSD\s[0-9]+.[0-9]+-(\S+)\s.*', out)
|
|
|
|
if match:
|
|
|
|
self.facts['distribution_version'] = match.groups()[0]
|
|
|
|
else:
|
|
|
|
self.facts['distribution_version'] = 'release'
|
|
|
|
|
|
|
|
def get_distribution_Slackware(self, name, data, path):
|
2016-04-29 20:18:50 +00:00
|
|
|
if 'Slackware' not in data:
|
|
|
|
return False # TODO: remove
|
|
|
|
self.facts['distribution'] = name
|
|
|
|
version = re.findall('\w+[.]\w+', data)
|
|
|
|
if version:
|
|
|
|
self.facts['distribution_version'] = version[0]
|
|
|
|
|
|
|
|
def get_distribution_Amazon(self, name, data, path):
|
|
|
|
if 'Amazon' not in data:
|
|
|
|
return False # TODO: remove
|
|
|
|
self.facts['distribution'] = 'Amazon'
|
|
|
|
self.facts['distribution_version'] = data.split()[-1]
|
2016-04-27 07:15:01 +00:00
|
|
|
|
|
|
|
def get_distribution_OpenWrt(self, name, data, path):
|
2016-04-29 20:18:50 +00:00
|
|
|
if 'OpenWrt' not in data:
|
|
|
|
return False # TODO: remove
|
|
|
|
self.facts['distribution'] = name
|
|
|
|
version = re.search('DISTRIB_RELEASE="(.*)"', data)
|
|
|
|
if version:
|
|
|
|
self.facts['distribution_version'] = version.groups()[0]
|
|
|
|
release = re.search('DISTRIB_CODENAME="(.*)"', data)
|
|
|
|
if release:
|
|
|
|
self.facts['distribution_release'] = release.groups()[0]
|
2016-04-27 07:15:01 +00:00
|
|
|
|
|
|
|
def get_distribution_Alpine(self, name, data, path):
|
|
|
|
self.facts['distribution'] = 'Alpine'
|
|
|
|
self.facts['distribution_version'] = data
|
|
|
|
|
2016-06-07 19:12:37 +00:00
|
|
|
    def get_distribution_SunOS(self):
        """Classify a SunOS system from the first line of /etc/release.

        Distinguishes Solaris (incl. Oracle Solaris), SmartOS, OpenIndiana,
        OmniOS and Nexenta; falls through to False when none match.
        """
        data = get_file_content('/etc/release').split('\n')[0]
        if 'Solaris' in data:
            ora_prefix = ''
            if 'Oracle Solaris' in data:
                # strip the vendor prefix for parsing, re-add it for the release fact
                data = data.replace('Oracle ','')
                ora_prefix = 'Oracle '
            self.facts['distribution'] = data.split()[0]
            self.facts['distribution_version'] = data.split()[1]
            self.facts['distribution_release'] = ora_prefix + data
            return

        # illumos derivatives: consult `uname -v` as a secondary signal
        uname_v = get_uname_version(self.module)
        distribution_version = None
        if 'SmartOS' in data:
            self.facts['distribution'] = 'SmartOS'
            if os.path.exists('/etc/product'):
                # /etc/product is "Key: Value" lines; the Image key carries the version
                product_data = dict([l.split(': ', 1) for l in get_file_content('/etc/product').split('\n') if ': ' in l])
                if 'Image' in product_data:
                    distribution_version = product_data.get('Image').split()[-1]
        elif 'OpenIndiana' in data:
            self.facts['distribution'] = 'OpenIndiana'
        elif 'OmniOS' in data:
            self.facts['distribution'] = 'OmniOS'
            distribution_version = data.split()[-1]
        elif uname_v is not None and 'NexentaOS_' in uname_v:
            self.facts['distribution'] = 'Nexenta'
            distribution_version = data.split()[-1].lstrip('v')

        if self.facts['distribution'] in ('SmartOS', 'OpenIndiana', 'OmniOS', 'Nexenta'):
            self.facts['distribution_release'] = data.strip()
            if distribution_version is not None:
                self.facts['distribution_version'] = distribution_version
            elif uname_v is not None:
                # fall back to the first line of `uname -v`
                self.facts['distribution_version'] = uname_v.split('\n')[0].strip()
            return

        return False  # TODO: remove if tested without this
|
|
|
|
|
|
|
|
def get_distribution_SuSE(self, name, data, path):
|
|
|
|
if 'suse' not in data.lower():
|
|
|
|
return False # TODO: remove if tested without this
|
|
|
|
if path == '/etc/os-release':
|
|
|
|
for line in data.splitlines():
|
|
|
|
distribution = re.search("^NAME=(.*)", line)
|
|
|
|
if distribution:
|
|
|
|
self.facts['distribution'] = distribution.group(1).strip('"')
|
|
|
|
# example pattern are 13.04 13.0 13
|
|
|
|
distribution_version = re.search('^VERSION_ID="?([0-9]+\.?[0-9]*)"?', line)
|
|
|
|
if distribution_version:
|
|
|
|
self.facts['distribution_version'] = distribution_version.group(1)
|
|
|
|
if 'open' in data.lower():
|
|
|
|
release = re.search("^PRETTY_NAME=[^(]+ \(?([^)]+?)\)", line)
|
|
|
|
if release:
|
|
|
|
self.facts['distribution_release'] = release.groups()[0]
|
|
|
|
elif 'enterprise' in data.lower() and 'VERSION_ID' in line:
|
|
|
|
# SLES doesn't got funny release names
|
|
|
|
release = re.search('^VERSION_ID="?[0-9]+\.?([0-9]*)"?', line)
|
|
|
|
if release.group(1):
|
|
|
|
release = release.group(1)
|
|
|
|
else:
|
|
|
|
release = "0" # no minor number, so it is the first release
|
|
|
|
self.facts['distribution_release'] = release
|
|
|
|
elif path == '/etc/SuSE-release':
|
|
|
|
if 'open' in data.lower():
|
|
|
|
data = data.splitlines()
|
|
|
|
distdata = get_file_content(path).split('\n')[0]
|
|
|
|
self.facts['distribution'] = distdata.split()[0]
|
|
|
|
for line in data:
|
|
|
|
release = re.search('CODENAME *= *([^\n]+)', line)
|
|
|
|
if release:
|
|
|
|
self.facts['distribution_release'] = release.groups()[0].strip()
|
|
|
|
elif 'enterprise' in data.lower():
|
|
|
|
lines = data.splitlines()
|
|
|
|
distribution = lines[0].split()[0]
|
|
|
|
if "Server" in data:
|
|
|
|
self.facts['distribution'] = "SLES"
|
|
|
|
elif "Desktop" in data:
|
|
|
|
self.facts['distribution'] = "SLED"
|
|
|
|
for line in lines:
|
|
|
|
release = re.search('PATCHLEVEL = ([0-9]+)', line) # SLES doesn't got funny release names
|
|
|
|
if release:
|
|
|
|
self.facts['distribution_release'] = release.group(1)
|
|
|
|
self.facts['distribution_version'] = self.facts['distribution_version'] + '.' + release.group(1)
|
|
|
|
|
|
|
|
def get_distribution_Debian(self, name, data, path):
|
|
|
|
if 'Debian' in data or 'Raspbian' in data:
|
|
|
|
self.facts['distribution'] = 'Debian'
|
|
|
|
release = re.search("PRETTY_NAME=[^(]+ \(?([^)]+?)\)", data)
|
|
|
|
if release:
|
|
|
|
self.facts['distribution_release'] = release.groups()[0]
|
|
|
|
elif 'Ubuntu' in data:
|
|
|
|
self.facts['distribution'] = 'Ubuntu'
|
|
|
|
pass # Ubuntu gets correct info from python functions
|
|
|
|
else:
|
|
|
|
return False # TODO: remove if tested without this
|
|
|
|
|
|
|
|
def get_distribution_Mandriva(self, name, data, path):
|
|
|
|
if 'Mandriva' in data:
|
|
|
|
self.facts['distribution'] = 'Mandriva'
|
|
|
|
version = re.search('DISTRIB_RELEASE="(.*)"', data)
|
|
|
|
if version:
|
|
|
|
self.facts['distribution_version'] = version.groups()[0]
|
|
|
|
release = re.search('DISTRIB_CODENAME="(.*)"', data)
|
|
|
|
if release:
|
|
|
|
self.facts['distribution_release'] = release.groups()[0]
|
|
|
|
self.facts['distribution'] = name
|
|
|
|
else:
|
|
|
|
return False
|
|
|
|
|
|
|
|
def get_distribution_NA(self, name, data, path):
|
|
|
|
for line in data.splitlines():
|
|
|
|
distribution = re.search("^NAME=(.*)", line)
|
|
|
|
if distribution and self.facts['distribution'] == 'NA':
|
|
|
|
self.facts['distribution'] = distribution.group(1).strip('"')
|
|
|
|
version = re.search("^VERSION=(.*)", line)
|
|
|
|
if version and self.facts['distribution_version'] == 'NA':
|
|
|
|
self.facts['distribution_version'] = version.group(1).strip('"')
|
|
|
|
|
|
|
|
def get_distribution_Coreos(self, name, data, path):
|
|
|
|
if self.facts['distribution'].lower() == 'coreos':
|
|
|
|
if not data:
|
|
|
|
# include fix from #15230, #15228
|
|
|
|
return
|
|
|
|
release = re.search("^GROUP=(.*)", data)
|
|
|
|
if release:
|
|
|
|
self.facts['distribution_release'] = release.group(1).strip('"')
|
|
|
|
else:
|
|
|
|
return False # TODO: remove if tested without this
|
|
|
|
|
|
|
|
|
2014-03-16 20:02:37 +00:00
|
|
|
class Hardware(Facts):
    """
    This is a generic Hardware subclass of Facts. This should be further
    subclassed to implement per platform. If you subclass this, it
    should define:
    - memfree_mb
    - memtotal_mb
    - swapfree_mb
    - swaptotal_mb
    - processor (a list)
    - processor_cores
    - processor_count

    All subclasses MUST define platform.
    """
    platform = 'Generic'

    def __new__(cls, *arguments, **keyword):
        # When Hardware is created, it chooses a subclass to create instead.
        # This check prevents the subclass from then trying to find a subclass
        # and create that.
        if cls is not Hardware:
            return super(Hardware, cls).__new__(cls)

        # Pick the subclass whose declared platform matches the running OS;
        # fall back to the generic Hardware when nothing matches.
        subclass = cls
        for sc in get_all_subclasses(Hardware):
            if sc.platform == platform.system():
                subclass = sc
        if PY3:
            # NOTE(review): on Python 3 object.__new__ rejects extra
            # arguments, hence the argument-free call — confirm this is the
            # intent of the PY2/PY3 split.
            return super(cls, subclass).__new__(subclass)
        else:
            return super(cls, subclass).__new__(subclass, *arguments, **keyword)

    def populate(self):
        # Generic base gathers nothing; platform subclasses override this.
        return self.facts
|
|
|
|
|
|
|
|
class LinuxHardware(Hardware):
    """
    Linux-specific subclass of Hardware.  Defines memory and CPU facts:
    - memfree_mb
    - memtotal_mb
    - swapfree_mb
    - swaptotal_mb
    - processor (a list)
    - processor_cores
    - processor_count

    In addition, it also defines number of DMI facts and device facts.
    """

    platform = 'Linux'

    # Originally only had these four as toplevelfacts
    ORIGINAL_MEMORY_FACTS = frozenset(('MemTotal', 'SwapTotal', 'MemFree', 'SwapFree'))
    # Now we have all of these in a dict structure
    MEMORY_FACTS = ORIGINAL_MEMORY_FACTS.union(('Buffers', 'Cached', 'SwapCached'))

    # regex used against findmnt output to detect bind mounts
    BIND_MOUNT_RE = re.compile(r'.*\]')

    # regex used against mtab content to find entries that are bind mounts
    MTAB_BIND_MOUNT_RE = re.compile(r'.*bind.*"')
|
|
|
|
|
2014-03-16 20:02:37 +00:00
|
|
|
    def populate(self):
        """Gather all Linux hardware facts and return the fact dict.

        Mount facts are time-boxed (see the @timeout decorator on
        get_mount_facts); a TimeoutError there is swallowed so slow or hung
        filesystems don't block the whole fact run.
        """
        self.get_cpu_facts()
        self.get_memory_facts()
        self.get_dmi_facts()
        self.get_device_facts()
        self.get_uptime_facts()
        self.get_lvm_facts()
        try:
            self.get_mount_facts()
        except TimeoutError:
            pass
        return self.facts
|
|
|
|
|
|
|
|
def get_memory_facts(self):
|
|
|
|
if not os.access("/proc/meminfo", os.R_OK):
|
|
|
|
return
|
2015-01-29 20:09:19 +00:00
|
|
|
|
2015-01-30 04:56:41 +00:00
|
|
|
memstats = {}
|
2015-02-09 22:30:06 +00:00
|
|
|
for line in get_file_lines("/proc/meminfo"):
|
2014-03-16 20:02:37 +00:00
|
|
|
data = line.split(":", 1)
|
|
|
|
key = data[0]
|
2015-01-29 20:09:19 +00:00
|
|
|
if key in self.ORIGINAL_MEMORY_FACTS:
|
2014-03-16 20:02:37 +00:00
|
|
|
val = data[1].strip().split(' ')[0]
|
|
|
|
self.facts["%s_mb" % key.lower()] = long(val) / 1024
|
2015-01-29 20:09:19 +00:00
|
|
|
|
|
|
|
if key in self.MEMORY_FACTS:
|
2014-12-10 16:43:37 +00:00
|
|
|
val = data[1].strip().split(' ')[0]
|
|
|
|
memstats[key.lower()] = long(val) / 1024
|
2015-01-29 20:09:19 +00:00
|
|
|
|
2015-01-30 04:56:41 +00:00
|
|
|
if None not in (memstats.get('memtotal'), memstats.get('memfree')):
|
2015-01-29 20:09:19 +00:00
|
|
|
memstats['real:used'] = memstats['memtotal'] - memstats['memfree']
|
2015-01-30 04:56:41 +00:00
|
|
|
if None not in (memstats.get('cached'), memstats.get('memfree'), memstats.get('buffers')):
|
2015-01-29 20:09:19 +00:00
|
|
|
memstats['nocache:free'] = memstats['cached'] + memstats['memfree'] + memstats['buffers']
|
2015-01-30 04:56:41 +00:00
|
|
|
if None not in (memstats.get('memtotal'), memstats.get('nocache:free')):
|
2015-01-29 20:09:19 +00:00
|
|
|
memstats['nocache:used'] = memstats['memtotal'] - memstats['nocache:free']
|
2015-01-30 04:56:41 +00:00
|
|
|
if None not in (memstats.get('swaptotal'), memstats.get('swapfree')):
|
2015-01-29 20:09:19 +00:00
|
|
|
memstats['swap:used'] = memstats['swaptotal'] - memstats['swapfree']
|
|
|
|
|
2014-12-10 16:43:37 +00:00
|
|
|
self.facts['memory_mb'] = {
|
|
|
|
'real' : {
|
2015-01-30 04:56:41 +00:00
|
|
|
'total': memstats.get('memtotal'),
|
|
|
|
'used': memstats.get('real:used'),
|
|
|
|
'free': memstats.get('memfree'),
|
2014-12-10 16:43:37 +00:00
|
|
|
},
|
|
|
|
'nocache' : {
|
2015-01-30 04:56:41 +00:00
|
|
|
'free': memstats.get('nocache:free'),
|
|
|
|
'used': memstats.get('nocache:used'),
|
2014-12-10 16:43:37 +00:00
|
|
|
},
|
|
|
|
'swap' : {
|
2015-01-30 04:56:41 +00:00
|
|
|
'total': memstats.get('swaptotal'),
|
|
|
|
'free': memstats.get('swapfree'),
|
|
|
|
'used': memstats.get('swap:used'),
|
|
|
|
'cached': memstats.get('swapcached'),
|
2015-01-29 20:09:19 +00:00
|
|
|
},
|
2014-12-10 16:43:37 +00:00
|
|
|
}
|
2014-03-16 20:02:37 +00:00
|
|
|
|
|
|
|
    def get_cpu_facts(self):
        """Populate processor facts from /proc/cpuinfo.

        Derives processor (list of model/vendor strings), processor_count
        (sockets), processor_cores, processor_threads_per_core and
        processor_vcpus, with special handling for Xen paravirt guests and
        s390x.
        """
        i = 0
        vendor_id_occurrence = 0
        model_name_occurrence = 0
        physid = 0
        coreid = 0
        sockets = {}
        cores = {}

        xen = False
        xen_paravirt = False
        try:
            if os.path.exists('/proc/xen'):
                xen = True
            else:
                for line in get_file_lines('/sys/hypervisor/type'):
                    if line.strip() == 'xen':
                        xen = True
                    # Only interested in the first line
                    break
        except IOError:
            pass

        if not os.access("/proc/cpuinfo", os.R_OK):
            return
        self.facts['processor'] = []
        for line in get_file_lines('/proc/cpuinfo'):
            data = line.split(":", 1)
            key = data[0].strip()

            if xen:
                if key == 'flags':
                    # Check for vme cpu flag, Xen paravirt does not expose this.
                    # Need to detect Xen paravirt because it exposes cpuinfo
                    # differently than Xen HVM or KVM and causes reporting of
                    # only a single cpu core.
                    if 'vme' not in data:
                        xen_paravirt = True

            # model name is for Intel arch, Processor (mind the uppercase P)
            # works for some ARM devices, like the Sheevaplug.
            if key in ['model name', 'Processor', 'vendor_id', 'cpu', 'Vendor']:
                if 'processor' not in self.facts:
                    self.facts['processor'] = []
                self.facts['processor'].append(data[1].strip())
                if key == 'vendor_id':
                    vendor_id_occurrence += 1
                if key == 'model name':
                    model_name_occurrence += 1
                i += 1
            elif key == 'physical id':
                physid = data[1].strip()
                if physid not in sockets:
                    sockets[physid] = 1
            elif key == 'core id':
                coreid = data[1].strip()
                # NOTE(review): membership is tested against `sockets` but the
                # write goes to `cores` — looks suspicious, confirm intent
                if coreid not in sockets:
                    cores[coreid] = 1
            elif key == 'cpu cores':
                sockets[physid] = int(data[1].strip())
            elif key == 'siblings':
                cores[coreid] = int(data[1].strip())
            elif key == '# processors':
                # s390x reports an aggregate count instead of per-cpu stanzas
                self.facts['processor_cores'] = int(data[1].strip())

        # when each cpu stanza carried both vendor_id and model name, `i`
        # double-counted the entries; collapse back to the stanza count
        if vendor_id_occurrence == model_name_occurrence:
            i = vendor_id_occurrence

        if self.facts['architecture'] != 's390x':
            if xen_paravirt:
                # paravirt hides topology; treat every entry as one full cpu
                self.facts['processor_count'] = i
                self.facts['processor_cores'] = i
                self.facts['processor_threads_per_core'] = 1
                self.facts['processor_vcpus'] = i
            else:
                if sockets:
                    self.facts['processor_count'] = len(sockets)
                else:
                    self.facts['processor_count'] = i

                socket_values = list(sockets.values())
                if socket_values:
                    self.facts['processor_cores'] = socket_values[0]
                else:
                    self.facts['processor_cores'] = 1

                core_values = list(cores.values())
                if core_values:
                    self.facts['processor_threads_per_core'] = core_values[0] // self.facts['processor_cores']
                else:
                    # NOTE(review): 1 // processor_cores is 0 whenever
                    # processor_cores > 1 — confirm this fallback is intended
                    self.facts['processor_threads_per_core'] = 1 // self.facts['processor_cores']

                self.facts['processor_vcpus'] = (self.facts['processor_threads_per_core'] *
                                                 self.facts['processor_count'] * self.facts['processor_cores'])
|
2014-03-16 20:02:37 +00:00
|
|
|
|
|
|
|
    def get_dmi_facts(self):
        ''' learn dmi facts from system

        Try /sys first for dmi related facts.
        If that is not available, fall back to dmidecode executable '''

        if os.path.exists('/sys/devices/virtual/dmi/id/product_name'):
            # Use kernel DMI info, if available

            # DMI SPEC -- http://www.dmtf.org/sites/default/files/standards/documents/DSP0134_2.7.0.pdf
            # Chassis-type code -> human readable form factor name
            FORM_FACTOR = [ "Unknown", "Other", "Unknown", "Desktop",
                            "Low Profile Desktop", "Pizza Box", "Mini Tower", "Tower",
                            "Portable", "Laptop", "Notebook", "Hand Held", "Docking Station",
                            "All In One", "Sub Notebook", "Space-saving", "Lunch Box",
                            "Main Server Chassis", "Expansion Chassis", "Sub Chassis",
                            "Bus Expansion Chassis", "Peripheral Chassis", "RAID Chassis",
                            "Rack Mount Chassis", "Sealed-case PC", "Multi-system",
                            "CompactPCI", "AdvancedTCA", "Blade" ]

            # fact name -> sysfs file carrying its value
            DMI_DICT = {
                'bios_date': '/sys/devices/virtual/dmi/id/bios_date',
                'bios_version': '/sys/devices/virtual/dmi/id/bios_version',
                'form_factor': '/sys/devices/virtual/dmi/id/chassis_type',
                'product_name': '/sys/devices/virtual/dmi/id/product_name',
                'product_serial': '/sys/devices/virtual/dmi/id/product_serial',
                'product_uuid': '/sys/devices/virtual/dmi/id/product_uuid',
                'product_version': '/sys/devices/virtual/dmi/id/product_version',
                'system_vendor': '/sys/devices/virtual/dmi/id/sys_vendor'
                }

            for (key,path) in DMI_DICT.items():
                data = get_file_content(path)
                if data is not None:
                    if key == 'form_factor':
                        try:
                            self.facts['form_factor'] = FORM_FACTOR[int(data)]
                        except IndexError:
                            # chassis code newer than our table
                            self.facts['form_factor'] = 'unknown (%s)' % data
                    else:
                        self.facts[key] = data
                else:
                    self.facts[key] = 'NA'

        else:
            # Fall back to using dmidecode, if available
            dmi_bin = self.module.get_bin_path('dmidecode')
            # fact name -> dmidecode -s keyword
            DMI_DICT = {
                'bios_date': 'bios-release-date',
                'bios_version': 'bios-version',
                'form_factor': 'chassis-type',
                'product_name': 'system-product-name',
                'product_serial': 'system-serial-number',
                'product_uuid': 'system-uuid',
                'product_version': 'system-version',
                'system_vendor': 'system-manufacturer'
                }
            for (k, v) in DMI_DICT.items():
                if dmi_bin is not None:
                    (rc, out, err) = self.module.run_command('%s -s %s' % (dmi_bin, v))
                    if rc == 0:
                        # Strip out commented lines (specific dmidecode output)
                        thisvalue = ''.join([ line for line in out.split('\n') if not line.startswith('#') ])
                        try:
                            # reject values that cannot be serialized to JSON
                            json.dumps(thisvalue)
                        except UnicodeDecodeError:
                            thisvalue = "NA"

                        self.facts[k] = thisvalue
                    else:
                        self.facts[k] = 'NA'
                else:
                    self.facts[k] = 'NA'
|
|
|
|
|
2016-08-18 00:58:51 +00:00
|
|
|
def _run_lsblk(self, lsblk_path):
|
|
|
|
args = ['--list', '--noheadings', '--paths', '--output', 'NAME,UUID']
|
|
|
|
cmd = [lsblk_path] + args
|
|
|
|
rc, out, err = self.module.run_command(cmd)
|
|
|
|
return rc, out, err
|
|
|
|
|
|
|
|
def _lsblk_uuid(self):
|
|
|
|
uuids = {}
|
|
|
|
lsblk_path = self.module.get_bin_path("lsblk")
|
|
|
|
if not lsblk_path:
|
|
|
|
return uuids
|
|
|
|
|
|
|
|
rc, out, err = self._run_lsblk(lsblk_path)
|
|
|
|
if rc != 0:
|
|
|
|
return uuids
|
|
|
|
|
|
|
|
# each line will be in format:
|
|
|
|
# <devicename><some whitespace><uuid>
|
|
|
|
# /dev/sda1 32caaec3-ef40-4691-a3b6-438c3f9bc1c0
|
|
|
|
for lsblk_line in out.splitlines():
|
|
|
|
if not lsblk_line:
|
|
|
|
continue
|
|
|
|
|
|
|
|
line = lsblk_line.strip()
|
|
|
|
fields = line.rsplit(None, 1)
|
|
|
|
|
|
|
|
if len(fields) < 2:
|
|
|
|
continue
|
|
|
|
|
|
|
|
device_name, uuid = fields[0].strip(), fields[1].strip()
|
|
|
|
if device_name in uuids:
|
|
|
|
continue
|
|
|
|
uuids[device_name] = uuid
|
|
|
|
|
|
|
|
return uuids
|
|
|
|
|
|
|
|
def _run_findmnt(self, findmnt_path):
|
|
|
|
args = ['--list', '--noheadings', '--notruncate']
|
|
|
|
cmd = [findmnt_path] + args
|
|
|
|
rc, out, err = self.module.run_command(cmd)
|
|
|
|
return rc, out, err
|
|
|
|
|
|
|
|
def _find_bind_mounts(self):
|
|
|
|
bind_mounts = set()
|
|
|
|
findmnt_path = self.module.get_bin_path("findmnt")
|
|
|
|
if not findmnt_path:
|
|
|
|
return bind_mounts
|
|
|
|
|
|
|
|
rc, out, err = self._run_findmnt(findmnt_path)
|
|
|
|
if rc != 0:
|
|
|
|
return bind_mounts
|
2016-08-18 13:36:03 +00:00
|
|
|
out = out.decode('utf-8', 'replace')
|
2016-08-18 00:58:51 +00:00
|
|
|
|
|
|
|
# find bind mounts, in case /etc/mtab is a symlink to /proc/mounts
|
|
|
|
for line in out.splitlines():
|
|
|
|
fields = line.split()
|
|
|
|
# fields[0] is the TARGET, fields[1] is the SOURCE
|
|
|
|
if len(fields) < 2:
|
|
|
|
continue
|
|
|
|
|
|
|
|
# bind mounts will have a [/directory_name] in the SOURCE column
|
|
|
|
if self.BIND_MOUNT_RE.match(fields[1]):
|
|
|
|
bind_mounts.add(fields[0])
|
|
|
|
|
|
|
|
return bind_mounts
|
|
|
|
|
|
|
|
def _mtab_entries(self):
|
|
|
|
mtab = get_file_content('/etc/mtab', '')
|
|
|
|
mtab_entries = []
|
|
|
|
for line in mtab.splitlines():
|
|
|
|
fields = line.split()
|
|
|
|
if len(fields) < 4:
|
|
|
|
continue
|
|
|
|
mtab_entries.append(fields)
|
|
|
|
return mtab_entries
|
|
|
|
|
2014-04-08 18:21:42 +00:00
|
|
|
    @timeout(10)
    def get_mount_facts(self):
        """Build facts['mounts'] from /etc/mtab entries.

        Skips pseudo filesystems (non-path devices without a ':/' NFS-style
        source, and fstype 'none'); annotates bind mounts and attaches the
        lsblk UUID plus statvfs sizes.  Time-boxed to 10s by @timeout.
        """
        self.facts['mounts'] = []

        bind_mounts = self._find_bind_mounts()
        uuids = self._lsblk_uuid()
        mtab_entries = self._mtab_entries()

        mounts = []
        for fields in mtab_entries:
            device, mount, fstype, options = fields[0], fields[1], fields[2], fields[3]

            # keep real block devices and remote (host:/path) sources only
            if not device.startswith('/') and ':/' not in device:
                continue

            if fstype == 'none':
                continue

            size_total, size_available = self._get_mount_size_facts(mount)

            if mount in bind_mounts:
                # only add if not already there, we might have a plain /etc/mtab
                if not self.MTAB_BIND_MOUNT_RE.match(options):
                    options += ",bind"

            mount_info = {'mount': mount,
                          'device': device,
                          'fstype': fstype,
                          'options': options,
                          # statvfs data
                          'size_total': size_total,
                          'size_available': size_available,
                          'uuid': uuids.get(device, 'N/A')}

            mounts.append(mount_info)

        self.facts['mounts'] = mounts
|
2014-03-16 20:02:37 +00:00
|
|
|
|
2016-06-28 13:44:51 +00:00
|
|
|
def get_holders(self, block_dev_dict, sysdir):
|
|
|
|
block_dev_dict['holders'] = []
|
|
|
|
if os.path.isdir(sysdir + "/holders"):
|
|
|
|
for folder in os.listdir(sysdir + "/holders"):
|
|
|
|
if not folder.startswith("dm-"):
|
|
|
|
continue
|
|
|
|
name = get_file_content(sysdir + "/holders/" + folder + "/dm/name")
|
|
|
|
if name:
|
|
|
|
block_dev_dict['holders'].append(name)
|
|
|
|
else:
|
|
|
|
block_dev_dict['holders'].append(folder)
|
|
|
|
|
2014-03-16 20:02:37 +00:00
|
|
|
    def get_device_facts(self):
        """Walk /sys/block and build the 'devices' fact: one entry per
        physical disk with vendor/model, partitions, scheduler, sizes,
        holders and (when lspci is available) the owning PCI host."""
        self.facts['devices'] = {}
        lspci = self.module.get_bin_path('lspci')
        if lspci:
            # -D prints the full PCI domain so sysfs paths can be matched below.
            rc, pcidata, err = self.module.run_command([lspci, '-D'])
            pcidata = pcidata.decode('utf-8', 'replace')
        else:
            pcidata = None

        try:
            block_devs = os.listdir("/sys/block")
        except OSError:
            # No sysfs block directory (e.g. containers); nothing to report.
            return

        for block in block_devs:
            virtual = 1
            sysfs_no_links = 0
            try:
                path = os.readlink(os.path.join("/sys/block/", block))
            except OSError:
                e = sys.exc_info()[1]
                if e.errno == errno.EINVAL:
                    # Not a symlink (older sysfs layout); use the entry itself.
                    path = block
                    sysfs_no_links = 1
                else:
                    continue
            # Symlink targets under .../virtual/ are loop/ram/etc. devices.
            if "virtual" in path:
                continue
            sysdir = os.path.join("/sys/block", path)
            if sysfs_no_links == 1:
                # Without a symlink, a 'device' subfolder marks a real device.
                for folder in os.listdir(sysdir):
                    if "device" in folder:
                        virtual = 0
                        break
            if virtual:
                continue
            d = {}
            diskname = os.path.basename(sysdir)
            for key in ['vendor', 'model', 'sas_address', 'sas_device_handle']:
                d[key] = get_file_content(sysdir + "/device/" + key)

            for key,test in [ ('removable','/removable'), \
                              ('support_discard','/queue/discard_granularity'),
                              ]:
                d[key] = get_file_content(sysdir + test)

            d['partitions'] = {}
            for folder in os.listdir(sysdir):
                # Partitions appear as "<disk><N>" subfolders, e.g. sda1.
                m = re.search("(" + diskname + "\d+)", folder)
                if m:
                    part = {}
                    partname = m.group(1)
                    part_sysdir = sysdir + "/" + partname

                    part['start'] = get_file_content(part_sysdir + "/start",0)
                    part['sectors'] = get_file_content(part_sysdir + "/size",0)
                    part['sectorsize'] = get_file_content(part_sysdir + "/queue/logical_block_size")
                    if not part['sectorsize']:
                        # Older kernels expose only the hardware sector size.
                        part['sectorsize'] = get_file_content(part_sysdir + "/queue/hw_sector_size",512)
                    part['size'] = self.module.pretty_bytes((float(part['sectors']) * float(part['sectorsize'])))
                    part['uuid'] = get_partition_uuid(partname)
                    self.get_holders(part, part_sysdir)

                    d['partitions'][partname] = part

            d['rotational'] = get_file_content(sysdir + "/queue/rotational")
            d['scheduler_mode'] = ""
            scheduler = get_file_content(sysdir + "/queue/scheduler")
            if scheduler is not None:
                # The active scheduler is the bracketed entry, e.g. "[cfq]".
                m = re.match(".*?(\[(.*)\])", scheduler)
                if m:
                    d['scheduler_mode'] = m.group(2)

            d['sectors'] = get_file_content(sysdir + "/size")
            if not d['sectors']:
                d['sectors'] = 0
            d['sectorsize'] = get_file_content(sysdir + "/queue/logical_block_size")
            if not d['sectorsize']:
                d['sectorsize'] = get_file_content(sysdir + "/queue/hw_sector_size",512)
            d['size'] = self.module.pretty_bytes(float(d['sectors']) * float(d['sectorsize']))

            d['host'] = ""

            # domains are numbered (0 to ffff), bus (0 to ff), slot (0 to 1f), and function (0 to 7).
            m = re.match(".+/([a-f0-9]{4}:[a-f0-9]{2}:[0|1][a-f0-9]\.[0-7])/", sysdir)
            if m and pcidata:
                pciid = m.group(1)
                did = re.escape(pciid)
                # Match the lspci -D line starting with this PCI id.
                m = re.search("^" + did + "\s(.*)$", pcidata, re.MULTILINE)
                if m:
                    d['host'] = m.group(1)

            self.get_holders(d, sysdir)

            self.facts['devices'][diskname] = d
|
|
|
|
|
2015-03-13 20:35:31 +00:00
|
|
|
def get_uptime_facts(self):
|
2015-09-26 16:54:56 +00:00
|
|
|
uptime_file_content = get_file_content('/proc/uptime')
|
|
|
|
if uptime_file_content:
|
|
|
|
uptime_seconds_string = uptime_file_content.split(' ')[0]
|
|
|
|
self.facts['uptime_seconds'] = int(float(uptime_seconds_string))
|
2014-03-16 20:02:37 +00:00
|
|
|
|
Add LVM facts to setup module
This commit adds LinuxHardware.get_device_facts() and calls that from
.populate().
LVM facts are only gathered if the setup module is running as root and
the lvm utilities are available (tested by searching for 'vgs').
If the conditions are met, facts are set for each volume group and
logical volume.
Example:
Test LVM Data:
$ sudo vgs
VG #PV #LV #SN Attr VSize VFree
test 1 2 0 wz--n- 5.00g 2.00g
$ sudo lvs
LV VG Attr LSize Pool Origin Data% Move Log Copy% Convert
testlv test -wi-a---- 1.00g
testlv2 test -wi-a---- 2.00g
Facts Returned:
$ ansible localhost -i /tmp/inv -m setup -a 'filter=ansible_lvm'
localhost | success >> {
"ansible_facts": {
"ansible_lvm": {
"lvs": {
"testlv": {
"size_g": "1.00",
"vg": "test"
},
"testlv2": {
"size_g": "2.00",
"vg": "test"
}
},
"vgs": {
"test": {
"free_g": "2.00",
"num_lvs": "2",
"num_pvs": "1",
"size_g": "5.00"
}
}
}
},
"changed": false
}
Test as non-root:
$ ansible localhost -i /tmp/inv-user -m setup -a 'filter=ansible_lvm'
localhost | success >> {
"ansible_facts": {},
"changed": false
}
Test without lvm utilities available
$ sudo mv /sbin/vgs{,.bk}
$ ansible localhost -i /tmp/inv -m setup -a 'filter=ansible_lvm'
localhost | success >> {
"ansible_facts": {},
"changed": false
}
2014-12-12 12:47:43 +00:00
|
|
|
def get_lvm_facts(self):
|
|
|
|
""" Get LVM Facts if running as root and lvm utils are available """
|
|
|
|
|
2016-03-14 16:45:28 +00:00
|
|
|
if os.getuid() == 0 and self.module.get_bin_path('vgs'):
|
Add LVM facts to setup module
This commit adds LinuxHardware.get_device_facts() and calls that from
.populate().
LVM facts are only gathered if the setup module is running as root and
the lvm utilities are available (tested by searching for 'vgs').
If the conditions are met, facts are set for each volume group and
logical volume.
Example:
Test LVM Data:
$ sudo vgs
VG #PV #LV #SN Attr VSize VFree
test 1 2 0 wz--n- 5.00g 2.00g
$ sudo lvs
LV VG Attr LSize Pool Origin Data% Move Log Copy% Convert
testlv test -wi-a---- 1.00g
testlv2 test -wi-a---- 2.00g
Facts Returned:
$ ansible localhost -i /tmp/inv -m setup -a 'filter=ansible_lvm'
localhost | success >> {
"ansible_facts": {
"ansible_lvm": {
"lvs": {
"testlv": {
"size_g": "1.00",
"vg": "test"
},
"testlv2": {
"size_g": "2.00",
"vg": "test"
}
},
"vgs": {
"test": {
"free_g": "2.00",
"num_lvs": "2",
"num_pvs": "1",
"size_g": "5.00"
}
}
}
},
"changed": false
}
Test as non-root:
$ ansible localhost -i /tmp/inv-user -m setup -a 'filter=ansible_lvm'
localhost | success >> {
"ansible_facts": {},
"changed": false
}
Test without lvm utilities available
$ sudo mv /sbin/vgs{,.bk}
$ ansible localhost -i /tmp/inv -m setup -a 'filter=ansible_lvm'
localhost | success >> {
"ansible_facts": {},
"changed": false
}
2014-12-12 12:47:43 +00:00
|
|
|
lvm_util_options = '--noheadings --nosuffix --units g'
|
|
|
|
|
2016-03-14 16:45:28 +00:00
|
|
|
vgs_path = self.module.get_bin_path('vgs')
|
Add LVM facts to setup module
This commit adds LinuxHardware.get_device_facts() and calls that from
.populate().
LVM facts are only gathered if the setup module is running as root and
the lvm utilities are available (tested by searching for 'vgs').
If the conditions are met, facts are set for each volume group and
logical volume.
Example:
Test LVM Data:
$ sudo vgs
VG #PV #LV #SN Attr VSize VFree
test 1 2 0 wz--n- 5.00g 2.00g
$ sudo lvs
LV VG Attr LSize Pool Origin Data% Move Log Copy% Convert
testlv test -wi-a---- 1.00g
testlv2 test -wi-a---- 2.00g
Facts Returned:
$ ansible localhost -i /tmp/inv -m setup -a 'filter=ansible_lvm'
localhost | success >> {
"ansible_facts": {
"ansible_lvm": {
"lvs": {
"testlv": {
"size_g": "1.00",
"vg": "test"
},
"testlv2": {
"size_g": "2.00",
"vg": "test"
}
},
"vgs": {
"test": {
"free_g": "2.00",
"num_lvs": "2",
"num_pvs": "1",
"size_g": "5.00"
}
}
}
},
"changed": false
}
Test as non-root:
$ ansible localhost -i /tmp/inv-user -m setup -a 'filter=ansible_lvm'
localhost | success >> {
"ansible_facts": {},
"changed": false
}
Test without lvm utilities available
$ sudo mv /sbin/vgs{,.bk}
$ ansible localhost -i /tmp/inv -m setup -a 'filter=ansible_lvm'
localhost | success >> {
"ansible_facts": {},
"changed": false
}
2014-12-12 12:47:43 +00:00
|
|
|
#vgs fields: VG #PV #LV #SN Attr VSize VFree
|
|
|
|
vgs={}
|
2015-08-12 14:53:13 +00:00
|
|
|
if vgs_path:
|
2016-03-14 16:45:28 +00:00
|
|
|
rc, vg_lines, err = self.module.run_command( '%s %s' % (vgs_path, lvm_util_options))
|
2015-08-12 14:53:13 +00:00
|
|
|
for vg_line in vg_lines.splitlines():
|
|
|
|
items = vg_line.split()
|
|
|
|
vgs[items[0]] = {'size_g':items[-2],
|
|
|
|
'free_g':items[-1],
|
|
|
|
'num_lvs': items[2],
|
|
|
|
'num_pvs': items[1]}
|
Add LVM facts to setup module
This commit adds LinuxHardware.get_device_facts() and calls that from
.populate().
LVM facts are only gathered if the setup module is running as root and
the lvm utilities are available (tested by searching for 'vgs').
If the conditions are met, facts are set for each volume group and
logical volume.
Example:
Test LVM Data:
$ sudo vgs
VG #PV #LV #SN Attr VSize VFree
test 1 2 0 wz--n- 5.00g 2.00g
$ sudo lvs
LV VG Attr LSize Pool Origin Data% Move Log Copy% Convert
testlv test -wi-a---- 1.00g
testlv2 test -wi-a---- 2.00g
Facts Returned:
$ ansible localhost -i /tmp/inv -m setup -a 'filter=ansible_lvm'
localhost | success >> {
"ansible_facts": {
"ansible_lvm": {
"lvs": {
"testlv": {
"size_g": "1.00",
"vg": "test"
},
"testlv2": {
"size_g": "2.00",
"vg": "test"
}
},
"vgs": {
"test": {
"free_g": "2.00",
"num_lvs": "2",
"num_pvs": "1",
"size_g": "5.00"
}
}
}
},
"changed": false
}
Test as non-root:
$ ansible localhost -i /tmp/inv-user -m setup -a 'filter=ansible_lvm'
localhost | success >> {
"ansible_facts": {},
"changed": false
}
Test without lvm utilities available
$ sudo mv /sbin/vgs{,.bk}
$ ansible localhost -i /tmp/inv -m setup -a 'filter=ansible_lvm'
localhost | success >> {
"ansible_facts": {},
"changed": false
}
2014-12-12 12:47:43 +00:00
|
|
|
|
2016-03-14 16:45:28 +00:00
|
|
|
lvs_path = self.module.get_bin_path('lvs')
|
Add LVM facts to setup module
This commit adds LinuxHardware.get_device_facts() and calls that from
.populate().
LVM facts are only gathered if the setup module is running as root and
the lvm utilities are available (tested by searching for 'vgs').
If the conditions are met, facts are set for each volume group and
logical volume.
Example:
Test LVM Data:
$ sudo vgs
VG #PV #LV #SN Attr VSize VFree
test 1 2 0 wz--n- 5.00g 2.00g
$ sudo lvs
LV VG Attr LSize Pool Origin Data% Move Log Copy% Convert
testlv test -wi-a---- 1.00g
testlv2 test -wi-a---- 2.00g
Facts Returned:
$ ansible localhost -i /tmp/inv -m setup -a 'filter=ansible_lvm'
localhost | success >> {
"ansible_facts": {
"ansible_lvm": {
"lvs": {
"testlv": {
"size_g": "1.00",
"vg": "test"
},
"testlv2": {
"size_g": "2.00",
"vg": "test"
}
},
"vgs": {
"test": {
"free_g": "2.00",
"num_lvs": "2",
"num_pvs": "1",
"size_g": "5.00"
}
}
}
},
"changed": false
}
Test as non-root:
$ ansible localhost -i /tmp/inv-user -m setup -a 'filter=ansible_lvm'
localhost | success >> {
"ansible_facts": {},
"changed": false
}
Test without lvm utilities available
$ sudo mv /sbin/vgs{,.bk}
$ ansible localhost -i /tmp/inv -m setup -a 'filter=ansible_lvm'
localhost | success >> {
"ansible_facts": {},
"changed": false
}
2014-12-12 12:47:43 +00:00
|
|
|
#lvs fields:
|
|
|
|
#LV VG Attr LSize Pool Origin Data% Move Log Copy% Convert
|
|
|
|
lvs = {}
|
2015-08-12 14:53:13 +00:00
|
|
|
if lvs_path:
|
2016-03-14 16:45:28 +00:00
|
|
|
rc, lv_lines, err = self.module.run_command( '%s %s' % (lvs_path, lvm_util_options))
|
2015-08-12 14:53:13 +00:00
|
|
|
for lv_line in lv_lines.splitlines():
|
|
|
|
items = lv_line.split()
|
|
|
|
lvs[items[0]] = {'size_g': items[3], 'vg': items[1]}
|
|
|
|
|
|
|
|
self.facts['lvm'] = {'lvs': lvs, 'vgs': vgs}
|
Add LVM facts to setup module
This commit adds LinuxHardware.get_device_facts() and calls that from
.populate().
LVM facts are only gathered if the setup module is running as root and
the lvm utilities are available (tested by searching for 'vgs').
If the conditions are met, facts are set for each volume group and
logical volume.
Example:
Test LVM Data:
$ sudo vgs
VG #PV #LV #SN Attr VSize VFree
test 1 2 0 wz--n- 5.00g 2.00g
$ sudo lvs
LV VG Attr LSize Pool Origin Data% Move Log Copy% Convert
testlv test -wi-a---- 1.00g
testlv2 test -wi-a---- 2.00g
Facts Returned:
$ ansible localhost -i /tmp/inv -m setup -a 'filter=ansible_lvm'
localhost | success >> {
"ansible_facts": {
"ansible_lvm": {
"lvs": {
"testlv": {
"size_g": "1.00",
"vg": "test"
},
"testlv2": {
"size_g": "2.00",
"vg": "test"
}
},
"vgs": {
"test": {
"free_g": "2.00",
"num_lvs": "2",
"num_pvs": "1",
"size_g": "5.00"
}
}
}
},
"changed": false
}
Test as non-root:
$ ansible localhost -i /tmp/inv-user -m setup -a 'filter=ansible_lvm'
localhost | success >> {
"ansible_facts": {},
"changed": false
}
Test without lvm utilities available
$ sudo mv /sbin/vgs{,.bk}
$ ansible localhost -i /tmp/inv -m setup -a 'filter=ansible_lvm'
localhost | success >> {
"ansible_facts": {},
"changed": false
}
2014-12-12 12:47:43 +00:00
|
|
|
|
|
|
|
|
2014-03-16 20:02:37 +00:00
|
|
|
class SunOSHardware(Hardware):
    """
    Solaris-specific subclass of Hardware.

    In addition to the generic memory and cpu facts, this also sets
    swap_reserved_mb and swap_allocated_mb that is available from *swap -s*.
    """
    platform = 'SunOS'

    def populate(self):
        """Gather cpu, memory and (best-effort) mount facts."""
        self.get_cpu_facts()
        self.get_memory_facts()
        try:
            self.get_mount_facts()
        except TimeoutError:
            # Mount facts are optional; a hung mount must not stall setup.
            pass
        return self.facts

    def get_cpu_facts(self):
        """Parse `kstat cpu_info` output into processor facts."""
        physid = 0
        sockets = {}
        rc, out, err = self.module.run_command("/usr/bin/kstat cpu_info")
        self.facts['processor'] = []
        for line in out.split('\n'):
            if len(line) < 1:
                continue
            data = line.split(None, 1)
            key = data[0].strip()
            # "brand" works on Solaris 10 & 11. "implementation" for Solaris 9.
            if key == 'module:':
                brand = ''
            elif key == 'brand':
                brand = data[1].strip()
            elif key == 'clock_MHz':
                clock_mhz = data[1].strip()
            elif key == 'implementation':
                processor = brand or data[1].strip()
                # Add clock speed to description for SPARC CPU
                # NOTE(review): assumes kstat emitted clock_MHz before
                # implementation for SPARC -- appears to hold in practice.
                if self.facts['machine'] != 'i86pc':
                    processor += " @ " + clock_mhz + "MHz"
                if 'processor' not in self.facts:
                    self.facts['processor'] = []
                self.facts['processor'].append(processor)
            elif key == 'chip_id':
                physid = data[1].strip()
                if physid not in sockets:
                    sockets[physid] = 1
                else:
                    sockets[physid] += 1
        # Counting cores on Solaris can be complicated.
        # https://blogs.oracle.com/mandalika/entry/solaris_show_me_the_cpu
        # Treat 'processor_count' as physical sockets and 'processor_cores' as
        # virtual CPUs visible to Solaris. Not a true count of cores for modern SPARC as
        # these processors have: sockets -> cores -> threads/virtual CPU.
        if len(sockets) > 0:
            self.facts['processor_count'] = len(sockets)
            # sum() replaces the Python-2-only reduce() builtin.
            self.facts['processor_cores'] = sum(sockets.values())
        else:
            self.facts['processor_cores'] = 'NA'
            self.facts['processor_count'] = len(self.facts['processor'])

    def get_memory_facts(self):
        """Read total memory from prtconf and swap usage from `swap -s`."""
        rc, out, err = self.module.run_command(["/usr/sbin/prtconf"])
        for line in out.split('\n'):
            if 'Memory size' in line:
                self.facts['memtotal_mb'] = line.split()[2]
        rc, out, err = self.module.run_command("/usr/sbin/swap -s")
        # `swap -s` values carry a trailing unit character ("123456k");
        # split once instead of re-splitting for every field.
        fields = out.split()
        # int() replaces the Python-2-only long() builtin; // keeps the
        # Python 2 floor-division semantics on Python 3.
        allocated = int(fields[1][:-1])
        reserved = int(fields[5][:-1])
        used = int(fields[8][:-1])
        free = int(fields[10][:-1])
        self.facts['swapfree_mb'] = free // 1024
        self.facts['swaptotal_mb'] = (free + used) // 1024
        self.facts['swap_allocated_mb'] = allocated // 1024
        self.facts['swap_reserved_mb'] = reserved // 1024

    @timeout(10)
    def get_mount_facts(self):
        """Parse /etc/mnttab into the 'mounts' fact."""
        self.facts['mounts'] = []
        # For a detailed format description see mnttab(4)
        #   special mount_point fstype options time
        fstab = get_file_content('/etc/mnttab')
        if fstab:
            for line in fstab.split('\n'):
                fields = line.rstrip('\n').split('\t')
                size_total, size_available = self._get_mount_size_facts(fields[1])
                self.facts['mounts'].append({'mount': fields[1], 'device': fields[0], 'fstype': fields[2], 'options': fields[3], 'time': fields[4], 'size_total': size_total, 'size_available': size_available})
2014-03-16 20:02:37 +00:00
|
|
|
class OpenBSDHardware(Hardware):
    """
    OpenBSD-specific subclass of Hardware. Defines memory, CPU and device facts:
    - memfree_mb
    - memtotal_mb
    - swapfree_mb
    - swaptotal_mb
    - processor (a list)
    - processor_cores
    - processor_count
    - processor_speed
    - devices
    """
    platform = 'OpenBSD'
    DMESG_BOOT = '/var/run/dmesg.boot'

    def populate(self):
        """Gather sysctl, memory, cpu, device and mount facts."""
        self.sysctl = self.get_sysctl()
        self.get_memory_facts()
        self.get_processor_facts()
        self.get_device_facts()
        self.get_mount_facts()
        return self.facts

    def get_sysctl(self):
        """Return `sysctl hw` output as a key/value dict, {} on failure."""
        rc, out, err = self.module.run_command(["/sbin/sysctl", "hw"])
        if rc != 0:
            return dict()
        sysctl = dict()
        for line in out.splitlines():
            # Split only on the first '=': sysctl values may themselves
            # contain '=' which previously raised ValueError here.
            (key, value) = line.split('=', 1)
            sysctl[key] = value.strip()
        return sysctl

    @timeout(10)
    def get_mount_facts(self):
        """Parse /etc/fstab into the 'mounts' fact."""
        self.facts['mounts'] = []
        fstab = get_file_content('/etc/fstab')
        if fstab:
            for line in fstab.split('\n'):
                if line.startswith('#') or line.strip() == '':
                    continue
                fields = re.sub(r'\s+',' ',line.rstrip('\n')).split()
                # Skip swap entries and fsck placeholder lines.
                if fields[1] == 'none' or fields[3] == 'xx':
                    continue
                size_total, size_available = self._get_mount_size_facts(fields[1])
                self.facts['mounts'].append({'mount': fields[1], 'device': fields[0], 'fstype': fields[2], 'options': fields[3], 'size_total': size_total, 'size_available': size_available})

    def get_memory_facts(self):
        """Derive memory and swap facts from vmstat, sysctl and swapctl."""
        # Get free memory. vmstat output looks like:
        #  procs    memory       page                    disks    traps          cpu
        #  r b w    avm     fre  flt  re  pi  po  fr  sr wd0 fd0  int   sys   cs us sy id
        #  0 0 0  47512   28160   51   0   0   0   0   0   1   0  116    89   17  0  1 99
        rc, out, err = self.module.run_command("/usr/bin/vmstat")
        if rc == 0:
            # int() replaces the Python-2-only long(); // keeps floor division.
            self.facts['memfree_mb'] = int(out.splitlines()[-1].split()[4]) // 1024
            self.facts['memtotal_mb'] = int(self.sysctl['hw.usermem']) // 1024 // 1024

        # Get swapctl info. swapctl output looks like:
        # total: 69268 1K-blocks allocated, 0 used, 69268 available
        # And for older OpenBSD:
        # total: 69268k bytes allocated = 0k used, 69268k available
        rc, out, err = self.module.run_command("/sbin/swapctl -sk")
        if rc == 0:
            data = out.split()
            # Older releases append a unit suffix ("k"/"m"/"g"); strip it.
            # This replaces a Python-2-only str.translate(table, deletechars)
            # call and works on both Python 2 and 3.
            self.facts['swapfree_mb'] = int(data[-2].rstrip("kmg")) // 1024
            self.facts['swaptotal_mb'] = int(data[1].rstrip("kmg")) // 1024

    def get_processor_facts(self):
        """Extract per-CPU description lines from the boot dmesg."""
        processor = []
        dmesg_boot = get_file_content(OpenBSDHardware.DMESG_BOOT)
        if not dmesg_boot:
            rc, dmesg_boot, err = self.module.run_command("/sbin/dmesg")
        i = 0
        for line in dmesg_boot.splitlines():
            # CPUs announce themselves as "cpu0:", "cpu1:", ... in order.
            if line.split(' ', 1)[0] == 'cpu%i:' % i:
                processor.append(line.split(' ', 1)[1])
                i = i + 1
        processor_count = i
        self.facts['processor'] = processor
        self.facts['processor_count'] = processor_count
        # I found no way to figure out the number of Cores per CPU in OpenBSD
        self.facts['processor_cores'] = 'NA'

    def get_device_facts(self):
        """List disk device names from the hw.disknames sysctl."""
        devices = []
        devices.extend(self.sysctl['hw.disknames'].split(','))
        self.facts['devices'] = devices
|
|
|
|
|
|
|
|
class FreeBSDHardware(Hardware):
    """
    FreeBSD-specific subclass of Hardware. Defines memory and CPU facts:
    - memfree_mb
    - memtotal_mb
    - swapfree_mb
    - swaptotal_mb
    - processor (a list)
    - processor_cores
    - processor_count
    - devices
    """
    platform = 'FreeBSD'
    DMESG_BOOT = '/var/run/dmesg.boot'

    def populate(self):
        """Gather cpu, memory, dmi, device and (best-effort) mount facts."""
        self.get_cpu_facts()
        self.get_memory_facts()
        self.get_dmi_facts()
        self.get_device_facts()
        try:
            self.get_mount_facts()
        except TimeoutError:
            # Mount facts are optional; a hung mount must not stall setup.
            pass
        return self.facts

    def get_cpu_facts(self):
        """Read CPU count from sysctl and CPU model lines from dmesg."""
        self.facts['processor'] = []
        rc, out, err = self.module.run_command("/sbin/sysctl -n hw.ncpu")
        self.facts['processor_count'] = out.strip()

        dmesg_boot = get_file_content(FreeBSDHardware.DMESG_BOOT)
        if not dmesg_boot:
            rc, dmesg_boot, err = self.module.run_command("/sbin/dmesg")
        for line in dmesg_boot.split('\n'):
            if 'CPU:' in line:
                cpu = re.sub(r'CPU:\s+', r"", line)
                self.facts['processor'].append(cpu.strip())
            if 'Logical CPUs per core' in line:
                self.facts['processor_cores'] = line.split()[4]

    def get_memory_facts(self):
        """Derive memory facts from `sysctl vm.stats` and swapinfo."""
        rc, out, err = self.module.run_command("/sbin/sysctl vm.stats")
        for line in out.split('\n'):
            data = line.split()
            # int() replaces the Python-2-only long() builtin.
            if 'vm.stats.vm.v_page_size' in line:
                pagesize = int(data[1])
            if 'vm.stats.vm.v_page_count' in line:
                pagecount = int(data[1])
            if 'vm.stats.vm.v_free_count' in line:
                freecount = int(data[1])
        # // keeps the Python 2 floor-division semantics on Python 3.
        self.facts['memtotal_mb'] = pagesize * pagecount // 1024 // 1024
        self.facts['memfree_mb'] = pagesize * freecount // 1024 // 1024
        # Get swapinfo. swapinfo output looks like:
        # Device          1M-blocks     Used    Avail Capacity
        # /dev/ada0p3        314368        0   314368     0%
        #
        rc, out, err = self.module.run_command("/usr/sbin/swapinfo -k")
        lines = out.split('\n')
        if len(lines[-1]) == 0:
            lines.pop()
        data = lines[-1].split()
        # Only the header line remains when no swap devices are configured.
        if data[0] != 'Device':
            self.facts['swaptotal_mb'] = int(data[1]) // 1024
            self.facts['swapfree_mb'] = int(data[3]) // 1024

    @timeout(10)
    def get_mount_facts(self):
        """Parse /etc/fstab into the 'mounts' fact."""
        self.facts['mounts'] = []
        fstab = get_file_content('/etc/fstab')
        if fstab:
            for line in fstab.split('\n'):
                if line.startswith('#') or line.strip() == '':
                    continue
                fields = re.sub(r'\s+',' ',line.rstrip('\n')).split()
                size_total, size_available = self._get_mount_size_facts(fields[1])
                self.facts['mounts'].append({'mount': fields[1], 'device': fields[0], 'fstype': fields[2], 'options': fields[3], 'size_total': size_total, 'size_available': size_available})

    def get_device_facts(self):
        """Map disk device names to their slice names from /dev entries."""
        sysdir = '/dev'
        self.facts['devices'] = {}
        drives = re.compile(r'(ada?\d+|da\d+|a?cd\d+)') #TODO: rc, disks, err = self.module.run_command("/sbin/sysctl kern.disks")
        slices = re.compile(r'(ada?\d+s\d+\w*|da\d+s\d+\w*)')
        if os.path.isdir(sysdir):
            dirlist = sorted(os.listdir(sysdir))
            for device in dirlist:
                d = drives.match(device)
                if d:
                    self.facts['devices'][d.group(1)] = []
                s = slices.match(device)
                if s:
                    # The sorted listing guarantees the drive entry ("ada0")
                    # was seen before its slices ("ada0s1"), so d is valid.
                    self.facts['devices'][d.group(1)].append(s.group(1))

    def get_dmi_facts(self):
        ''' learn dmi facts from system

        Use dmidecode executable if available'''

        # Fall back to using dmidecode, if available
        dmi_bin = self.module.get_bin_path('dmidecode')
        DMI_DICT = dict(
            bios_date='bios-release-date',
            bios_version='bios-version',
            form_factor='chassis-type',
            product_name='system-product-name',
            product_serial='system-serial-number',
            product_uuid='system-uuid',
            product_version='system-version',
            system_vendor='system-manufacturer'
        )
        for (k, v) in DMI_DICT.items():
            if dmi_bin is not None:
                (rc, out, err) = self.module.run_command('%s -s %s' % (dmi_bin, v))
                if rc == 0:
                    # Strip out commented lines (specific dmidecode output)
                    self.facts[k] = ''.join([ line for line in out.split('\n') if not line.startswith('#') ])
                    try:
                        json.dumps(self.facts[k])
                    except UnicodeDecodeError:
                        # Undecodable firmware strings cannot be serialized.
                        self.facts[k] = 'NA'
                else:
                    self.facts[k] = 'NA'
            else:
                self.facts[k] = 'NA'
|
|
|
|
|
2016-08-18 00:58:51 +00:00
|
|
|
|
2015-08-30 17:04:30 +00:00
|
|
|
class DragonFlyHardware(FreeBSDHardware):
    # DragonFly BSD forked from FreeBSD; the FreeBSD fact-collection
    # logic applies unchanged, so only the platform identifier used for
    # subclass selection differs.
    platform = 'DragonFly'
2014-03-16 20:02:37 +00:00
|
|
|
|
|
|
|
class NetBSDHardware(Hardware):
    """
    NetBSD-specific subclass of Hardware. Defines memory and CPU facts:
    - memfree_mb
    - memtotal_mb
    - swapfree_mb
    - swaptotal_mb
    - processor (a list)
    - processor_cores
    - processor_count
    - devices
    """
    platform = 'NetBSD'
    MEMORY_FACTS = ['MemTotal', 'SwapTotal', 'MemFree', 'SwapFree']

    def populate(self):
        """Gather cpu, memory and (best-effort) mount facts."""
        self.get_cpu_facts()
        self.get_memory_facts()
        try:
            self.get_mount_facts()
        except TimeoutError:
            # Mount facts are optional; a hung mount must not stall setup.
            pass
        return self.facts

    def get_cpu_facts(self):
        """Parse the Linux-compatible /proc/cpuinfo into processor facts."""
        i = 0
        physid = 0
        sockets = {}
        if not os.access("/proc/cpuinfo", os.R_OK):
            return
        self.facts['processor'] = []
        for line in get_file_lines("/proc/cpuinfo"):
            data = line.split(":", 1)
            key = data[0].strip()
            # model name is for Intel arch, Processor (mind the uppercase P)
            # works for some ARM devices, like the Sheevaplug.
            if key == 'model name' or key == 'Processor':
                if 'processor' not in self.facts:
                    self.facts['processor'] = []
                self.facts['processor'].append(data[1].strip())
                i += 1
            elif key == 'physical id':
                physid = data[1].strip()
                if physid not in sockets:
                    sockets[physid] = 1
            elif key == 'cpu cores':
                sockets[physid] = int(data[1].strip())
        if len(sockets) > 0:
            self.facts['processor_count'] = len(sockets)
            # sum() replaces the Python-2-only reduce() builtin.
            self.facts['processor_cores'] = sum(sockets.values())
        else:
            self.facts['processor_count'] = i
            self.facts['processor_cores'] = 'NA'

    def get_memory_facts(self):
        """Read memory/swap totals from the Linux-compatible /proc/meminfo."""
        if not os.access("/proc/meminfo", os.R_OK):
            return
        for line in get_file_lines("/proc/meminfo"):
            data = line.split(":", 1)
            key = data[0]
            if key in NetBSDHardware.MEMORY_FACTS:
                val = data[1].strip().split(' ')[0]
                # int() replaces the Python-2-only long() builtin; // keeps
                # the Python 2 floor-division semantics on Python 3.
                self.facts["%s_mb" % key.lower()] = int(val) // 1024

    @timeout(10)
    def get_mount_facts(self):
        """Parse /etc/fstab into the 'mounts' fact."""
        self.facts['mounts'] = []
        fstab = get_file_content('/etc/fstab')
        if fstab:
            for line in fstab.split('\n'):
                if line.startswith('#') or line.strip() == '':
                    continue
                fields = re.sub(r'\s+',' ',line.rstrip('\n')).split()
                size_total, size_available = self._get_mount_size_facts(fields[1])
                self.facts['mounts'].append({'mount': fields[1], 'device': fields[0], 'fstype': fields[2], 'options': fields[3], 'size_total': size_total, 'size_available': size_available})
|
|
|
|
class AIX(Hardware):
    """
    AIX-specific subclass of Hardware. Defines memory and CPU facts:
    - memfree_mb
    - memtotal_mb
    - swapfree_mb
    - swaptotal_mb
    - processor (a list)
    - processor_cores
    - processor_count
    """
    platform = 'AIX'

    def populate(self):
        """Gather cpu, memory and dmi facts."""
        self.get_cpu_facts()
        self.get_memory_facts()
        self.get_dmi_facts()
        return self.facts

    def get_cpu_facts(self):
        """Derive processor facts from lsdev/lsattr output."""
        self.facts['processor'] = []

        rc, out, err = self.module.run_command("/usr/sbin/lsdev -Cc processor")
        if out:
            i = 0
            for line in out.split('\n'):
                if 'Available' in line:
                    # Remember the first processor device name for lsattr.
                    if i == 0:
                        data = line.split(' ')
                        cpudev = data[0]
                    i += 1
            self.facts['processor_count'] = int(i)

            rc, out, err = self.module.run_command("/usr/sbin/lsattr -El " + cpudev + " -a type")

            data = out.split(' ')
            self.facts['processor'] = data[1]

            rc, out, err = self.module.run_command("/usr/sbin/lsattr -El " + cpudev + " -a smt_threads")

            data = out.split(' ')
            self.facts['processor_cores'] = int(data[1])

    def get_memory_facts(self):
        """Derive memory facts from `vmstat -v` and swap facts from lsps."""
        pagesize = 4096
        rc, out, err = self.module.run_command("/usr/bin/vmstat -v")
        for line in out.split('\n'):
            data = line.split()
            # int() replaces the Python-2-only long() builtin.
            if 'memory pages' in line:
                pagecount = int(data[0])
            if 'free pages' in line:
                freecount = int(data[0])
        # // keeps the Python 2 floor-division semantics on Python 3.
        self.facts['memtotal_mb'] = pagesize * pagecount // 1024 // 1024
        self.facts['memfree_mb'] = pagesize * freecount // 1024 // 1024
        # Get swapinfo. lsps -s output looks like:
        # Total Paging Space   Percent Used
        #       512MB               1%
        rc, out, err = self.module.run_command("/usr/sbin/lsps -s")
        if out:
            lines = out.split('\n')
            data = lines[1].split()
            swaptotal_mb = int(data[0].rstrip('MB'))
            percused = int(data[1].rstrip('%'))
            self.facts['swaptotal_mb'] = swaptotal_mb
            self.facts['swapfree_mb'] = int(swaptotal_mb * (100 - percused) // 100)

    def get_dmi_facts(self):
        """Read the firmware version attribute from sys0."""
        rc, out, err = self.module.run_command("/usr/sbin/lsattr -El sys0 -a fwversion")
        data = out.split()
        fwver = data[1]
        # The old code used data[1].strip('IBM,'), which strips ANY of the
        # characters 'I', 'B', 'M', ',' from BOTH ends (mangling versions
        # that end in those letters). Remove only the vendor prefix.
        if fwver.startswith('IBM,'):
            fwver = fwver[len('IBM,'):]
        self.facts['firmware_version'] = fwver
|
|
|
|
class HPUX(Hardware):
    """
    HP-UX-specific subclass of Hardware. Defines memory and CPU facts:
    - memfree_mb
    - memtotal_mb
    - swapfree_mb
    - swaptotal_mb
    - processor
    - processor_cores
    - processor_count
    - model
    - firmware_version
    """

    platform = 'HP-UX'

    def populate(self):
        """Collect HP-UX hardware facts and return the facts dict."""
        self.get_cpu_facts()
        self.get_memory_facts()
        self.get_hw_facts()
        return self.facts

    def get_cpu_facts(self):
        """Fill CPU facts, branching on architecture and HP-UX release.

        PA-RISC (9000/800) counts ioscan processor entries; Itanium (ia64)
        has to scrape /usr/contrib/bin/machinfo, whose output format differs
        between B.11.23 and B.11.31.
        """
        if self.facts['architecture'] == '9000/800':
            rc, out, err = self.module.run_command("ioscan -FkCprocessor | wc -l", use_unsafe_shell=True)
            self.facts['processor_count'] = int(out.strip())
        #Working with machinfo mess
        elif self.facts['architecture'] == 'ia64':
            if self.facts['distribution_version'] == "B.11.23":
                rc, out, err = self.module.run_command("/usr/contrib/bin/machinfo | grep 'Number of CPUs'", use_unsafe_shell=True)
                self.facts['processor_count'] = int(out.strip().split('=')[1])
                rc, out, err = self.module.run_command("/usr/contrib/bin/machinfo | grep 'processor family'", use_unsafe_shell=True)
                self.facts['processor'] = re.search('.*(Intel.*)', out).groups()[0].strip()
                rc, out, err = self.module.run_command("ioscan -FkCprocessor | wc -l", use_unsafe_shell=True)
                self.facts['processor_cores'] = int(out.strip())
            if self.facts['distribution_version'] == "B.11.31":
                #if machinfo return cores strings release B.11.31 > 1204
                rc, out, err = self.module.run_command("/usr/contrib/bin/machinfo | grep core | wc -l", use_unsafe_shell=True)
                if out.strip()== '0':
                    rc, out, err = self.module.run_command("/usr/contrib/bin/machinfo | grep Intel", use_unsafe_shell=True)
                    self.facts['processor_count'] = int(out.strip().split(" ")[0])
                    #If hyperthreading is active divide cores by 2
                    rc, out, err = self.module.run_command("/usr/sbin/psrset | grep LCPU", use_unsafe_shell=True)
                    data = re.sub(' +',' ',out).strip().split(' ')
                    if len(data) == 1:
                        hyperthreading = 'OFF'
                    else:
                        hyperthreading = data[1]
                    rc, out, err = self.module.run_command("/usr/contrib/bin/machinfo | grep logical", use_unsafe_shell=True)
                    data = out.strip().split(" ")
                    if hyperthreading == 'ON':
                        # Floor division: keeps the core count an int on Python 3
                        # (true division would produce a float here).
                        self.facts['processor_cores'] = int(data[0]) // 2
                    else:
                        if len(data) == 1:
                            self.facts['processor_cores'] = self.facts['processor_count']
                        else:
                            self.facts['processor_cores'] = int(data[0])
                    rc, out, err = self.module.run_command("/usr/contrib/bin/machinfo | grep Intel |cut -d' ' -f4-", use_unsafe_shell=True)
                    self.facts['processor'] = out.strip()
                else:
                    rc, out, err = self.module.run_command("/usr/contrib/bin/machinfo | egrep 'socket[s]?$' | tail -1", use_unsafe_shell=True)
                    self.facts['processor_count'] = int(out.strip().split(" ")[0])
                    rc, out, err = self.module.run_command("/usr/contrib/bin/machinfo | grep -e '[0-9] core' | tail -1", use_unsafe_shell=True)
                    self.facts['processor_cores'] = int(out.strip().split(" ")[0])
                    rc, out, err = self.module.run_command("/usr/contrib/bin/machinfo | grep Intel", use_unsafe_shell=True)
                    self.facts['processor'] = out.strip()

    def get_memory_facts(self):
        """Fill memory/swap facts from vmstat, syslog/adb or machinfo, and swapinfo.

        All byte/KB scaling uses floor division so results stay integral on
        Python 3 (identical to the Python 2 behavior for int operands).
        """
        pagesize = 4096
        rc, out, err = self.module.run_command("/usr/bin/vmstat | tail -1", use_unsafe_shell=True)
        # Column 5 of the last vmstat line is the free-page count.
        data = int(re.sub(' +',' ',out).split(' ')[5].strip())
        self.facts['memfree_mb'] = pagesize * data // 1024 // 1024
        if self.facts['architecture'] == '9000/800':
            try:
                rc, out, err = self.module.run_command("grep Physical /var/adm/syslog/syslog.log")
                data = re.search('.*Physical: ([0-9]*) Kbytes.*',out).groups()[0].strip()
                self.facts['memtotal_mb'] = int(data) // 1024
            except AttributeError:
                #For systems where memory details aren't sent to syslog or the log has rotated, use parsed
                #adb output. Unfortunately /dev/kmem doesn't have world-read, so this only works as root.
                if os.access("/dev/kmem", os.R_OK):
                    rc, out, err = self.module.run_command("echo 'phys_mem_pages/D' | adb -k /stand/vmunix /dev/kmem | tail -1 | awk '{print $2}'", use_unsafe_shell=True)
                    if not err:
                        data = out
                        # 4 KB pages: pages / 256 == MB
                        self.facts['memtotal_mb'] = int(data) // 256
        else:
            rc, out, err = self.module.run_command("/usr/contrib/bin/machinfo | grep Memory", use_unsafe_shell=True)
            data = re.search('Memory[\ :=]*([0-9]*).*MB.*',out).groups()[0].strip()
            self.facts['memtotal_mb'] = int(data)
        rc, out, err = self.module.run_command("/usr/sbin/swapinfo -m -d -f -q")
        self.facts['swaptotal_mb'] = int(out.strip())
        rc, out, err = self.module.run_command("/usr/sbin/swapinfo -m -d -f | egrep '^dev|^fs'", use_unsafe_shell=True)
        swap = 0
        # Sum the 'free' column across all dev/fs swap areas.
        for line in out.strip().split('\n'):
            swap += int(re.sub(' +',' ',line).split(' ')[3].strip())
        self.facts['swapfree_mb'] = swap

    def get_hw_facts(self):
        """Fill model and (on ia64) firmware_version facts."""
        rc, out, err = self.module.run_command("model")
        self.facts['model'] = out.strip()
        if self.facts['architecture'] == 'ia64':
            # machinfo's key/value separator changed between releases.
            separator = ':'
            if self.facts['distribution_version'] == "B.11.23":
                separator = '='
            rc, out, err = self.module.run_command("/usr/contrib/bin/machinfo |grep -i 'Firmware revision' | grep -v BMC", use_unsafe_shell=True)
            self.facts['firmware_version'] = out.split(separator)[1].strip()
|
2014-03-16 20:02:37 +00:00
|
|
|
|
|
|
|
|
|
|
|
class Darwin(Hardware):
    """
    Darwin-specific subclass of Hardware. Defines memory and CPU facts:
    - processor
    - processor_cores
    - memtotal_mb
    - memfree_mb
    - model
    - osversion
    - osrevision
    """
    platform = 'Darwin'

    def populate(self):
        """Collect Darwin hardware facts and return the facts dict."""
        self.sysctl = self.get_sysctl()
        self.get_mac_facts()
        self.get_cpu_facts()
        self.get_memory_facts()
        return self.facts

    def get_sysctl(self):
        """Return {key: value} for the 'hw', 'machdep' and 'kern' sysctl trees.

        Returns an empty dict when sysctl exits non-zero.
        """
        rc, out, err = self.module.run_command(["/usr/sbin/sysctl", "hw", "machdep", "kern"])
        if rc != 0:
            return dict()
        sysctl = dict()
        for line in out.splitlines():
            if not line.rstrip("\n"):
                continue
            # Keys and values are separated by ' = ' or ': ' depending on the
            # tree; split only on the first separator so values keep any
            # embedded separators intact.
            parts = re.split(' = |: ', line, maxsplit=1)
            # Skip malformed lines (no separator at all) instead of raising
            # ValueError on tuple unpacking.
            if len(parts) == 2:
                sysctl[parts[0]] = parts[1].strip()
        return sysctl

    def get_system_profile(self):
        """Return {key: value} parsed from 'system_profiler SPHardwareDataType'.

        Returns an empty dict when system_profiler exits non-zero.
        """
        rc, out, err = self.module.run_command(["/usr/sbin/system_profiler", "SPHardwareDataType"])
        if rc != 0:
            return dict()
        system_profile = dict()
        for line in out.splitlines():
            if ': ' in line:
                (key, value) = line.split(': ', 1)
                # Collapse internal runs of whitespace in the value.
                system_profile[key.strip()] = ' '.join(value.strip().split())
        return system_profile

    def get_mac_facts(self):
        """Fill model, osversion and osrevision facts from sysctl."""
        rc, out, err = self.module.run_command("sysctl hw.model")
        if rc == 0:
            self.facts['model'] = out.splitlines()[-1].split()[1]
        self.facts['osversion'] = self.sysctl['kern.osversion']
        self.facts['osrevision'] = self.sysctl['kern.osrevision']

    def get_cpu_facts(self):
        """Fill processor and processor_cores facts."""
        if 'machdep.cpu.brand_string' in self.sysctl: # Intel
            self.facts['processor'] = self.sysctl['machdep.cpu.brand_string']
            self.facts['processor_cores'] = self.sysctl['machdep.cpu.core_count']
        else: # PowerPC
            system_profile = self.get_system_profile()
            self.facts['processor'] = '%s @ %s' % (system_profile['Processor Name'], system_profile['Processor Speed'])
            self.facts['processor_cores'] = self.sysctl['hw.physicalcpu']

    def get_memory_facts(self):
        """Fill memtotal_mb and memfree_mb facts.

        Uses int() instead of the Python-2-only long() (py2 ints auto-promote)
        and floor division so the values stay integral on Python 3.
        """
        self.facts['memtotal_mb'] = int(self.sysctl['hw.memsize']) // 1024 // 1024

        rc, out, err = self.module.run_command("sysctl hw.usermem")
        if rc == 0:
            self.facts['memfree_mb'] = int(out.splitlines()[-1].split()[1]) // 1024 // 1024
|
2014-03-16 20:02:37 +00:00
|
|
|
|
2015-03-13 20:35:31 +00:00
|
|
|
|
2014-03-16 20:02:37 +00:00
|
|
|
class Network(Facts):
    """
    This is a generic Network subclass of Facts. This should be further
    subclassed to implement per platform. If you subclass this,
    you must define:
    - interfaces (a list of interface names)
    - interface_<name> dictionary of ipv4, ipv6, and mac address information.

    All subclasses MUST define platform.
    """
    platform = 'Generic'

    # IPv6 scope codes mapped to human-readable scope names
    # (keys are the hexadecimal scope values as strings).
    IPV6_SCOPE = { '0' : 'global',
                   '10' : 'host',
                   '20' : 'link',
                   '40' : 'admin',
                   '50' : 'site',
                   '80' : 'organization' }

    def __new__(cls, *arguments, **keyword):
        # When Network is created, it chooses a subclass to create instead.
        # This check prevents the subclass from then trying to find a subclass
        # and create that.
        if cls is not Network:
            return super(Network, cls).__new__(cls)

        # Pick the Network subclass whose 'platform' attribute matches the
        # running system; if several match, the last one found wins. Falls
        # back to Network itself when nothing matches.
        subclass = cls
        for sc in get_all_subclasses(Network):
            if sc.platform == platform.system():
                subclass = sc

        # object.__new__ rejects extra arguments on Python 3, so only forward
        # them on Python 2.
        if PY3:
            return super(cls, subclass).__new__(subclass)
        else:
            return super(cls, subclass).__new__(subclass, *arguments, **keyword)

    def populate(self):
        # Base implementation: subclasses override this to fill self.facts;
        # here we simply return whatever has already been collected.
        return self.facts
|
|
|
|
|
|
|
|
class LinuxNetwork(Network):
    """
    This is a Linux-specific subclass of Network. It defines
    - interfaces (a list of interface names)
    - interface_<name> dictionary of ipv4, ipv6, and mac address information.
    - all_ipv4_addresses and all_ipv6_addresses: lists of all configured addresses.
    - ipv4_address and ipv6_address: the first non-local address for each family.
    """
    platform = 'Linux'

    def populate(self):
        """Collect Linux network facts; returns facts unchanged if 'ip' is absent."""
        ip_path = self.module.get_bin_path('ip')
        if ip_path is None:
            return self.facts
        default_ipv4, default_ipv6 = self.get_default_interfaces(ip_path)
        interfaces, ips = self.get_interfaces_info(ip_path, default_ipv4, default_ipv6)
        self.facts['interfaces'] = interfaces.keys()
        for iface in interfaces:
            self.facts[iface] = interfaces[iface]
        self.facts['default_ipv4'] = default_ipv4
        self.facts['default_ipv6'] = default_ipv6
        self.facts['all_ipv4_addresses'] = ips['all_ipv4_addresses']
        self.facts['all_ipv6_addresses'] = ips['all_ipv6_addresses']
        return self.facts

    def get_default_interfaces(self, ip_path):
        """Return (default_ipv4, default_ipv6) dicts describing the default routes."""
        # Use the commands:
        #     ip -4 route get 8.8.8.8                     -> Google public DNS
        #     ip -6 route get 2404:6800:400a:800::1012    -> ipv6.google.com
        # to find out the default outgoing interface, address, and gateway
        command = dict(
            v4 = [ip_path, '-4', 'route', 'get', '8.8.8.8'],
            v6 = [ip_path, '-6', 'route', 'get', '2404:6800:400a:800::1012']
        )
        interface = dict(v4 = {}, v6 = {})
        for v in 'v4', 'v6':
            # RHEL4-era systems cannot answer the ipv6 route probe; skip it.
            if v == 'v6' and self.facts['os_family'] == 'RedHat' \
                and self.facts['distribution_version'].startswith('4.'):
                continue
            if v == 'v6' and not socket.has_ipv6:
                continue
            rc, out, err = self.module.run_command(command[v])
            out = out.decode('utf-8', 'replace')
            if not out:
                # v6 routing may result in
                #   RTNETLINK answers: Invalid argument
                continue
            words = out.split('\n')[0].split()
            # A valid output starts with the queried address on the first line
            if len(words) > 0 and words[0] == command[v][-1]:
                for i in range(len(words) - 1):
                    if words[i] == 'dev':
                        interface[v]['interface'] = words[i+1]
                    elif words[i] == 'src':
                        interface[v]['address'] = words[i+1]
                    elif words[i] == 'via' and words[i+1] != command[v][-1]:
                        interface[v]['gateway'] = words[i+1]
        return interface['v4'], interface['v6']

    def get_interfaces_info(self, ip_path, default_ipv4, default_ipv6):
        """Walk /sys/class/net and 'ip addr show' output; return (interfaces, ips).

        interfaces maps device name -> attribute dict; ips collects
        all_ipv4_addresses / all_ipv6_addresses. default_ipv4/default_ipv6 are
        updated in place when the default-route address is encountered.
        """
        interfaces = {}
        ips = dict(
            all_ipv4_addresses = [],
            all_ipv6_addresses = [],
        )

        for path in glob.glob('/sys/class/net/*'):
            if not os.path.isdir(path):
                continue
            device = os.path.basename(path)
            interfaces[device] = { 'device': device }
            if os.path.exists(os.path.join(path, 'address')):
                # NOTE(review): macaddress is only (re)bound when the address
                # file exists, and is referenced by parse_ip_output below —
                # a device without it could see a stale value; kept as-is.
                macaddress = get_file_content(os.path.join(path, 'address'), default='')
                if macaddress and macaddress != '00:00:00:00:00:00':
                    interfaces[device]['macaddress'] = macaddress
            if os.path.exists(os.path.join(path, 'mtu')):
                interfaces[device]['mtu'] = int(get_file_content(os.path.join(path, 'mtu')))
            if os.path.exists(os.path.join(path, 'operstate')):
                interfaces[device]['active'] = get_file_content(os.path.join(path, 'operstate')) != 'down'
            if os.path.exists(os.path.join(path, 'device','driver', 'module')):
                interfaces[device]['module'] = os.path.basename(os.path.realpath(os.path.join(path, 'device', 'driver', 'module')))
            if os.path.exists(os.path.join(path, 'type')):
                # Numeric ARPHRD_* codes from the kernel.
                _type = get_file_content(os.path.join(path, 'type'))
                if _type == '1':
                    interfaces[device]['type'] = 'ether'
                elif _type == '512':
                    interfaces[device]['type'] = 'ppp'
                elif _type == '772':
                    interfaces[device]['type'] = 'loopback'
            if os.path.exists(os.path.join(path, 'bridge')):
                interfaces[device]['type'] = 'bridge'
                interfaces[device]['interfaces'] = [ os.path.basename(b) for b in glob.glob(os.path.join(path, 'brif', '*')) ]
                if os.path.exists(os.path.join(path, 'bridge', 'bridge_id')):
                    interfaces[device]['id'] = get_file_content(os.path.join(path, 'bridge', 'bridge_id'), default='')
                if os.path.exists(os.path.join(path, 'bridge', 'stp_state')):
                    interfaces[device]['stp'] = get_file_content(os.path.join(path, 'bridge', 'stp_state')) == '1'
            if os.path.exists(os.path.join(path, 'bonding')):
                interfaces[device]['type'] = 'bonding'
                interfaces[device]['slaves'] = get_file_content(os.path.join(path, 'bonding', 'slaves'), default='').split()
                interfaces[device]['mode'] = get_file_content(os.path.join(path, 'bonding', 'mode'), default='').split()[0]
                interfaces[device]['miimon'] = get_file_content(os.path.join(path, 'bonding', 'miimon'), default='').split()[0]
                interfaces[device]['lacp_rate'] = get_file_content(os.path.join(path, 'bonding', 'lacp_rate'), default='').split()[0]
                primary = get_file_content(os.path.join(path, 'bonding', 'primary'))
                if primary:
                    interfaces[device]['primary'] = primary
                    path = os.path.join(path, 'bonding', 'all_slaves_active')
                    if os.path.exists(path):
                        interfaces[device]['all_slaves_active'] = get_file_content(path) == '1'
            if os.path.exists(os.path.join(path,'device')):
                interfaces[device]['pciid'] = os.path.basename(os.readlink(os.path.join(path,'device')))

            # Check whether an interface is in promiscuous mode
            if os.path.exists(os.path.join(path,'flags')):
                promisc_mode = False
                # The second byte indicates whether the interface is in promiscuous mode.
                # 1 = promisc
                # 0 = no promisc
                data = int(get_file_content(os.path.join(path, 'flags')),16)
                promisc_mode = (data & 0x0100 > 0)
                interfaces[device]['promisc'] = promisc_mode

            def parse_ip_output(output, secondary=False):
                """Parse 'ip addr show' output for the current device (closure)."""
                for line in output.split('\n'):
                    if not line:
                        continue
                    words = line.split()
                    broadcast = ''
                    if words[0] == 'inet':
                        if '/' in words[1]:
                            address, netmask_length = words[1].split('/')
                            if len(words) > 3:
                                broadcast = words[3]
                        else:
                            # pointopoint interfaces do not have a prefix
                            address = words[1]
                            netmask_length = "32"
                        address_bin = struct.unpack('!L', socket.inet_aton(address))[0]
                        netmask_bin = (1<<32) - (1<<32>>int(netmask_length))
                        netmask = socket.inet_ntoa(struct.pack('!L', netmask_bin))
                        network = socket.inet_ntoa(struct.pack('!L', address_bin & netmask_bin))
                        iface = words[-1]
                        if iface != device:
                            interfaces[iface] = {}
                        if not secondary and "ipv4" not in interfaces[iface]:
                            interfaces[iface]['ipv4'] = {'address': address,
                                                         'broadcast': broadcast,
                                                         'netmask': netmask,
                                                         'network': network}
                        else:
                            if "ipv4_secondaries" not in interfaces[iface]:
                                interfaces[iface]["ipv4_secondaries"] = []
                            interfaces[iface]["ipv4_secondaries"].append({
                                'address': address,
                                'broadcast': broadcast,
                                'netmask': netmask,
                                'network': network,
                            })

                        # add this secondary IP to the main device
                        if secondary:
                            if "ipv4_secondaries" not in interfaces[device]:
                                interfaces[device]["ipv4_secondaries"] = []
                            interfaces[device]["ipv4_secondaries"].append({
                                'address': address,
                                'broadcast': broadcast,
                                'netmask': netmask,
                                'network': network,
                            })

                        # If this is the default address, update default_ipv4
                        if 'address' in default_ipv4 and default_ipv4['address'] == address:
                            default_ipv4['broadcast'] = broadcast
                            default_ipv4['netmask'] = netmask
                            default_ipv4['network'] = network
                            default_ipv4['macaddress'] = macaddress
                            default_ipv4['mtu'] = interfaces[device]['mtu']
                            default_ipv4['type'] = interfaces[device].get("type", "unknown")
                            default_ipv4['alias'] = words[-1]
                        if not address.startswith('127.'):
                            ips['all_ipv4_addresses'].append(address)
                    elif words[0] == 'inet6':
                        # 'peer' form: inet6 ADDR peer PEER/PREFIX scope SCOPE
                        if 'peer' == words[2]:
                            address = words[1]
                            _, prefix = words[3].split('/')
                            scope = words[5]
                        else:
                            address, prefix = words[1].split('/')
                            scope = words[3]
                        if 'ipv6' not in interfaces[device]:
                            interfaces[device]['ipv6'] = []
                        interfaces[device]['ipv6'].append({
                            'address' : address,
                            'prefix' : prefix,
                            'scope' : scope
                        })
                        # If this is the default address, update default_ipv6
                        if 'address' in default_ipv6 and default_ipv6['address'] == address:
                            default_ipv6['prefix'] = prefix
                            default_ipv6['scope'] = scope
                            default_ipv6['macaddress'] = macaddress
                            default_ipv6['mtu'] = interfaces[device]['mtu']
                            default_ipv6['type'] = interfaces[device].get("type", "unknown")
                        if not address == '::1':
                            ips['all_ipv6_addresses'].append(address)

            # Use the ip_path passed in by the caller; re-resolving it with
            # get_bin_path on every device iteration was redundant and ignored
            # the parameter.

            args = [ip_path, 'addr', 'show', 'primary', device]
            rc, stdout, stderr = self.module.run_command(args)
            primary_data = stdout.decode('utf-8', 'replace')

            args = [ip_path, 'addr', 'show', 'secondary', device]
            rc, stdout, stderr = self.module.run_command(args)
            # BUGFIX: 'decode' is not a valid codecs error handler and raised
            # LookupError on the first undecodable byte; use 'replace' to
            # match the primary-address path above.
            secondary_data = stdout.decode('utf-8', 'replace')

            parse_ip_output(primary_data)
            parse_ip_output(secondary_data, secondary=True)

            interfaces[device]['features'] = self.get_ethtool_data(device)

        # replace : by _ in interface name since they are hard to use in template
        new_interfaces = {}
        for i in interfaces:
            if ':' in i:
                new_interfaces[i.replace(':','_')] = interfaces[i]
            else:
                new_interfaces[i] = interfaces[i]
        return new_interfaces, ips

    def get_ethtool_data(self, device):
        """Return a dict of offload feature flags from 'ethtool -k DEVICE'.

        Returns an empty dict when ethtool is missing or fails.
        """
        features = {}
        ethtool_path = self.module.get_bin_path("ethtool")
        if ethtool_path:
            args = [ethtool_path, '-k', device]
            rc, stdout, stderr = self.module.run_command(args)
            stdout = stdout.decode('utf-8', 'replace')
            if rc == 0:
                for line in stdout.strip().split('\n'):
                    # Skip blank lines and section headers ("Features for eth0:").
                    if not line or line.endswith(":"):
                        continue
                    key,value = line.split(": ")
                    if not value:
                        continue
                    features[key.strip().replace('-','_')] = value.strip()
        return features
|
|
|
|
|
2014-03-16 20:02:37 +00:00
|
|
|
class GenericBsdIfconfigNetwork(Network):
|
|
|
|
"""
|
|
|
|
This is a generic BSD subclass of Network using the ifconfig command.
|
|
|
|
It defines
|
|
|
|
- interfaces (a list of interface names)
|
|
|
|
- interface_<name> dictionary of ipv4, ipv6, and mac address information.
|
|
|
|
- all_ipv4_addresses and all_ipv6_addresses: lists of all configured addresses.
|
|
|
|
It currently does not define
|
|
|
|
- default_ipv4 and default_ipv6
|
|
|
|
- type, mtu and network on interfaces
|
|
|
|
"""
|
|
|
|
platform = 'Generic_BSD_Ifconfig'
|
|
|
|
|
|
|
|
def populate(self):
|
|
|
|
|
2016-03-14 16:45:28 +00:00
|
|
|
ifconfig_path = self.module.get_bin_path('ifconfig')
|
2014-03-16 20:02:37 +00:00
|
|
|
|
|
|
|
if ifconfig_path is None:
|
|
|
|
return self.facts
|
2016-03-14 16:45:28 +00:00
|
|
|
route_path = self.module.get_bin_path('route')
|
2014-03-16 20:02:37 +00:00
|
|
|
|
|
|
|
if route_path is None:
|
|
|
|
return self.facts
|
|
|
|
|
|
|
|
default_ipv4, default_ipv6 = self.get_default_interfaces(route_path)
|
|
|
|
interfaces, ips = self.get_interfaces_info(ifconfig_path)
|
|
|
|
self.merge_default_interface(default_ipv4, interfaces, 'ipv4')
|
|
|
|
self.merge_default_interface(default_ipv6, interfaces, 'ipv6')
|
|
|
|
self.facts['interfaces'] = interfaces.keys()
|
|
|
|
|
|
|
|
for iface in interfaces:
|
|
|
|
self.facts[iface] = interfaces[iface]
|
|
|
|
|
|
|
|
self.facts['default_ipv4'] = default_ipv4
|
|
|
|
self.facts['default_ipv6'] = default_ipv6
|
|
|
|
self.facts['all_ipv4_addresses'] = ips['all_ipv4_addresses']
|
|
|
|
self.facts['all_ipv6_addresses'] = ips['all_ipv6_addresses']
|
|
|
|
|
|
|
|
return self.facts
|
|
|
|
|
|
|
|
def get_default_interfaces(self, route_path):
|
|
|
|
|
|
|
|
# Use the commands:
|
|
|
|
# route -n get 8.8.8.8 -> Google public DNS
|
|
|
|
# route -n get -inet6 2404:6800:400a:800::1012 -> ipv6.google.com
|
|
|
|
# to find out the default outgoing interface, address, and gateway
|
|
|
|
|
|
|
|
command = dict(
|
|
|
|
v4 = [route_path, '-n', 'get', '8.8.8.8'],
|
|
|
|
v6 = [route_path, '-n', 'get', '-inet6', '2404:6800:400a:800::1012']
|
|
|
|
)
|
|
|
|
|
|
|
|
interface = dict(v4 = {}, v6 = {})
|
|
|
|
|
|
|
|
for v in 'v4', 'v6':
|
|
|
|
|
|
|
|
if v == 'v6' and not socket.has_ipv6:
|
|
|
|
continue
|
2016-03-14 16:45:28 +00:00
|
|
|
rc, out, err = self.module.run_command(command[v])
|
2014-03-16 20:02:37 +00:00
|
|
|
if not out:
|
|
|
|
# v6 routing may result in
|
|
|
|
# RTNETLINK answers: Invalid argument
|
|
|
|
continue
|
|
|
|
lines = out.split('\n')
|
|
|
|
for line in lines:
|
|
|
|
words = line.split()
|
|
|
|
# Collect output from route command
|
|
|
|
if len(words) > 1:
|
|
|
|
if words[0] == 'interface:':
|
|
|
|
interface[v]['interface'] = words[1]
|
|
|
|
if words[0] == 'gateway:':
|
|
|
|
interface[v]['gateway'] = words[1]
|
|
|
|
|
|
|
|
return interface['v4'], interface['v6']
|
|
|
|
|
2015-07-02 12:36:56 +00:00
|
|
|
    def get_interfaces_info(self, ifconfig_path, ifconfig_options='-a'):
        """Run ifconfig and dispatch each output line to a parse_* handler.

        Returns (interfaces, ips): interfaces maps device name -> attribute
        dict, ips collects all_ipv4_addresses / all_ipv6_addresses.
        """
        interfaces = {}
        current_if = {}
        ips = dict(
            all_ipv4_addresses = [],
            all_ipv6_addresses = [],
        )
        # FreeBSD, DragonflyBSD, NetBSD, OpenBSD and OS X all implicitly add '-a'
        # when running the command 'ifconfig'.
        # Solaris must explicitly run the command 'ifconfig -a'.
        rc, out, err = self.module.run_command([ifconfig_path, ifconfig_options])

        for line in out.split('\n'):

            if line:
                words = line.split()

                if words[0] == 'pass':
                    continue
                # An unindented line with several tokens starts a new interface
                # stanza; everything after it belongs to that interface.
                elif re.match('^\S', line) and len(words) > 3:
                    current_if = self.parse_interface_line(words)
                    interfaces[ current_if['device'] ] = current_if
                elif words[0].startswith('options='):
                    self.parse_options_line(words, current_if, ips)
                elif words[0] == 'nd6':
                    self.parse_nd6_line(words, current_if, ips)
                elif words[0] == 'ether':
                    self.parse_ether_line(words, current_if, ips)
                elif words[0] == 'media:':
                    self.parse_media_line(words, current_if, ips)
                elif words[0] == 'status:':
                    self.parse_status_line(words, current_if, ips)
                elif words[0] == 'lladdr':
                    self.parse_lladdr_line(words, current_if, ips)
                elif words[0] == 'inet':
                    self.parse_inet_line(words, current_if, ips)
                elif words[0] == 'inet6':
                    self.parse_inet6_line(words, current_if, ips)
                else:
                    self.parse_unknown_line(words, current_if, ips)

        return interfaces, ips
|
|
|
|
|
|
|
|
def parse_interface_line(self, words):
|
|
|
|
device = words[0][0:-1]
|
|
|
|
current_if = {'device': device, 'ipv4': [], 'ipv6': [], 'type': 'unknown'}
|
2014-04-20 02:03:30 +00:00
|
|
|
current_if['flags'] = self.get_options(words[1])
|
2014-03-16 20:02:37 +00:00
|
|
|
current_if['macaddress'] = 'unknown' # will be overwritten later
|
2014-04-22 01:00:58 +00:00
|
|
|
|
|
|
|
if len(words) >= 5 : # Newer FreeBSD versions
|
|
|
|
current_if['metric'] = words[3]
|
|
|
|
current_if['mtu'] = words[5]
|
|
|
|
else:
|
|
|
|
current_if['mtu'] = words[3]
|
|
|
|
|
2014-03-16 20:02:37 +00:00
|
|
|
return current_if
|
|
|
|
|
|
|
|
def parse_options_line(self, words, current_if, ips):
|
|
|
|
# Mac has options like this...
|
|
|
|
current_if['options'] = self.get_options(words[0])
|
|
|
|
|
|
|
|
def parse_nd6_line(self, words, current_if, ips):
|
2015-04-28 13:36:42 +00:00
|
|
|
# FreeBSD has options like this...
|
2014-03-16 20:02:37 +00:00
|
|
|
current_if['options'] = self.get_options(words[1])
|
|
|
|
|
|
|
|
def parse_ether_line(self, words, current_if, ips):
|
|
|
|
current_if['macaddress'] = words[1]
|
|
|
|
|
|
|
|
def parse_media_line(self, words, current_if, ips):
|
|
|
|
# not sure if this is useful - we also drop information
|
|
|
|
current_if['media'] = words[1]
|
|
|
|
if len(words) > 2:
|
|
|
|
current_if['media_select'] = words[2]
|
|
|
|
if len(words) > 3:
|
|
|
|
current_if['media_type'] = words[3][1:]
|
|
|
|
if len(words) > 4:
|
|
|
|
current_if['media_options'] = self.get_options(words[4])
|
|
|
|
|
|
|
|
def parse_status_line(self, words, current_if, ips):
|
|
|
|
current_if['status'] = words[1]
|
|
|
|
|
|
|
|
def parse_lladdr_line(self, words, current_if, ips):
|
|
|
|
current_if['lladdr'] = words[1]
|
|
|
|
|
|
|
|
def parse_inet_line(self, words, current_if, ips):
|
|
|
|
address = {'address': words[1]}
|
|
|
|
# deal with hex netmask
|
|
|
|
if re.match('([0-9a-f]){8}', words[3]) and len(words[3]) == 8:
|
|
|
|
words[3] = '0x' + words[3]
|
|
|
|
if words[3].startswith('0x'):
|
|
|
|
address['netmask'] = socket.inet_ntoa(struct.pack('!L', int(words[3], base=16)))
|
|
|
|
else:
|
|
|
|
# otherwise assume this is a dotted quad
|
|
|
|
address['netmask'] = words[3]
|
|
|
|
# calculate the network
|
|
|
|
address_bin = struct.unpack('!L', socket.inet_aton(address['address']))[0]
|
|
|
|
netmask_bin = struct.unpack('!L', socket.inet_aton(address['netmask']))[0]
|
|
|
|
address['network'] = socket.inet_ntoa(struct.pack('!L', address_bin & netmask_bin))
|
|
|
|
# broadcast may be given or we need to calculate
|
|
|
|
if len(words) > 5:
|
|
|
|
address['broadcast'] = words[5]
|
|
|
|
else:
|
|
|
|
address['broadcast'] = socket.inet_ntoa(struct.pack('!L', address_bin | (~netmask_bin & 0xffffffff)))
|
|
|
|
# add to our list of addresses
|
|
|
|
if not words[1].startswith('127.'):
|
|
|
|
ips['all_ipv4_addresses'].append(address['address'])
|
|
|
|
current_if['ipv4'].append(address)
|
|
|
|
|
|
|
|
def parse_inet6_line(self, words, current_if, ips):
|
|
|
|
address = {'address': words[1]}
|
|
|
|
if (len(words) >= 4) and (words[2] == 'prefixlen'):
|
|
|
|
address['prefix'] = words[3]
|
|
|
|
if (len(words) >= 6) and (words[4] == 'scopeid'):
|
|
|
|
address['scope'] = words[5]
|
|
|
|
localhost6 = ['::1', '::1/128', 'fe80::1%lo0']
|
|
|
|
if address['address'] not in localhost6:
|
|
|
|
ips['all_ipv6_addresses'].append(address['address'])
|
|
|
|
current_if['ipv6'].append(address)
|
|
|
|
|
|
|
|
    def parse_unknown_line(self, words, current_if, ips):
        """Hook for unrecognized ifconfig lines; intentionally a no-op here."""
        # we are going to ignore unknown lines here - this may be
        # a bad idea - but you can override it in your subclass
        pass
|
|
|
|
|
|
|
|
def get_options(self, option_string):
|
|
|
|
start = option_string.find('<') + 1
|
|
|
|
end = option_string.rfind('>')
|
|
|
|
if (start > 0) and (end > 0) and (end > start + 1):
|
|
|
|
option_csv = option_string[start:end]
|
|
|
|
return option_csv.split(',')
|
|
|
|
else:
|
|
|
|
return []
|
|
|
|
|
|
|
|
def merge_default_interface(self, defaults, interfaces, ip_type):
|
|
|
|
if not 'interface' in defaults.keys():
|
|
|
|
return
|
|
|
|
if not defaults['interface'] in interfaces:
|
|
|
|
return
|
|
|
|
ifinfo = interfaces[defaults['interface']]
|
|
|
|
# copy all the interface values across except addresses
|
|
|
|
for item in ifinfo.keys():
|
|
|
|
if item != 'ipv4' and item != 'ipv6':
|
|
|
|
defaults[item] = ifinfo[item]
|
|
|
|
if len(ifinfo[ip_type]) > 0:
|
|
|
|
for item in ifinfo[ip_type][0].keys():
|
|
|
|
defaults[item] = ifinfo[ip_type][0][item]
|
|
|
|
|
2015-07-20 14:26:49 +00:00
|
|
|
class HPUXNetwork(Network):
    """
    HP-UX-specific subclass of Network. Defines networking facts:
    - default_interface / default_gateway
    - interfaces (a list of interface names)
    - interface_<name> dictionary of ipv4 address information.
    """
    platform = 'HP-UX'

    def populate(self):
        """Gather HP-UX network facts; a no-op when netstat is unavailable."""
        netstat_path = self.module.get_bin_path('netstat')
        if netstat_path is None:
            return self.facts
        self.get_default_interfaces()
        interfaces = self.get_interfaces_info()
        self.facts['interfaces'] = interfaces.keys()
        for iface in interfaces:
            self.facts[iface] = interfaces[iface]
        return self.facts

    def get_default_interfaces(self):
        """Parse 'netstat -nr' output for the default route's gateway and interface."""
        rc, out, err = self.module.run_command("/usr/bin/netstat -nr")
        lines = out.split('\n')
        for line in lines:
            words = line.split()
            if len(words) > 1:
                if words[0] == 'default':
                    self.facts['default_interface'] = words[4]
                    self.facts['default_gateway'] = words[1]

    def get_interfaces_info(self):
        """Parse 'netstat -ni' output into a dict of lan* interface facts."""
        interfaces = {}
        rc, out, err = self.module.run_command("/usr/bin/netstat -ni")
        lines = out.split('\n')
        for line in lines:
            words = line.split()
            for i in range(len(words) - 1):
                if words[i][:3] == 'lan':
                    device = words[i]
                    address = words[i + 3]
                    network = words[i + 2]
                    # single assignment: the previous version created an
                    # {'address': ...} dict here and immediately replaced it
                    interfaces[device] = {'device': device,
                                          'ipv4': {'network': network,
                                                   'interface': device,
                                                   'address': address}}
        return interfaces
|
|
|
|
|
2016-03-14 16:45:28 +00:00
|
|
|
class DarwinNetwork(GenericBsdIfconfigNetwork):
    """
    Mac OS X / Darwin network facts; reuses the generic BSD ifconfig
    parser, overriding only the media-line handling.
    """
    platform = 'Darwin'

    # media line is different to the default FreeBSD one
    def parse_media_line(self, words, current_if, ips):
        """Darwin-specific 'media:' line parsing."""
        # not sure if this is useful - we also drop information
        token_count = len(words)
        current_if['media'] = 'Unknown' # Mac does not give us this
        current_if['media_select'] = words[1]
        if token_count > 2:
            # MacOSX reports '<unknown type>' for bridge interfaces, which the
            # whitespace split breaks into two tokens; recombine them here
            if words[1] == '<unknown' and words[2] == 'type>':
                current_if['media_select'] = 'Unknown'
                current_if['media_type'] = 'unknown type'
            else:
                current_if['media_type'] = words[2][1:-1]
        if token_count > 3:
            current_if['media_options'] = self.get_options(words[3])
|
|
|
|
|
|
|
|
|
2016-03-14 16:45:28 +00:00
|
|
|
class FreeBSDNetwork(GenericBsdIfconfigNetwork):
    """
    FreeBSD network facts class; the generic BSD ifconfig
    implementation works here without modification.
    """
    platform = 'FreeBSD'
|
|
|
|
|
2016-08-18 00:58:51 +00:00
|
|
|
|
2016-03-14 16:45:28 +00:00
|
|
|
class DragonFlyNetwork(GenericBsdIfconfigNetwork):
    """
    DragonFly BSD network facts class; the generic BSD ifconfig
    implementation works here without modification.
    """
    platform = 'DragonFly'
|
|
|
|
|
2016-08-18 00:58:51 +00:00
|
|
|
|
2016-03-14 16:45:28 +00:00
|
|
|
class AIXNetwork(GenericBsdIfconfigNetwork):
    """
    This is the AIX Network Class.
    It uses the GenericBsdIfconfigNetwork unchanged.
    """
    platform = 'AIX'

    def get_default_interfaces(self, route_path):
        # route_path is accepted for interface compatibility but netstat is
        # resolved here instead; default routes for v4/v6 come from 'netstat -nr'
        netstat_path = self.module.get_bin_path('netstat')

        rc, out, err = self.module.run_command([netstat_path, '-nr'])

        interface = dict(v4 = {}, v6 = {})

        lines = out.split('\n')
        for line in lines:
            words = line.split()
            if len(words) > 1 and words[0] == 'default':
                # a dotted gateway means IPv4, a colon-separated one IPv6
                if '.' in words[1]:
                    interface['v4']['gateway'] = words[1]
                    interface['v4']['interface'] = words[5]
                elif ':' in words[1]:
                    interface['v6']['gateway'] = words[1]
                    interface['v6']['interface'] = words[5]

        return interface['v4'], interface['v6']

    # AIX 'ifconfig -a' does not have three words in the interface line
    def get_interfaces_info(self, ifconfig_path, ifconfig_options='-a'):
        # Returns (interfaces dict, ips dict). Structure mirrors the generic
        # BSD parser; the per-line dispatch below differs only in the
        # interface-header regex. NOTE(review): inside the loop the code
        # rebinds rc/out/err and shadows line/words while probing
        # uname/entstat/lsattr - preserved as-is, restructuring would change
        # behavior subtly.
        interfaces = {}
        current_if = {}
        ips = dict(
            all_ipv4_addresses = [],
            all_ipv6_addresses = [],
        )
        rc, out, err = self.module.run_command([ifconfig_path, ifconfig_options])

        for line in out.split('\n'):

            if line:
                words = line.split()

                # only this condition differs from GenericBsdIfconfigNetwork
                if re.match('^\w*\d*:', line):
                    current_if = self.parse_interface_line(words)
                    interfaces[ current_if['device'] ] = current_if
                elif words[0].startswith('options='):
                    self.parse_options_line(words, current_if, ips)
                elif words[0] == 'nd6':
                    self.parse_nd6_line(words, current_if, ips)
                elif words[0] == 'ether':
                    self.parse_ether_line(words, current_if, ips)
                elif words[0] == 'media:':
                    self.parse_media_line(words, current_if, ips)
                elif words[0] == 'status:':
                    self.parse_status_line(words, current_if, ips)
                elif words[0] == 'lladdr':
                    self.parse_lladdr_line(words, current_if, ips)
                elif words[0] == 'inet':
                    self.parse_inet_line(words, current_if, ips)
                elif words[0] == 'inet6':
                    self.parse_inet6_line(words, current_if, ips)
                else:
                    self.parse_unknown_line(words, current_if, ips)

            # 'uname -W' reports the workload-partition id (0 == global system)
            uname_path = self.module.get_bin_path('uname')
            if uname_path:
                rc, out, err = self.module.run_command([uname_path, '-W'])
                # don't bother with wpars it does not work
                # zero means not in wpar
                if not rc and out.split()[0] == '0':
                    # fill in the MAC from entstat when ifconfig did not report one
                    if current_if['macaddress'] == 'unknown' and re.match('^en', current_if['device']):
                        entstat_path = self.module.get_bin_path('entstat')
                        if entstat_path:
                            rc, out, err = self.module.run_command([entstat_path, current_if['device'] ])
                            if rc != 0:
                                break
                            for line in out.split('\n'):
                                if not line:
                                    pass
                                buff = re.match('^Hardware Address: (.*)', line)
                                if buff:
                                    current_if['macaddress'] = buff.group(1)

                                buff = re.match('^Device Type:', line)
                                if buff and re.match('.*Ethernet', line):
                                    current_if['type'] = 'ether'
                    # device must have mtu attribute in ODM
                    if 'mtu' not in current_if:
                        lsattr_path = self.module.get_bin_path('lsattr')
                        if lsattr_path:
                            rc, out, err = self.module.run_command([lsattr_path,'-El', current_if['device'] ])
                            if rc != 0:
                                break
                            for line in out.split('\n'):
                                if line:
                                    words = line.split()
                                    if words[0] == 'mtu':
                                        current_if['mtu'] = words[1]
        return interfaces, ips

    # AIX 'ifconfig -a' does not inform about MTU, so remove current_if['mtu'] here
    def parse_interface_line(self, words):
        # Same as the generic version but without the metric/mtu parsing:
        # the MTU is filled in later from lsattr (see get_interfaces_info)
        device = words[0][0:-1]
        current_if = {'device': device, 'ipv4': [], 'ipv6': [], 'type': 'unknown'}
        current_if['flags'] = self.get_options(words[1])
        current_if['macaddress'] = 'unknown'    # will be overwritten later
        return current_if
|
|
|
|
|
2016-03-14 16:45:28 +00:00
|
|
|
class OpenBSDNetwork(GenericBsdIfconfigNetwork):
    """
    OpenBSD network facts, built on the generic BSD ifconfig parser.
    """
    platform = 'OpenBSD'

    # OpenBSD 'ifconfig -a' does not have information about aliases
    def get_interfaces_info(self, ifconfig_path, ifconfig_options='-aA'):
        """Run the generic parser, but with '-aA' so aliases are included."""
        return super(OpenBSDNetwork, self).get_interfaces_info(ifconfig_path, ifconfig_options)

    # Return macaddress instead of lladdr
    def parse_lladdr_line(self, words, current_if, ips):
        """Store the link-level address under 'macaddress' rather than 'lladdr'."""
        mac = words[1]
        current_if['macaddress'] = mac
|
|
|
|
|
2016-03-14 16:45:28 +00:00
|
|
|
class SunOSNetwork(GenericBsdIfconfigNetwork):
    """
    This is the SunOS Network Class.
    It uses the GenericBsdIfconfigNetwork.

    Solaris can have different FLAGS and MTU for IPv4 and IPv6 on the same interface
    so these facts have been moved inside the 'ipv4' and 'ipv6' lists.
    """
    platform = 'SunOS'

    # Solaris 'ifconfig -a' will print interfaces twice, once for IPv4 and again for IPv6.
    # MTU and FLAGS also may differ between IPv4 and IPv6 on the same interface.
    # 'parse_interface_line()' checks for previously seen interfaces before defining
    # 'current_if' so that IPv6 facts don't clobber IPv4 facts (or vice versa).
    def get_interfaces_info(self, ifconfig_path):
        # Returns (interfaces dict, ips dict) parsed from 'ifconfig -a'
        interfaces = {}
        current_if = {}
        ips = dict(
            all_ipv4_addresses = [],
            all_ipv6_addresses = [],
        )
        rc, out, err = self.module.run_command([ifconfig_path, '-a'])

        for line in out.split('\n'):

            if line:
                words = line.split()

                # an interface header starts at column 0 and has >3 tokens
                if re.match('^\S', line) and len(words) > 3:
                    current_if = self.parse_interface_line(words, current_if, interfaces)
                    interfaces[ current_if['device'] ] = current_if
                elif words[0].startswith('options='):
                    self.parse_options_line(words, current_if, ips)
                elif words[0] == 'nd6':
                    self.parse_nd6_line(words, current_if, ips)
                elif words[0] == 'ether':
                    self.parse_ether_line(words, current_if, ips)
                elif words[0] == 'media:':
                    self.parse_media_line(words, current_if, ips)
                elif words[0] == 'status:':
                    self.parse_status_line(words, current_if, ips)
                elif words[0] == 'lladdr':
                    self.parse_lladdr_line(words, current_if, ips)
                elif words[0] == 'inet':
                    self.parse_inet_line(words, current_if, ips)
                elif words[0] == 'inet6':
                    self.parse_inet6_line(words, current_if, ips)
                else:
                    self.parse_unknown_line(words, current_if, ips)

        # 'parse_interface_line' and 'parse_inet*_line' leave two dicts in the
        # ipv4/ipv6 lists which is ugly and hard to read.
        # This quick hack merges the dictionaries. Purely cosmetic.
        for iface in interfaces:
            for v in 'ipv4', 'ipv6':
                combined_facts = {}
                for facts in interfaces[iface][v]:
                    combined_facts.update(facts)
                if len(combined_facts.keys()) > 0:
                    interfaces[iface][v] = [combined_facts]

        return interfaces, ips

    def parse_interface_line(self, words, current_if, interfaces):
        # Reuse the already-seen interface dict (second pass for the other
        # address family) instead of starting fresh and losing earlier facts
        device = words[0][0:-1]
        if device not in interfaces.keys():
            current_if = {'device': device, 'ipv4': [], 'ipv6': [], 'type': 'unknown'}
        else:
            current_if = interfaces[device]
        flags = self.get_options(words[1])
        v = 'ipv4'
        if 'IPv6' in flags:
            v = 'ipv6'
        # flags/mtu are per address family on Solaris, hence stored in the list
        current_if[v].append({'flags': flags, 'mtu': words[3]})
        current_if['macaddress'] = 'unknown'    # will be overwritten later
        return current_if

    # Solaris displays single digit octets in MAC addresses e.g. 0:1:2:d:e:f
    # Add leading zero to each octet where needed.
    def parse_ether_line(self, words, current_if, ips):
        macaddress = ''
        for octet in words[1].split(':'):
            octet = ('0' + octet)[-2:None]
            macaddress += (octet + ':')
        # drop the trailing ':' added by the loop above
        current_if['macaddress'] = macaddress[0:-1]
|
|
|
|
|
|
|
|
class Virtual(Facts):
    """
    This is a generic Virtual subclass of Facts.  This should be further
    subclassed to implement per platform.  If you subclass this,
    you should define:
    - virtualization_type
    - virtualization_role
    - container (e.g. solaris zones, freebsd jails, linux containers)

    All subclasses MUST define platform.
    """

    def __new__(cls, *arguments, **keyword):
        # When Virtual is created, it chooses a subclass to create instead.
        # This check prevents the subclass from then trying to find a subclass
        # and create that.
        if cls is not Virtual:
            return super(Virtual, cls).__new__(cls)

        # Pick the platform-specific subclass whose 'platform' attribute
        # matches the running system; fall back to Virtual itself.
        subclass = cls
        for sc in get_all_subclasses(Virtual):
            if sc.platform == platform.system():
                subclass = sc

        # Python 3's object.__new__ rejects extra arguments, hence the split
        if PY3:
            return super(cls, subclass).__new__(subclass)
        else:
            return super(cls, subclass).__new__(subclass, *arguments, **keyword)

    def populate(self):
        # Base implementation gathers nothing; subclasses override
        return self.facts
|
|
|
|
|
|
|
|
class LinuxVirtual(Virtual):
    """
    This is a Linux-specific subclass of Virtual.  It defines
    - virtualization_type
    - virtualization_role
    """
    platform = 'Linux'

    def populate(self):
        self.get_virtual_facts()
        return self.facts

    # For more information, check: http://people.redhat.com/~rjones/virt-what/
    def get_virtual_facts(self):
        # Detection is strictly ordered: the first probe that matches wins
        # and the method returns immediately.

        # old lxc/docker
        if os.path.exists('/proc/1/cgroup'):
            for line in get_file_lines('/proc/1/cgroup'):
                if re.search(r'/docker(/|-[0-9a-f]+\.scope)', line):
                    self.facts['virtualization_type'] = 'docker'
                    self.facts['virtualization_role'] = 'guest'
                    return
                if re.search('/lxc/', line):
                    self.facts['virtualization_type'] = 'lxc'
                    self.facts['virtualization_role'] = 'guest'
                    return

        # newer lxc does not appear in cgroups anymore but sets 'container=lxc' environment var
        if os.path.exists('/proc/1/environ'):
            for line in get_file_lines('/proc/1/environ'):
                if re.search('container=lxc', line):
                    self.facts['virtualization_type'] = 'lxc'
                    self.facts['virtualization_role'] = 'guest'
                    return

        # OpenVZ: /proc/bc only exists on the hardware node
        if os.path.exists('/proc/vz'):
            self.facts['virtualization_type'] = 'openvz'
            if os.path.exists('/proc/bc'):
                self.facts['virtualization_role'] = 'host'
            else:
                self.facts['virtualization_role'] = 'guest'
            return

        # systemd records the detected container type here
        systemd_container = get_file_content('/run/systemd/container')
        if systemd_container:
            self.facts['virtualization_type'] = systemd_container
            self.facts['virtualization_role'] = 'guest'
            return

        # Xen: 'control_d' in capabilities marks dom0 (the host)
        if os.path.exists("/proc/xen"):
            self.facts['virtualization_type'] = 'xen'
            self.facts['virtualization_role'] = 'guest'
            try:
                for line in get_file_lines('/proc/xen/capabilities'):
                    if "control_d" in line:
                        self.facts['virtualization_role'] = 'host'
            except IOError:
                pass
            return

        # DMI product name probes
        product_name = get_file_content('/sys/devices/virtual/dmi/id/product_name')

        if product_name in ['KVM', 'Bochs']:
            self.facts['virtualization_type'] = 'kvm'
            self.facts['virtualization_role'] = 'guest'
            return

        if product_name == 'RHEV Hypervisor':
            self.facts['virtualization_type'] = 'RHEV'
            self.facts['virtualization_role'] = 'guest'
            return

        if product_name == 'VMware Virtual Platform':
            self.facts['virtualization_type'] = 'VMware'
            self.facts['virtualization_role'] = 'guest'
            return

        # DMI BIOS vendor probes
        bios_vendor = get_file_content('/sys/devices/virtual/dmi/id/bios_vendor')

        if bios_vendor == 'Xen':
            self.facts['virtualization_type'] = 'xen'
            self.facts['virtualization_role'] = 'guest'
            return

        if bios_vendor == 'innotek GmbH':
            self.facts['virtualization_type'] = 'virtualbox'
            self.facts['virtualization_role'] = 'guest'
            return

        # DMI system vendor probes
        sys_vendor = get_file_content('/sys/devices/virtual/dmi/id/sys_vendor')

        # FIXME: This does also match hyperv
        if sys_vendor == 'Microsoft Corporation':
            self.facts['virtualization_type'] = 'VirtualPC'
            self.facts['virtualization_role'] = 'guest'
            return

        if sys_vendor == 'Parallels Software International Inc.':
            self.facts['virtualization_type'] = 'parallels'
            self.facts['virtualization_role'] = 'guest'
            return

        if sys_vendor == 'QEMU':
            self.facts['virtualization_type'] = 'kvm'
            self.facts['virtualization_role'] = 'guest'
            return

        if sys_vendor == 'oVirt':
            self.facts['virtualization_type'] = 'kvm'
            self.facts['virtualization_role'] = 'guest'
            return

        # Linux-VServer: VxID 0 is the host context
        if os.path.exists('/proc/self/status'):
            for line in get_file_lines('/proc/self/status'):
                if re.match('^VxID: \d+', line):
                    self.facts['virtualization_type'] = 'linux_vserver'
                    if re.match('^VxID: 0', line):
                        self.facts['virtualization_role'] = 'host'
                    else:
                        self.facts['virtualization_role'] = 'guest'
                    return

        # CPU-model based guest detection (kvm/uml/powervm/zSeries)
        if os.path.exists('/proc/cpuinfo'):
            for line in get_file_lines('/proc/cpuinfo'):
                if re.match('^model name.*QEMU Virtual CPU', line):
                    self.facts['virtualization_type'] = 'kvm'
                elif re.match('^vendor_id.*User Mode Linux', line):
                    self.facts['virtualization_type'] = 'uml'
                elif re.match('^model name.*UML', line):
                    self.facts['virtualization_type'] = 'uml'
                elif re.match('^vendor_id.*PowerVM Lx86', line):
                    self.facts['virtualization_type'] = 'powervm_lx86'
                elif re.match('^vendor_id.*IBM/S390', line):
                    self.facts['virtualization_type'] = 'PR/SM'
                    # lscpu may refine the hypervisor name on zSeries
                    lscpu = self.module.get_bin_path('lscpu')
                    if lscpu:
                        rc, out, err = self.module.run_command(["lscpu"])
                        if rc == 0:
                            for line in out.split("\n"):
                                data = line.split(":", 1)
                                key = data[0].strip()
                                if key == 'Hypervisor':
                                    self.facts['virtualization_type'] = data[1].strip()
                        else:
                            self.facts['virtualization_type'] = 'ibm_systemz'
                else:
                    continue
                if self.facts['virtualization_type'] == 'PR/SM':
                    self.facts['virtualization_role'] = 'LPAR'
                else:
                    self.facts['virtualization_role'] = 'guest'
                return

        # Beware that we can have both kvm and virtualbox running on a single system
        if os.path.exists("/proc/modules") and os.access('/proc/modules', os.R_OK):
            modules = []
            for line in get_file_lines("/proc/modules"):
                data = line.split(" ", 1)
                modules.append(data[0])

            if 'kvm' in modules:
                self.facts['virtualization_type'] = 'kvm'
                self.facts['virtualization_role'] = 'host'
                return

            if 'vboxdrv' in modules:
                self.facts['virtualization_type'] = 'virtualbox'
                self.facts['virtualization_role'] = 'host'
                return

        # If none of the above matches, return 'NA' for virtualization_type
        # and virtualization_role. This allows for proper grouping.
        self.facts['virtualization_type'] = 'NA'
        self.facts['virtualization_role'] = 'NA'
        return
|
|
|
|
|
2015-05-06 20:47:53 +00:00
|
|
|
class FreeBSDVirtual(Virtual):
    """
    FreeBSD-specific Virtual subclass. Defines
    - virtualization_type
    - virtualization_role
    """
    platform = 'FreeBSD'

    def populate(self):
        self.get_virtual_facts()
        return self.facts

    def get_virtual_facts(self):
        # No detection implemented on FreeBSD; publish empty strings so the
        # keys are always present.
        for key in ('virtualization_type', 'virtualization_role'):
            self.facts[key] = ''
|
|
|
|
|
2015-08-30 17:04:30 +00:00
|
|
|
class DragonFlyVirtual(FreeBSDVirtual):
    # DragonFly behaves like FreeBSD for virtualization detection;
    # only the platform string differs.
    platform = 'DragonFly'
|
2015-08-30 17:04:30 +00:00
|
|
|
|
2015-05-06 20:47:53 +00:00
|
|
|
class OpenBSDVirtual(Virtual):
    """
    OpenBSD-specific Virtual subclass. Defines
    - virtualization_type
    - virtualization_role
    """
    platform = 'OpenBSD'

    def populate(self):
        self.get_virtual_facts()
        return self.facts

    def get_virtual_facts(self):
        # No detection implemented on OpenBSD; publish empty strings so the
        # keys are always present.
        for key in ('virtualization_type', 'virtualization_role'):
            self.facts[key] = ''
|
2014-03-16 20:02:37 +00:00
|
|
|
|
|
|
|
class HPUXVirtual(Virtual):
    """
    This is a HP-UX specific subclass of Virtual. It defines
    - virtualization_type
    - virtualization_role
    """
    platform = 'HP-UX'

    def populate(self):
        self.get_virtual_facts()
        return self.facts

    def get_virtual_facts(self):
        # Probes run sequentially; a later matching probe overwrites the
        # facts set by an earlier one (no early return here).
        if os.path.exists('/usr/sbin/vecheck'):
            # vecheck exits 0 inside a virtual partition (vPar)
            rc, out, err = self.module.run_command("/usr/sbin/vecheck")
            if rc == 0:
                self.facts['virtualization_type'] = 'guest'
                self.facts['virtualization_role'] = 'HP vPar'
        if os.path.exists('/opt/hpvm/bin/hpvminfo'):
            # hpvminfo output distinguishes HPVM vPar / guest / host
            rc, out, err = self.module.run_command("/opt/hpvm/bin/hpvminfo")
            if rc == 0 and re.match('.*Running.*HPVM vPar.*', out):
                self.facts['virtualization_type'] = 'guest'
                self.facts['virtualization_role'] = 'HPVM vPar'
            elif rc == 0 and re.match('.*Running.*HPVM guest.*', out):
                self.facts['virtualization_type'] = 'guest'
                self.facts['virtualization_role'] = 'HPVM IVM'
            elif rc == 0 and re.match('.*Running.*HPVM host.*', out):
                self.facts['virtualization_type'] = 'host'
                self.facts['virtualization_role'] = 'HPVM'
        if os.path.exists('/usr/sbin/parstatus'):
            # parstatus succeeding indicates an nPar system
            rc, out, err = self.module.run_command("/usr/sbin/parstatus")
            if rc == 0:
                self.facts['virtualization_type'] = 'guest'
                self.facts['virtualization_role'] = 'HP nPar'
|
|
|
|
|
|
|
|
|
|
|
|
class SunOSVirtual(Virtual):
    """
    This is a SunOS-specific subclass of Virtual. It defines
    - virtualization_type
    - virtualization_role
    - container
    """
    platform = 'SunOS'

    def populate(self):
        self.get_virtual_facts()
        return self.facts

    def get_virtual_facts(self):
        # Scan prtdiag output for well-known hypervisor markers
        rc, out, err = self.module.run_command("/usr/sbin/prtdiag")
        for line in out.split('\n'):
            if 'VMware' in line:
                self.facts['virtualization_type'] = 'vmware'
                self.facts['virtualization_role'] = 'guest'
            if 'Parallels' in line:
                self.facts['virtualization_type'] = 'parallels'
                self.facts['virtualization_role'] = 'guest'
            if 'VirtualBox' in line:
                self.facts['virtualization_type'] = 'virtualbox'
                self.facts['virtualization_role'] = 'guest'
            if 'HVM domU' in line:
                self.facts['virtualization_type'] = 'xen'
                self.facts['virtualization_role'] = 'guest'
        # Check if it's a zone
        if os.path.exists("/usr/bin/zonename"):
            rc, out, err = self.module.run_command("/usr/bin/zonename")
            if out.rstrip() != "global":
                self.facts['container'] = 'zone'
        # Check if it's a branded zone (i.e. Solaris 8/9 zone)
        if os.path.isdir('/.SUNWnative'):
            self.facts['container'] = 'zone'
        # If it's a zone check if we can detect if our global zone is itself virtualized.
        # Relies on the "guest tools" (e.g. vmware tools) to be installed
        if 'container' in self.facts and self.facts['container'] == 'zone':
            rc, out, err = self.module.run_command("/usr/sbin/modinfo")
            for line in out.split('\n'):
                if 'VMware' in line:
                    self.facts['virtualization_type'] = 'vmware'
                    self.facts['virtualization_role'] = 'guest'
                if 'VirtualBox' in line:
                    self.facts['virtualization_type'] = 'virtualbox'
                    self.facts['virtualization_role'] = 'guest'
        # Detect domaining on Sparc hardware
        if os.path.exists("/usr/sbin/virtinfo"):
            # The output of virtinfo is different whether we are on a machine with logical
            # domains ('LDoms') on a T-series or domains ('Domains') on a M-series. Try LDoms first.
            rc, out, err = self.module.run_command("/usr/sbin/virtinfo -p")
            # The output contains multiple lines with different keys like this:
            #   DOMAINROLE|impl=LDoms|control=false|io=false|service=false|root=false
            # The output may also be not formatted and the returncode is set to 0 regardless of the error condition:
            #   virtinfo can only be run from the global zone
            try:
                for line in out.split('\n'):
                    fields = line.split('|')
                    if( fields[0] == 'DOMAINROLE' and fields[1] == 'impl=LDoms' ):
                        self.facts['virtualization_type'] = 'ldom'
                        self.facts['virtualization_role'] = 'guest'
                        hostfeatures = []
                        for field in fields[2:]:
                            arg = field.split('=')
                            if( arg[1] == 'true' ):
                                hostfeatures.append(arg[0])
                        # any enabled feature flag means this is the control/host side
                        if( len(hostfeatures) > 0 ):
                            self.facts['virtualization_role'] = 'host (' + ','.join(hostfeatures) + ')'
            except ValueError:
                pass
|
2014-03-16 20:02:37 +00:00
|
|
|
|
2016-03-14 21:50:27 +00:00
|
|
|
class Ohai(Facts):
    """
    This is a subclass of Facts for including information gathered from Ohai.
    """

    def populate(self):
        """Run ohai (if installed) and merge its output into the facts dict."""
        self.run_ohai()
        return self.facts

    def run_ohai(self):
        """Execute the 'ohai' binary and merge its JSON output into self.facts.

        Best effort: silently ignores output that is not valid JSON.
        """
        ohai_path = self.module.get_bin_path('ohai')
        if ohai_path is None:
            return
        rc, out, err = self.module.run_command(ohai_path)
        try:
            self.facts.update(json.loads(out))
        except Exception:
            # narrowed from a bare 'except:' which would also swallow
            # SystemExit/KeyboardInterrupt; unparseable output is still
            # ignored on a best-effort basis
            pass
|
|
|
|
|
|
|
|
class Facter(Facts):
    """
    This is a subclass of Facts for including information gathered from Facter.
    """

    def populate(self):
        """Run facter/cfacter (if installed) and replace facts with its output."""
        self.run_facter()
        return self.facts

    def run_facter(self):
        """Execute facter (preferring cfacter) and load its JSON output.

        Best effort: silently ignores output that is not valid JSON.
        """
        facter_path = self.module.get_bin_path('facter', opt_dirs=['/opt/puppetlabs/bin'])
        cfacter_path = self.module.get_bin_path('cfacter', opt_dirs=['/opt/puppetlabs/bin'])
        # Prefer to use cfacter if available
        if cfacter_path is not None:
            facter_path = cfacter_path

        if facter_path is None:
            return

        # if facter is installed, and we can use --json because
        # ruby-json is ALSO installed, include facter data in the JSON
        rc, out, err = self.module.run_command(facter_path + " --puppet --json")
        try:
            self.facts = json.loads(out)
        except Exception:
            # narrowed from a bare 'except:' which would also swallow
            # SystemExit/KeyboardInterrupt; unparseable output is still
            # ignored on a best-effort basis
            pass
|
|
|
|
|
|
|
|
|
2015-02-09 22:30:06 +00:00
|
|
|
def get_file_content(path, default=None, strip=True):
    """Return the contents of ``path``, or ``default`` if it cannot be read.

    :param path: file to read
    :param default: value returned when the file is missing, unreadable,
        raises on read, or (after optional stripping) is empty
    :param strip: when True, strip surrounding whitespace from the content
    """
    data = default
    if os.path.exists(path) and os.access(path, os.R_OK):
        try:
            # open() must happen *before* the try/finally below: the old
            # code opened inside it, so a failed open raised NameError from
            # the finally's datafile.close() instead of the real error.
            datafile = open(path)
            try:
                data = datafile.read()
                if strip:
                    data = data.strip()
                if len(data) == 0:
                    data = default
            finally:
                datafile.close()
        except Exception:
            # ignore errors as some jails/containers might have readable
            # permissions but not allow reads to proc
            # done in 2 blocks for 2.4 compat
            pass
    return data
|
|
|
|
|
2016-06-07 19:12:37 +00:00
|
|
|
def get_uname_version(module):
    """Return the raw output of ``uname -v``, or None when the command fails.

    :param module: AnsibleModule-like object providing run_command()
    """
    rc, out, err = module.run_command(['uname', '-v'])
    if rc != 0:
        return None
    return out
|
|
|
|
|
2016-08-08 16:23:19 +00:00
|
|
|
def get_partition_uuid(partname):
    """Look up the filesystem UUID of /dev/<partname> via /dev/disk/by-uuid.

    Returns None when the by-uuid directory is unavailable or no symlink
    in it resolves to the requested partition device.
    """
    by_uuid_dir = "/dev/disk/by-uuid"
    try:
        candidates = os.listdir(by_uuid_dir)
    except OSError:
        return None

    target = "/dev/" + partname
    for candidate in candidates:
        if os.path.realpath(os.path.join(by_uuid_dir, candidate)) == target:
            return candidate

    return None
|
|
|
|
|
2015-02-09 22:30:06 +00:00
|
|
|
def get_file_lines(path):
    '''get list of lines from file'''
    contents = get_file_content(path)
    # missing/empty files yield an empty list rather than None
    return contents.splitlines() if contents else []
|
2015-02-09 22:30:06 +00:00
|
|
|
|
2016-03-14 21:50:27 +00:00
|
|
|
def ansible_facts(module, gather_subset):
    """Collect the base facts plus every requested fact subset.

    :param module: AnsibleModule used by the fact collectors
    :param gather_subset: iterable of subset names (keys of FACT_SUBSETS)
    :returns: dict of gathered facts, including the 'gather_subset' key
    """
    facts = {'gather_subset': list(gather_subset)}
    facts.update(Facts(module).populate())

    for name in gather_subset:
        # each subset collector sees the facts gathered so far
        collector = FACT_SUBSETS[name](module, load_on_init=False, cached_facts=facts)
        facts.update(collector.populate())

    return facts
|
|
|
|
|
|
|
|
def _resolve_gather_subset(gather_subset):
    """Expand a gather_subset parameter into the set of subsets to collect.

    Supports 'all', plain subset names, and '!'-prefixed exclusions; an
    empty include list means 'all'. Exclusions are applied last.

    :raises TypeError: on a subset name not in VALID_SUBSETS
    """
    additional_subsets = set()
    exclude_subsets = set()
    for subset in gather_subset:
        if subset == 'all':
            additional_subsets.update(VALID_SUBSETS)
            continue
        if subset.startswith('!'):
            subset = subset[1:]
            if subset == 'all':
                exclude_subsets.update(VALID_SUBSETS)
                continue
            exclude = True
        else:
            exclude = False

        if subset not in VALID_SUBSETS:
            raise TypeError("Bad subset '%s' given to Ansible. gather_subset options allowed: all, %s" % (subset, ", ".join(FACT_SUBSETS.keys())))

        if exclude:
            exclude_subsets.add(subset)
        else:
            additional_subsets.add(subset)

    if not additional_subsets:
        additional_subsets.update(VALID_SUBSETS)

    additional_subsets.difference_update(exclude_subsets)
    return additional_subsets


def _collect_prefixed_facts(module, subset_name, additional_subsets, setup_options):
    """Run one prefix-namespaced collector (facter/ohai) into setup_options.

    Removes subset_name from additional_subsets so the generic 'ansible_'
    prefixed gathering does not run the same collector a second time.
    """
    if subset_name not in additional_subsets:
        return
    additional_subsets.discard(subset_name)
    facts_ds = FACT_SUBSETS[subset_name](module, load_on_init=False).populate()
    if facts_ds:
        for (k, v) in facts_ds.items():
            setup_options['%s_%s' % (subset_name, k.replace('-', '_'))] = v


def get_all_facts(module):
    """Gather all requested fact subsets and build the setup module result.

    Reads 'gather_subset', 'gather_timeout' and 'filter' from module.params.
    Returns a dict of the form {'ansible_facts': {...}} with the filter
    applied to fact names.
    """
    setup_options = dict(module_setup=True)

    # Retrieve module parameters
    gather_subset = module.params['gather_subset']

    global GATHER_TIMEOUT
    GATHER_TIMEOUT = module.params['gather_timeout']

    # Retrieve all facts elements
    additional_subsets = _resolve_gather_subset(gather_subset)

    # facter and ohai are given a different prefix than other subsets
    # (the two merge blocks were previously copy-pasted; now shared)
    _collect_prefixed_facts(module, 'facter', additional_subsets, setup_options)
    _collect_prefixed_facts(module, 'ohai', additional_subsets, setup_options)

    facts = ansible_facts(module, additional_subsets)

    for (k, v) in facts.items():
        setup_options["ansible_%s" % k.replace('-', '_')] = v

    # apply the user-supplied fnmatch filter to the final fact names
    setup_result = {'ansible_facts': {}}

    for (k, v) in setup_options.items():
        if module.params['filter'] == '*' or fnmatch.fnmatch(k, module.params['filter']):
            setup_result['ansible_facts'][k] = v

    # hack to keep --verbose from showing all the setup module results
    setup_result['_ansible_verbose_override'] = True

    return setup_result
|
2016-03-14 16:45:28 +00:00
|
|
|
|
|
|
|
# Allowed fact subset for gather_subset options and what classes they use
|
2016-03-14 21:50:27 +00:00
|
|
|
# Note: have to define this at the bottom as it references classes defined earlier in this file
|
2016-03-14 16:45:28 +00:00
|
|
|
# Maps each gather_subset option name to the Facts subclass that collects it.
FACT_SUBSETS = dict(
    hardware=Hardware,
    network=Network,
    virtual=Virtual,
    ohai=Ohai,
    facter=Facter,
)
# All legal gather_subset values besides 'all' (and their '!'-negations).
VALID_SUBSETS = frozenset(FACT_SUBSETS.keys())
|