Add test for large output; fix indentation.

pull/4420/head
John Kleint 2012-05-11 11:38:15 -04:00
parent fba2bdcf0c
commit 83b9a43e60
1 changed file with 186 additions and 182 deletions


@@ -10,9 +10,9 @@ import os
 import shutil
 import time
 try:
     import json
 except:
     import simplejson as json
 from nose.plugins.skip import SkipTest
@@ -25,202 +25,206 @@ def get_binary(name):
 class TestRunner(unittest.TestCase):

     def setUp(self):
         self.user = getpass.getuser()
         self.runner = ansible.runner.Runner(
             module_name='ping',
             module_path='library/',
             module_args='',
             remote_user=self.user,
             remote_pass=None,
             host_list='test/ansible_hosts',
             timeout=5,
             forks=1,
             background=0,
             pattern='all',
         )
         self.cwd = os.getcwd()
         self.test_dir = os.path.join(self.cwd, 'test')
         self.stage_dir = self._prepare_stage_dir()

     def _prepare_stage_dir(self):
         stage_path = os.path.join(self.test_dir, 'test_data')
         if os.path.exists(stage_path):
             shutil.rmtree(stage_path, ignore_errors=False)
             assert not os.path.exists(stage_path)
         os.makedirs(stage_path)
         assert os.path.exists(stage_path)
         return stage_path

     def _get_test_file(self, filename):
         # get a file inside the test input directory
         filename = os.path.join(self.test_dir, filename)
         assert os.path.exists(filename)
         return filename

     def _get_stage_file(self, filename):
         # get a file inside the test output directory
         filename = os.path.join(self.stage_dir, filename)
         return filename

     def _run(self, module_name, module_args, background=0):
         ''' run a module and get the localhost results '''
         self.runner.module_name = module_name
         args = ' '.join(module_args)
         print "DEBUG: using args=%s" % args
         self.runner.module_args = args
         self.runner.background = background
         results = self.runner.run()
         # when using nosetests this will only show up on failure
         # which is pretty useful
         print "RESULTS=%s" % results
         assert "127.0.0.2" in results['contacted']
         return results['contacted']['127.0.0.2']

     def test_ping(self):
-        result = self._run('ping',[])
+        result = self._run('ping', [])
         assert "ping" in result

     def test_facter(self):
         if not get_binary("facter"):
             raise SkipTest
-        result = self._run('facter',[])
+        result = self._run('facter', [])
         assert "hostname" in result

     # temporarily disbabled since it occasionally hangs
     # ohai's fault, setup module doesn't actually run this
     # to get ohai's "facts" anyway
     #
     #def test_ohai(self):
     #    if not get_binary("facter"):
     #        raise SkipTest
     #    result = self._run('ohai',[])
     #    assert "hostname" in result

     def test_copy(self):
         # test copy module, change trigger, etc
-        pass
-
-    def test_copy(self):
-        input = self._get_test_file('sample.j2')
+        input_ = self._get_test_file('sample.j2')
         output = self._get_stage_file('sample.out')
         assert not os.path.exists(output)
         result = self._run('copy', [
-            "src=%s" % input,
+            "src=%s" % input_,
             "dest=%s" % output,
         ])
         assert os.path.exists(output)
-        data_in = file(input).read()
+        data_in = file(input_).read()
         data_out = file(output).read()
         assert data_in == data_out
         assert 'failed' not in result
         assert result['changed'] == True
         assert 'md5sum' in result
         result = self._run('copy', [
-            "src=%s" % input,
+            "src=%s" % input_,
             "dest=%s" % output,
         ])
         assert result['changed'] == False

     def test_template(self):
-        input = self._get_test_file('sample.j2')
+        input_ = self._get_test_file('sample.j2')
         metadata = self._get_test_file('metadata.json')
         output = self._get_stage_file('sample.out')
         result = self._run('template', [
-            "src=%s" % input,
+            "src=%s" % input_,
             "dest=%s" % output,
             "metadata=%s" % metadata
         ])
         assert os.path.exists(output)
         out = file(output).read()
         assert out.find("duck") != -1
         assert result['changed'] == True
         assert 'md5sum' in result
         assert 'failed' not in result
         result = self._run('template', [
-            "src=%s" % input,
+            "src=%s" % input_,
             "dest=%s" % output,
             "metadata=%s" % metadata
         ])
         assert result['changed'] == False

     def test_command(self):
         # test command module, change trigger, etc
         result = self._run('command', [ "/bin/echo", "hi" ])
         assert "failed" not in result
         assert "msg" not in result
         assert result['rc'] == 0
         assert result['stdout'] == 'hi'
         assert result['stderr'] == ''

         result = self._run('command', [ "/bin/false" ])
         assert result['rc'] == 1
         assert 'failed' not in result

         result = self._run('command', [ "/usr/bin/this_does_not_exist", "splat" ])
         assert 'msg' in result
         assert 'failed' in result
         assert 'rc' not in result

         result = self._run('shell', [ "/bin/echo", "$HOME" ])
         assert 'failed' not in result
         assert result['rc'] == 0

+    def test_large_output(self):
+        # Ensure reading a large amount of output from a command doesn't hang.
+        result = self._run('command', [ "/bin/cat", "/usr/share/dict/words" ])
+        assert "failed" not in result
+        assert "msg" not in result
+        assert result['rc'] == 0
+        assert len(result['stdout']) > 100000
+        assert result['stderr'] == ''
+
     def test_setup(self):
         output = self._get_stage_file('output.json')
         result = self._run('setup', [ "metadata=%s" % output, "a=2", "b=3", "c=4" ])
         assert 'failed' not in result
         assert 'md5sum' in result
         assert result['changed'] == True
         outds = json.loads(file(output).read())
         assert outds['c'] == '4'
         # not bothering to test change hooks here since ohai/facter results change
         # almost every time so changed is always true, this just tests that
         # rewriting the file is ok
         result = self._run('setup', [ "metadata=%s" % output, "a=2", "b=3", "c=4" ])
         print "RAW RESULT=%s" % result
         assert 'md5sum' in result

     def test_async(self):
         # test async launch and job status
         # of any particular module
         result = self._run('command', [ get_binary("sleep"), "3" ], background=20)
         assert 'ansible_job_id' in result
         assert 'started' in result
         jid = result['ansible_job_id']
         # no real chance of this op taking a while, but whatever
         time.sleep(5)
         # CLI will abstract this (when polling), but this is how it works internally
         result = self._run('async_status', [ "jid=%s" % jid ])
         # TODO: would be nice to have tests for supervisory process
         # killing job after X seconds
         assert 'finished' in result
         assert 'failed' not in result
         assert 'rc' in result
         assert 'stdout' in result
         assert result['ansible_job_id'] == jid

     def test_fetch(self):
-        input = self._get_test_file('sample.j2')
-        output = os.path.join(self.stage_dir, '127.0.0.2', input)
-        result = self._run('fetch', [ "src=%s" % input, "dest=%s" % self.stage_dir ])
+        input_ = self._get_test_file('sample.j2')
+        output = os.path.join(self.stage_dir, '127.0.0.2', input_)
+        result = self._run('fetch', [ "src=%s" % input_, "dest=%s" % self.stage_dir ])
         assert os.path.exists(output)
-        assert open(input).read() == open(output).read()
+        assert open(input_).read() == open(output).read()

     def test_yum(self):
         if not get_binary("yum"):
             raise SkipTest
         result = self._run('yum', [ "list=repos" ])
         assert 'failed' not in result

     def test_git(self):
         # TODO: tests for the git module
         pass

     def test_service(self):
         # TODO: tests for the service module
         pass
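
For context on the new test: test_large_output exercises the classic pipe-buffer deadlock. If a parent process waits on a child before draining its stdout, the child blocks as soon as the pipe's kernel buffer (typically around 64 KB) fills, and the wait never returns. A minimal standalone sketch of the failure mode and the usual fix — this is not code from the commit, just plain subprocess for illustration, reading the same words file the test uses:

    import subprocess

    p = subprocess.Popen(["/bin/cat", "/usr/share/dict/words"],
                         stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    # Deadlock-prone pattern: calling p.wait() here, before reading
    # p.stdout, hangs once the child fills the pipe buffer.
    # communicate() reads both streams while waiting, so it stays
    # safe for arbitrarily large output.
    out, err = p.communicate()
    assert p.returncode == 0
    assert len(out) > 100000
    assert err == ''

Similarly, test_async hints at the polling loop the CLI performs ("CLI will abstract this (when polling)"): launch with background=N, keep the returned ansible_job_id, and poll async_status until the job reports finished. A rough illustration of that loop, reusing the test's _run helper — the function name and timeout values here are invented for the example:

    import time

    def wait_for_job(run, jid, timeout=30, interval=1):
        # run is a callable like TestRunner._run above; poll async_status
        # until the job reports finished or we give up.
        deadline = time.time() + timeout
        while time.time() < deadline:
            result = run('async_status', ["jid=%s" % jid])
            if 'finished' in result:
                return result
            time.sleep(interval)
        raise RuntimeError("async job %s did not finish" % jid)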