
make perftest: for performance testing

This runs a selection of subunit tests and reduces the output to only
the time it takes to run each test.

The tests are listed in selftest/perf_tests.py.

Signed-off-by: Douglas Bagnall <douglas.bagnall@catalyst.net.nz>
Reviewed-by: Garming Sam <garming@catalyst.net.nz>
Author: Douglas Bagnall, 2016-08-17 10:56:50 +12:00 (committed by Douglas Bagnall)
Commit: e908873757, parent 288efc5560
5 changed files with 132 additions and 8 deletions
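A quick usage sketch, not part of the commit itself: the new Makefile target simply drives waf with --perf-test, and the filtered output is one timing line per test in the format emitted by PerfFilterOps below (the test id and value here are placeholders):

    $ make perftest
    elapsed-time: <test id>: <seconds as a float>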

Makefile

@@ -16,6 +16,9 @@ uninstall:
 test:
 	$(WAF) test $(TEST_OPTIONS)
+perftest:
+	$(WAF) test --perf-test $(TEST_OPTIONS)
 help:
 	@echo NOTE: to run extended waf options use $(WAF_BINARY) or modify your PATH
 	$(WAF) --help

selftest/filter-subunit

@@ -44,6 +44,8 @@ parser.add_option("--fail-on-empty", default=False,
                   action="store_true", help="Fail if there was no subunit output")
 parser.add_option("--list", default=False,
                   action="store_true", help="Operate in list mode")
+parser.add_option("--perf-test-output", default=False,
+                  action="store_true", help="orientate output for performance measurement")
 opts, args = parser.parse_args()
 if opts.list:

@@ -51,6 +53,18 @@ if opts.list:
         sys.stdout.write("%s%s%s\n" % (opts.prefix, l.rstrip(), opts.suffix))
     sys.exit(0)
+
+if opts.perf_test_output:
+    bad_options = []
+    for bad_opt in ('fail_immediately', 'strip_passed_output',
+                    'flapping', 'expected_failures'):
+        if getattr(opts, bad_opt):
+            bad_options.append(bad_opt)
+    if bad_options:
+        print >>sys.stderr, ("--perf-test-output is incompatible with --%s" %
+                             (', --'.join(x.replace('_', '-')
+                                          for x in bad_options)))
+        sys.exit(1)
 
 if opts.expected_failures:
     expected_failures = subunithelper.read_test_regexes(opts.expected_failures)
 else:
@@ -76,7 +90,12 @@ def handle_sigint(sig, stack):
 signal.signal(signal.SIGINT, handle_sigint)
 
 out = subunithelper.SubunitOps(sys.stdout)
-msg_ops = subunithelper.FilterOps(out, opts.prefix, opts.suffix, expected_failures,
+if opts.perf_test_output:
+    msg_ops = subunithelper.PerfFilterOps(out, opts.prefix, opts.suffix)
+else:
+    msg_ops = subunithelper.FilterOps(out, opts.prefix, opts.suffix,
+                                      expected_failures,
+                                      opts.strip_passed_output,
+                                      fail_immediately=opts.fail_immediately,
+                                      flapping=flapping)

selftest/perf_tests.py (new file, 26 lines)

@@ -0,0 +1,26 @@
+#!/usr/bin/python
+
+# This script generates a list of testsuites that should be run to
+# test Samba performance.
+#
+# These tests are not intended to exercise every aspect of Samba, but
+# to perform common simple functions in order to ascertain performance.
+#
+# The syntax for a testsuite is "-- TEST --" on a single line, followed
+# by the name of the test, the environment it needs and the command to run, all
+# three separated by newlines. All other lines in the output are considered
+# comments.
+
+from selftesthelpers import *
+
+samba4srcdir = source4dir()
+samba4bindir = bindir()
+
+plantestsuite_loadlist("samba4.ldap.ad_dc_performance.python(ad_dc_ntvfs)",
+                       "ad_dc_ntvfs",
+                       [python, os.path.join(samba4srcdir,
+                                             "dsdb/tests/python/ad_dc_performance.py"),
+                        '$SERVER', '-U"$USERNAME%$PASSWORD"',
+                        '--workgroup=$DOMAIN',
+                        '$LOADLIST', '$LISTOPT'])
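The header comment above describes the list protocol that the selftest harness consumes. Purely as an illustration (a sketch of that description, not the real selftesthelpers code), an emitter for one suite would look roughly like:

    import sys

    def emit_testsuite(name, env, cmdline):
        # One "-- TEST --" marker per suite, then the name, the environment
        # and the command, each on its own line; any other output is a comment.
        sys.stdout.write("-- TEST --\n")
        sys.stdout.write(name + "\n")
        sys.stdout.write(env + "\n")
        sys.stdout.write(" ".join(cmdline) + "\n")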

selftest/subunithelper.py

@@ -17,6 +17,7 @@
 __all__ = ['parse_results']
 
+import datetime
 import re
 import sys
 from samba import subunit
@@ -429,6 +430,73 @@ class FilterOps(unittest.TestResult):
         self.fail_immediately = fail_immediately
 
+
+class PerfFilterOps(unittest.TestResult):
+
+    def progress(self, delta, whence):
+        pass
+
+    def output_msg(self, msg):
+        pass
+
+    def control_msg(self, msg):
+        pass
+
+    def skip_testsuite(self, name, reason=None):
+        self._ops.skip_testsuite(name, reason)
+
+    def start_testsuite(self, name):
+        self.suite_has_time = False
+
+    def end_testsuite(self, name, result, reason=None):
+        pass
+
+    def _add_prefix(self, test):
+        return subunit.RemotedTestCase(self.prefix + test.id() + self.suffix)
+
+    def time(self, time):
+        self.latest_time = time
+        #self._ops.output_msg("found time %s\n" % time)
+        self.suite_has_time = True
+
+    def get_time(self):
+        if self.suite_has_time:
+            return self.latest_time
+        return datetime.datetime.utcnow()
+
+    def startTest(self, test):
+        self.seen_output = True
+        test = self._add_prefix(test)
+        self.starts[test.id()] = self.get_time()
+
+    def addSuccess(self, test):
+        test = self._add_prefix(test)
+        tid = test.id()
+        if tid not in self.starts:
+            self._ops.addError(test, "%s succeeded without ever starting!" % tid)
+        delta = self.get_time() - self.starts[tid]
+        self._ops.output_msg("elapsed-time: %s: %f\n" % (tid, delta.total_seconds()))
+
+    def addFailure(self, test, err=''):
+        tid = test.id()
+        delta = self.get_time() - self.starts[tid]
+        self._ops.output_msg("failure: %s failed after %f seconds (%s)\n" %
+                             (tid, delta.total_seconds(), err))
+
+    def addError(self, test, err=''):
+        tid = test.id()
+        delta = self.get_time() - self.starts[tid]
+        self._ops.output_msg("error: %s failed after %f seconds (%s)\n" %
+                             (tid, delta.total_seconds(), err))
+
+    def __init__(self, out, prefix='', suffix=''):
+        self._ops = out
+        self.prefix = prefix or ''
+        self.suffix = suffix or ''
+        self.starts = {}
+        self.seen_output = False
+        self.suite_has_time = False
+
+
 class PlainFormatter(TestsuiteEnabledTestResult):
 
     def __init__(self, verbose, immediate, statistics,
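Because every passing test is reduced to a single elapsed-time line by the class above, comparing runs needs only trivial post-processing. A hypothetical helper (not part of the commit) that collects those timings from a saved output file:

    import re

    ELAPSED = re.compile(r'^elapsed-time: (.+): ([0-9.]+)$')

    def read_timings(path):
        # Map test id -> seconds, one entry per "elapsed-time:" line.
        timings = {}
        for line in open(path):
            m = ELAPSED.match(line.strip())
            if m:
                timings[m.group(1)] = float(m.group(2))
        return timings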

selftest/wscript

@@ -79,6 +79,8 @@ def set_options(opt):
                   action="store_true", dest='SOCKET_WRAPPER_KEEP_PCAP', default=False)
     gr.add_option('--random-order', dest='RANDOM_ORDER', default=False,
                   action="store_true", help="Run testsuites in random order")
+    gr.add_option('--perf-test', dest='PERF_TEST', default=False,
+                  action="store_true", help="run performance tests only")
 
 def configure(conf):
     conf.env.SELFTEST_PREFIX = Options.options.SELFTEST_PREFIX
@@ -145,7 +147,10 @@ def cmd_testonly(opt):
         env.OPTIONS += ' --socket-wrapper-keep-pcap'
     if Options.options.RANDOM_ORDER:
         env.OPTIONS += ' --random-order'
-    if os.environ.get('RUN_FROM_BUILD_FARM') is not None:
+    if Options.options.PERF_TEST:
+        env.FILTER_OPTIONS = ('${PYTHON} -u ${srcdir}/selftest/filter-subunit '
+                              '--perf-test-output')
+    elif os.environ.get('RUN_FROM_BUILD_FARM') is not None:
         env.FILTER_OPTIONS = '${FILTER_XFAIL} --strip-passed-output'
     else:
         env.FILTER_OPTIONS = '${FILTER_XFAIL}'
@@ -193,6 +198,9 @@ def cmd_testonly(opt):
     if not os.path.isdir(env.SELFTEST_PREFIX):
         os.makedirs(env.SELFTEST_PREFIX, int('755', 8))
 
+    if Options.options.PERF_TEST:
+        env.TESTLISTS = '--testlist="${PYTHON} ${srcdir}/selftest/perf_tests.py|" '
+    else:
+        env.TESTLISTS = ('--testlist="${PYTHON} ${srcdir}/selftest/tests.py|" ' +
+                         '--testlist="${PYTHON} ${srcdir}/source3/selftest/tests.py|" ' +
+                         '--testlist="${PYTHON} ${srcdir}/source4/selftest/tests.py|"')
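One detail worth spelling out about the TESTLISTS values above: the trailing '|' appears to follow the selftest convention (treat this as an assumption, not something stated in the commit) that the list is produced by running the quoted command and reading its standard output, rather than by opening a file. A minimal sketch of that assumed convention:

    import subprocess

    def load_testlist(spec):
        # Assumed convention: a trailing '|' means "run the command and take
        # the suite list from its stdout"; otherwise treat spec as a file name.
        if spec.endswith("|"):
            return subprocess.check_output(spec[:-1], shell=True)
        with open(spec) as f:
            return f.read()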