mirror of
https://github.com/samba-team/samba.git
synced 2025-03-29 02:50:28 +03:00
selftest: Convert filter-subunit to Python so the subunit Python module
can be used later.
This commit is contained in:
parent
ae5381d34c
commit
08161e093d
44
selftest/filter-subunit
Executable file
44
selftest/filter-subunit
Executable file
@ -0,0 +1,44 @@
|
||||
#!/usr/bin/env python
# Filter a subunit stream
# Copyright (C) Jelmer Vernooij <jelmer@samba.org>
# Published under the GNU GPL, v3 or later

import optparse
import subunithelper
import sys
import signal

parser = optparse.OptionParser("filter-subunit [options] < instream > outstream")
parser.add_option("--expected-failures", type="string",
                  help="File containing list of regexes matching tests to consider known "
                       "failures")
parser.add_option("--strip-passed-output", action="store_true",
                  help="Whether to strip output from tests that passed")

parser.add_option("--prefix", type="string",
                  help="Add prefix to all test names")

opts, args = parser.parse_args()

# Load the known-failure regexes, if a file was supplied on the command line.
expected_failures = []
if opts.expected_failures:
    expected_failures = list(subunithelper.read_test_regexes(opts.expected_failures))

# Counters updated by the parser as it consumes the stream.
statistics = dict.fromkeys([
    'TESTS_UNEXPECTED_OK',
    'TESTS_EXPECTED_OK',
    'TESTS_UNEXPECTED_FAIL',
    'TESTS_EXPECTED_FAIL',
    'TESTS_ERROR',
    'TESTS_SKIP',
], 0)

# Exit quietly on Ctrl-C instead of dumping a KeyboardInterrupt traceback.
def handle_sigint(sig, stack):
    sys.exit(0)
signal.signal(signal.SIGINT, handle_sigint)

msg_ops = subunithelper.FilterOps(opts.prefix, expected_failures,
                                  opts.strip_passed_output)

# Propagate the parser's return code as the process exit status.
sys.exit(subunithelper.parse_results(msg_ops, statistics, sys.stdin))
|
@ -1,100 +0,0 @@
|
||||
#!/usr/bin/perl
# Filter a subunit stream
# Copyright (C) Jelmer Vernooij <jelmer@samba.org>
# Published under the GNU GPL, v3 or later

=pod

=head1 NAME

filter-subunit - Filter a subunit stream

=head1 SYNOPSIS

filter-subunit --help

filter-subunit --prefix=PREFIX --known-failures=FILE < in-stream > out-stream

=head1 DESCRIPTION

Simple Subunit stream filter that will change failures to known failures
based on a list of regular expressions.

=head1 OPTIONS

=over 4

=item I<--prefix>

Add the specified prefix to all test names.

=item I<--expected-failures>

Specify a file containing a list of tests that are expected to fail. Failures
for these tests will be counted as successes, successes will be counted as
failures.

The format for the file is, one entry per line:

TESTSUITE-NAME.TEST-NAME

The reason for a test can also be specified, by adding a hash sign (#) and the reason
after the test name.

=head1 LICENSE

selftest is licensed under the GNU General Public License L<http://www.gnu.org/licenses/gpl.html>.


=head1 AUTHOR

Jelmer Vernooij

=cut

use Getopt::Long;
use strict;
use FindBin qw($RealBin $Script);
use lib "$RealBin";
use Subunit qw(parse_results);
use Subunit::Filter;

# Command-line option holders, filled in by GetOptions below.
my $opt_expected_failures = undef;
my $opt_help = 0;
my $opt_prefix = undef;
my $opt_strip_ok_output = 0;
# (regex, reason) pairs loaded from the --expected-failures file.
my @expected_failures = ();

my $result = GetOptions(
	'expected-failures=s' => \$opt_expected_failures,
	'strip-passed-output' => \$opt_strip_ok_output,
	'prefix=s' => \$opt_prefix,
	'help' => \$opt_help,
	);
# GetOptions returns false on unknown/malformed options.
exit(1) if (not $result);

if ($opt_help) {
	print "Usage: filter-subunit [--prefix=PREFIX] [--expected-failures=FILE]... < instream > outstream\n";
	exit(0);
}

if (defined($opt_expected_failures)) {
	@expected_failures = Subunit::Filter::read_test_regexes($opt_expected_failures);
}

# we want unbuffered output
$| = 1;

# Result counters updated by parse_results while consuming the stream.
my $statistics = {
	TESTS_UNEXPECTED_OK => 0,
	TESTS_EXPECTED_OK => 0,
	TESTS_UNEXPECTED_FAIL => 0,
	TESTS_EXPECTED_FAIL => 0,
	TESTS_ERROR => 0,
	TESTS_SKIP => 0,
};

my $msg_ops = new Subunit::Filter($opt_prefix, \@expected_failures,
				  $opt_strip_ok_output);

# Exit with parse_results' return code.
exit(parse_results($msg_ops, $statistics, *STDIN));
|
@ -18,6 +18,7 @@
|
||||
# Public API of this helper module.
__all__ = ['parse_results']

import re
import sys
import time

# Result tokens accepted on subunit stream status lines; the testsuite-*
# entries are Samba-specific extensions alongside the upstream subunit results.
VALID_RESULTS = ['success', 'successful', 'failure', 'fail', 'skip', 'knownfail', 'error', 'xfail', 'skip-testsuite', 'testsuite-failure', 'testsuite-xfail', 'testsuite-success', 'testsuite-error']
|
||||
@ -144,8 +145,8 @@ class SubunitOps(object):
|
||||
self.end_test(name, "xfail", reason)
|
||||
|
||||
def report_time(self, t):
|
||||
(sec, min, hour, mday, mon, year, wday, yday, isdst) = time.localtimet(t)
|
||||
print "time: %04d-%02d-%02d %02d:%02d:%02d" % (year+1900, mon+1, mday, hour, min, sec)
|
||||
(year, mon, mday, hour, min, sec, wday, yday, isdst) = time.localtime(t)
|
||||
print "time: %04d-%02d-%02d %02d:%02d:%02d" % (year, mon, mday, hour, min, sec)
|
||||
|
||||
# The following are Samba extensions:
|
||||
def start_testsuite(self, name):
|
||||
@ -159,11 +160,139 @@ class SubunitOps(object):
|
||||
|
||||
def end_testsuite(self, name, result, reason=None):
|
||||
if reason:
|
||||
print "testsuite-$result: %s [" % name
|
||||
print "testsuite-%s: %s [" % (result, name)
|
||||
print "%s" % reason
|
||||
print "]"
|
||||
else:
|
||||
print "testsuite-$result: %s" % name
|
||||
print "testsuite-%s: %s" % (result, name)
|
||||
|
||||
    def testsuite_count(self, count):
        # Announce the total number of testsuites expected on the stream.
        print "testsuite-count: %d" % count
|
||||
|
||||
|
||||
def read_test_regexes(name):
    """Yield (regex, reason) pairs from the known-failure file *name*.

    Blank lines and lines whose first character is '#' are skipped.  On
    other lines an optional reason may follow the regex, separated by a
    '#'; when absent the reason is None.
    """
    f = open(name, 'r')
    try:
        for line in f:
            entry = line.strip()
            # Ignore blanks and full-line comments.
            if entry == "" or entry[0] == "#":
                continue
            if "#" not in entry:
                yield entry, None
            else:
                regex, reason = entry.split("#", 1)
                yield (regex.strip(), reason.strip())
    finally:
        f.close()
|
||||
|
||||
|
||||
def find_in_list(regexes, fullname):
    """Return the reason for the first regex matching *fullname*.

    A matching entry without a recorded reason yields "", so callers can
    still distinguish "matched, no reason" from "no match" (None).
    """
    for pattern, why in regexes:
        if not re.match(pattern, fullname):
            continue
        if why is None:
            return ""
        return why
    return None
|
||||
|
||||
|
||||
class FilterOps(object):
    # Message-ops sink used by filter-subunit: rewrites a subunit stream,
    # re-labelling expected failures as xfail, optionally prefixing test
    # names and stripping output from passing tests, then re-emits the
    # stream via a SubunitOps instance (self._ops).

    def control_msg(self, msg):
        pass # We regenerate control messages, so ignore this

    def report_time(self, time):
        # NOTE(review): the parameter shadows the `time` module here; it is
        # a timestamp value forwarded verbatim.
        self._ops.report_time(time)

    def output_msg(self, msg):
        # Pass output straight through unless we are buffering it for a
        # test whose output may be stripped (see start_test/end_test).
        if self.output is None:
            sys.stdout.write(msg)
        else:
            self.output+=msg

    def start_test(self, testname):
        if self.prefix is not None:
            testname = self.prefix + testname

        # Begin buffering output so it can be dropped if the test passes.
        if self.strip_ok_output:
            self.output = ""

        self._ops.start_test(testname)

    def end_test(self, testname, result, unexpected, reason):
        if self.prefix is not None:
            testname = self.prefix + testname

        # A failure the stream itself marks as expected becomes xfail.
        if result in ("fail", "failure") and not unexpected:
            result = "xfail"
            self.xfail_added+=1
            self.total_xfail+=1
        # A failure matching the knownfail regex list also becomes xfail.
        xfail_reason = find_in_list(self.expected_failures, testname)
        if xfail_reason is not None and result in ("fail", "failure"):
            result = "xfail"
            self.xfail_added+=1
            self.total_xfail+=1
            # NOTE(review): this assumes `reason` is not None when a failure
            # carries a knownfail match; a None reason would raise TypeError
            # here — confirm upstream always supplies one.
            reason += xfail_reason

        if result in ("fail", "failure"):
            self.fail_added+=1
            self.total_fail+=1

        if result == "error":
            self.error_added+=1
            self.total_error+=1

        # Flush buffered output only for interesting results; passing,
        # xfailed and skipped tests have their output discarded.
        if self.strip_ok_output:
            if result not in ("success", "xfail", "skip"):
                print self.output
            self.output = None

        self._ops.end_test(testname, result, reason)

    def skip_testsuite(self, name, reason=None):
        self._ops.skip_testsuite(name, reason)

    def start_testsuite(self, name):
        self._ops.start_testsuite(name)

        # Reset the per-testsuite counters (the total_* counters persist).
        self.error_added = 0
        self.fail_added = 0
        self.xfail_added = 0

    def end_testsuite(self, name, result, reason=None):
        xfail = False

        # The suite as a whole is only xfail if it saw xfails and no
        # genuine failures or errors.
        if self.xfail_added > 0:
            xfail = True
        if self.fail_added > 0 or self.error_added > 0:
            xfail = False

        if xfail and result in ("fail", "failure"):
            result = "xfail"

        # Escalate the suite result to failure/error if any test in it
        # failed or errored, appending a count to the reason text.
        if self.fail_added > 0 and result != "failure":
            result = "failure"
            if reason is None:
                reason = "Subunit/Filter Reason"
            reason += "\n failures[%d]" % self.fail_added

        if self.error_added > 0 and result != "error":
            result = "error"
            if reason is None:
                reason = "Subunit/Filter Reason"
            reason += "\n errors[%d]" % self.error_added

        self._ops.end_testsuite(name, result, reason)

    def testsuite_count(self, count):
        self._ops.testsuite_count(count)

    def __init__(self, prefix, expected_failures, strip_ok_output):
        # prefix: string prepended to every test name, or None.
        # expected_failures: (regex, reason) pairs from read_test_regexes.
        # strip_ok_output: when true, drop output of passing tests.
        self._ops = SubunitOps()
        self.output = None
        self.prefix = prefix
        self.expected_failures = expected_failures
        self.strip_ok_output = strip_ok_output
        self.xfail_added = 0
        self.total_xfail = 0
        self.total_error = 0
        self.total_fail = 0
|
||||
|
@ -3251,7 +3251,7 @@ selftest:: all torture timelimit
|
||||
--testlist="$(srcdir)/selftest/tests.sh|" \
|
||||
--exclude=$(srcdir)/selftest/skip \
|
||||
--socket-wrapper $(TESTS) | \
|
||||
$(PERL) $(selftestdir)/filter-subunit.pl \
|
||||
$(PYTHON) $(selftestdir)/filter-subunit \
|
||||
--expected-failures=$(srcdir)/selftest/knownfail | \
|
||||
$(PYTHON) $(selftestdir)/format-subunit --immediate
|
||||
|
||||
|
@ -13,7 +13,7 @@ ST_DONE_TEST = @test -f $(selftest_prefix)/st_done || { echo "SELFTEST FAILED";
|
||||
|
||||
SELFTEST_NOSLOW_OPTS = --exclude=$(srcdir)/selftest/slow
|
||||
SELFTEST_QUICK_OPTS = $(SELFTEST_NOSLOW_OPTS) --quick --include=$(srcdir)/selftest/quick
|
||||
FILTER_XFAIL = $(PERL) $(selftestdir)/filter-subunit.pl --expected-failures=$(srcdir)/selftest/knownfail
|
||||
FILTER_XFAIL = $(PYTHON) $(selftestdir)/filter-subunit --expected-failures=$(srcdir)/selftest/knownfail
|
||||
SUBUNIT_FORMATTER ?= $(PYTHON) $(selftestdir)/format-subunit --prefix=${selftest_prefix} --immediate
|
||||
FORMAT_TEST_OUTPUT = $(FILTER_XFAIL) | $(SUBUNIT_FORMATTER)
|
||||
|
||||
|
Loading…
x
Reference in New Issue
Block a user