subunit: Also import copies of filters and perl module.

Jelmer Vernooij 2010-03-30 15:03:41 +02:00
parent 9fe4b01ca6
commit 197c98292b
13 changed files with 1002 additions and 1 deletion

@@ -0,0 +1,105 @@
#!/usr/bin/env python
# subunit: extensions to python unittest to get test results from subprocesses.
# Copyright (C) 2008 Robert Collins <robertc@robertcollins.net>
# (C) 2009 Martin Pool
#
# Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
# license at the users choice. A copy of both licenses are available in the
# project source as Apache-2.0 and BSD. You may not use this file except in
# compliance with one of these two licences.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# license you chose for the specific language governing permissions and
# limitations under that license.
#
"""Filter a subunit stream to include/exclude tests.
The default is to strip successful tests.
Tests can be filtered by Python regular expressions with --with and --without,
which match both the test name and the error text (if any). The result
contains tests which match any of the --with expressions and none of the
--without expressions. For case-insensitive matching prepend '(?i)'.
Remember to quote shell metacharacters.
"""
from optparse import OptionParser
import sys
import unittest
import re
from subunit import (
DiscardStream,
ProtocolTestCase,
TestProtocolClient,
)
from subunit.test_results import TestResultFilter
parser = OptionParser(description=__doc__)
parser.add_option("--error", action="store_false",
help="include errors", default=False, dest="error")
parser.add_option("-e", "--no-error", action="store_true",
help="exclude errors", dest="error")
parser.add_option("--failure", action="store_false",
help="include failures", default=False, dest="failure")
parser.add_option("-f", "--no-failure", action="store_true",
help="include failures", dest="failure")
parser.add_option("--no-passthrough", action="store_true",
help="Hide all non subunit input.", default=False, dest="no_passthrough")
parser.add_option("-s", "--success", action="store_false",
help="include successes", dest="success")
parser.add_option("--no-skip", action="store_true",
help="exclude skips", dest="skip")
parser.add_option("--no-success", action="store_true",
help="exclude successes", default=True, dest="success")
parser.add_option("-m", "--with", type=str,
help="regexp to include (case-sensitive by default)",
action="append", dest="with_regexps")
parser.add_option("--without", type=str,
help="regexp to exclude (case-sensitive by default)",
action="append", dest="without_regexps")
(options, args) = parser.parse_args()
def _compile_re_from_list(l):
return re.compile("|".join(l), re.MULTILINE)
def _make_regexp_filter(with_regexps, without_regexps):
"""Make a callback that checks tests against regexps.
with_regexps and without_regexps are each either a list of regexp strings,
or None.
"""
with_re = with_regexps and _compile_re_from_list(with_regexps)
without_re = without_regexps and _compile_re_from_list(without_regexps)
def check_regexps(test, outcome, err, details):
"""Check if this test and error match the regexp filters."""
test_str = str(test) + outcome + str(err) + str(details)
if with_re and not with_re.search(test_str):
return False
if without_re and without_re.search(test_str):
return False
return True
return check_regexps
regexp_filter = _make_regexp_filter(options.with_regexps,
options.without_regexps)
result = TestProtocolClient(sys.stdout)
result = TestResultFilter(result, filter_error=options.error,
filter_failure=options.failure, filter_success=options.success,
filter_skip=options.skip,
filter_predicate=regexp_filter)
if options.no_passthrough:
passthrough_stream = DiscardStream()
else:
passthrough_stream = None
test = ProtocolTestCase(sys.stdin, passthrough=passthrough_stream)
test.run(result)
sys.exit(0)
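
The core of this filter is the predicate built by _make_regexp_filter: every --with pattern (and every --without pattern) is joined into one alternation, which is then matched against the concatenation of test id, outcome, error text, and details. A minimal standalone sketch of that matching logic, with made-up test names, not part of the imported file:

import re

def make_regexp_filter(with_regexps, without_regexps):
    # Mirrors subunit-filter: keep a test only if it matches some --with
    # pattern (when any were given) and no --without pattern.
    def compile_list(patterns):
        return re.compile("|".join(patterns), re.MULTILINE)
    with_re = with_regexps and compile_list(with_regexps)
    without_re = without_regexps and compile_list(without_regexps)
    def check(test_id, outcome, err):
        text = test_id + outcome + str(err)
        if with_re and not with_re.search(text):
            return False
        if without_re and without_re.search(text):
            return False
        return True
    return check

keep = make_regexp_filter(["samba4"], ["(?i)known.?flapping"])
print(keep("samba4.ldap.bind", "failure", "NT_STATUS_ACCESS_DENIED"))  # True
print(keep("samba4.dns", "failure", "known flapping test"))            # False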

lib/subunit/filters/subunit-ls Executable file
@@ -0,0 +1,93 @@
#!/usr/bin/env python
# subunit: extensions to python unittest to get test results from subprocesses.
# Copyright (C) 2008 Robert Collins <robertc@robertcollins.net>
#
# Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
# license at the users choice. A copy of both licenses are available in the
# project source as Apache-2.0 and BSD. You may not use this file except in
# compliance with one of these two licences.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# license you chose for the specific language governing permissions and
# limitations under that license.
#
"""List tests in a subunit stream."""
from optparse import OptionParser
import sys
import unittest
from subunit import DiscardStream, ProtocolTestCase
class TestIdPrintingResult(unittest.TestResult):
def __init__(self, stream, show_times=False):
"""Create a FilterResult object outputting to stream."""
unittest.TestResult.__init__(self)
self._stream = stream
self.failed_tests = 0
self.__time = 0
self.show_times = show_times
self._test = None
self._test_duration = 0
def addError(self, test, err):
self.failed_tests += 1
self._test = test
def addFailure(self, test, err):
self.failed_tests += 1
self._test = test
def addSuccess(self, test):
self._test = test
def reportTest(self, test, duration):
if self.show_times:
seconds = duration.seconds
seconds += duration.days * 3600 * 24
seconds += duration.microseconds / 1000000.0
self._stream.write(test.id() + ' %0.3f\n' % seconds)
else:
self._stream.write(test.id() + '\n')
def startTest(self, test):
self._start_time = self._time()
def stopTest(self, test):
test_duration = self._time() - self._start_time
self.reportTest(self._test, test_duration)
def time(self, time):
self.__time = time
def _time(self):
return self.__time
def wasSuccessful(self):
"Tells whether or not this result was a success"
return self.failed_tests == 0
parser = OptionParser(description=__doc__)
parser.add_option("--times", action="store_true",
help="list the time each test took (requires a timestamped stream)",
default=False)
parser.add_option("--no-passthrough", action="store_true",
help="Hide all non subunit input.", default=False, dest="no_passthrough")
(options, args) = parser.parse_args()
result = TestIdPrintingResult(sys.stdout, options.times)
if options.no_passthrough:
passthrough_stream = DiscardStream()
else:
passthrough_stream = None
test = ProtocolTestCase(sys.stdin, passthrough=passthrough_stream)
test.run(result)
if result.wasSuccessful():
exit_code = 0
else:
exit_code = 1
sys.exit(exit_code)
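
Worth noting in reportTest above: the duration is a datetime.timedelta, folded into fractional seconds by hand because timedelta.total_seconds() only arrived in Python 2.7. A quick illustration of the same arithmetic, not part of the imported file:

from datetime import timedelta

def to_seconds(duration):
    # Same conversion as TestIdPrintingResult.reportTest.
    seconds = duration.seconds
    seconds += duration.days * 3600 * 24
    seconds += duration.microseconds / 1000000.0
    return seconds

print("%0.3f" % to_seconds(timedelta(minutes=1, seconds=2, microseconds=500000)))  # 62.500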

@@ -0,0 +1,65 @@
#!/usr/bin/env python
# subunit: extensions to python unittest to get test results from subprocesses.
# Copyright (C) 2010 Jelmer Vernooij <jelmer@samba.org>
#
# Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
# license at the users choice. A copy of both licenses are available in the
# project source as Apache-2.0 and BSD. You may not use this file except in
# compliance with one of these two licences.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# license you chose for the specific language governing permissions and
# limitations under that license.
#
"""Notify the user of a finished test run."""
from optparse import OptionParser
import sys
import pygtk
pygtk.require('2.0')
import pynotify
from subunit import DiscardStream, ProtocolTestCase, TestResultStats
if not pynotify.init("Subunit-notify"):
sys.exit(1)
parser = OptionParser(description=__doc__)
parser.add_option("--no-passthrough", action="store_true",
help="Hide all non subunit input.", default=False, dest="no_passthrough")
parser.add_option("-f", "--forward", action="store_true", default=False,
help="Forward subunit stream on stdout.")
(options, args) = parser.parse_args()
result = TestResultStats(sys.stdout)
if options.no_passthrough:
passthrough_stream = DiscardStream()
else:
passthrough_stream = None
if options.forward:
forward_stream = sys.stdout
else:
forward_stream = None
test = ProtocolTestCase(sys.stdin, passthrough=passthrough_stream,
forward=forward_stream)
test.run(result)
if result.failed_tests > 0:
summary = "Test run failed"
else:
summary = "Test run successful"
body = "Total tests: %d; Passed: %d; Failed: %d" % (
result.total_tests,
result.passed_tests,
result.failed_tests,
)
nw = pynotify.Notification(summary, body)
nw.show()
if result.wasSuccessful():
exit_code = 0
else:
exit_code = 1
sys.exit(exit_code)

@@ -0,0 +1,41 @@
#!/usr/bin/env python
# subunit: extensions to python unittest to get test results from subprocesses.
# Copyright (C) 2009 Robert Collins <robertc@robertcollins.net>
#
# Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
# license at the users choice. A copy of both licenses are available in the
# project source as Apache-2.0 and BSD. You may not use this file except in
# compliance with one of these two licences.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# license you chose for the specific language governing permissions and
# limitations under that license.
#
"""Filter a subunit stream to get aggregate statistics."""
from optparse import OptionParser
import sys
import unittest
from subunit import DiscardStream, ProtocolTestCase, TestResultStats
parser = OptionParser(description=__doc__)
parser.add_option("--no-passthrough", action="store_true",
help="Hide all non subunit input.", default=False, dest="no_passthrough")
(options, args) = parser.parse_args()
result = TestResultStats(sys.stdout)
if options.no_passthrough:
passthrough_stream = DiscardStream()
else:
passthrough_stream = None
test = ProtocolTestCase(sys.stdin, passthrough=passthrough_stream)
test.run(result)
result.formatStats()
if result.wasSuccessful():
exit_code = 0
else:
exit_code = 1
sys.exit(exit_code)

@@ -0,0 +1,26 @@
#!/usr/bin/env python
# subunit: extensions to python unittest to get test results from subprocesses.
# Copyright (C) 2009 Robert Collins <robertc@robertcollins.net>
#
# Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
# license at the users choice. A copy of both licenses are available in the
# project source as Apache-2.0 and BSD. You may not use this file except in
# compliance with one of these two licences.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# license you chose for the specific language governing permissions and
# limitations under that license.
#
"""A filter to change tags on a subunit stream.
subunit-tags foo -> adds foo
subunit-tags foo -bar -> adds foo and removes bar
"""
import sys
from subunit import tag_stream
sys.exit(tag_stream(sys.stdin, sys.stdout, sys.argv[1:]))
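
tag_stream does all the work here: it copies the subunit stream from its first argument to its second while applying the requested tag changes, so the command line `subunit-tags foo -bar` becomes the tag list ['foo', '-bar']. A rough sketch of calling it with in-memory streams instead of stdin/stdout (the sample stream is invented; exact output depends on the subunit version):

from StringIO import StringIO  # Python 2, matching the vintage of these filters
from subunit import tag_stream

original = StringIO("test: samba3.smbtorture\nsuccess: samba3.smbtorture\n")
filtered = StringIO()
# Equivalent of `subunit-tags hardware -slow`: add 'hardware', drop 'slow'.
tag_stream(original, filtered, ["hardware", "-slow"])
print(filtered.getvalue())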

lib/subunit/filters/subunit2gtk Executable file
@@ -0,0 +1,259 @@
#!/usr/bin/env python
# subunit: extensions to python unittest to get test results from subprocesses.
# Copyright (C) 2009 Robert Collins <robertc@robertcollins.net>
#
# Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
# license at the users choice. A copy of both licenses are available in the
# project source as Apache-2.0 and BSD. You may not use this file except in
# compliance with one of these two licences.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# license you chose for the specific language governing permissions and
# limitations under that license.
#
### The GTK progress bar __init__ function is derived from the pygtk tutorial:
# The PyGTK Tutorial is Copyright (C) 2001-2005 John Finlay.
#
# The GTK Tutorial is Copyright (C) 1997 Ian Main.
#
# Copyright (C) 1998-1999 Tony Gale.
#
# Permission is granted to make and distribute verbatim copies of this manual
# provided the copyright notice and this permission notice are preserved on all
# copies.
#
# Permission is granted to copy and distribute modified versions of this
# document under the conditions for verbatim copying, provided that this
# copyright notice is included exactly as in the original, and that the entire
# resulting derived work is distributed under the terms of a permission notice
# identical to this one.
#
# Permission is granted to copy and distribute translations of this document
# into another language, under the above conditions for modified versions.
#
# If you are intending to incorporate this document into a published work,
# please contact the maintainer, and we will make an effort to ensure that you
# have the most up to date information available.
#
# There is no guarantee that this document lives up to its intended purpose.
# This is simply provided as a free resource. As such, the authors and
# maintainers of the information provided within can not make any guarantee
# that the information is even accurate.
"""Display a subunit stream in a gtk progress window."""
import sys
import unittest
import pygtk
pygtk.require('2.0')
import gtk, gtk.gdk, gobject
from subunit import (
PROGRESS_POP,
PROGRESS_PUSH,
PROGRESS_SET,
TestProtocolServer,
)
from subunit.progress_model import ProgressModel
class GTKTestResult(unittest.TestResult):
def __init__(self):
super(GTKTestResult, self).__init__()
# Instance variables (in addition to TestResult)
self.window = None
self.run_label = None
self.ok_label = None
self.not_ok_label = None
self.total_tests = None
self.window = gtk.Window(gtk.WINDOW_TOPLEVEL)
self.window.set_resizable(True)
self.window.connect("destroy", gtk.main_quit)
self.window.set_title("Tests...")
self.window.set_border_width(0)
vbox = gtk.VBox(False, 5)
vbox.set_border_width(10)
self.window.add(vbox)
vbox.show()
# Create a centering alignment object
align = gtk.Alignment(0.5, 0.5, 0, 0)
vbox.pack_start(align, False, False, 5)
align.show()
# Create the ProgressBar
self.pbar = gtk.ProgressBar()
align.add(self.pbar)
self.pbar.set_text("Running")
self.pbar.show()
self.progress_model = ProgressModel()
separator = gtk.HSeparator()
vbox.pack_start(separator, False, False, 0)
separator.show()
# rows, columns, homogeneous
table = gtk.Table(2, 3, False)
vbox.pack_start(table, False, True, 0)
table.show()
# Show summary details about the run. Could use an expander.
label = gtk.Label("Run:")
table.attach(label, 0, 1, 1, 2, gtk.EXPAND | gtk.FILL,
gtk.EXPAND | gtk.FILL, 5, 5)
label.show()
self.run_label = gtk.Label("N/A")
table.attach(self.run_label, 1, 2, 1, 2, gtk.EXPAND | gtk.FILL,
gtk.EXPAND | gtk.FILL, 5, 5)
self.run_label.show()
label = gtk.Label("OK:")
table.attach(label, 0, 1, 2, 3, gtk.EXPAND | gtk.FILL,
gtk.EXPAND | gtk.FILL, 5, 5)
label.show()
self.ok_label = gtk.Label("N/A")
table.attach(self.ok_label, 1, 2, 2, 3, gtk.EXPAND | gtk.FILL,
gtk.EXPAND | gtk.FILL, 5, 5)
self.ok_label.show()
label = gtk.Label("Not OK:")
table.attach(label, 0, 1, 3, 4, gtk.EXPAND | gtk.FILL,
gtk.EXPAND | gtk.FILL, 5, 5)
label.show()
self.not_ok_label = gtk.Label("N/A")
table.attach(self.not_ok_label, 1, 2, 3, 4, gtk.EXPAND | gtk.FILL,
gtk.EXPAND | gtk.FILL, 5, 5)
self.not_ok_label.show()
self.window.show()
# For the demo.
self.window.set_keep_above(True)
self.window.present()
def stopTest(self, test):
super(GTKTestResult, self).stopTest(test)
self.progress_model.advance()
if self.progress_model.width() == 0:
self.pbar.pulse()
else:
pos = self.progress_model.pos()
width = self.progress_model.width()
percentage = (pos / float(width))
self.pbar.set_fraction(percentage)
def stopTestRun(self):
try:
super(GTKTestResult, self).stopTestRun()
except AttributeError:
pass
self.pbar.set_text('Finished')
def addError(self, test, err):
super(GTKTestResult, self).addError(test, err)
self.update_counts()
def addFailure(self, test, err):
super(GTKTestResult, self).addFailure(test, err)
self.update_counts()
def addSuccess(self, test):
super(GTKTestResult, self).addSuccess(test)
self.update_counts()
def addSkip(self, test, reason):
# addSkip is new in Python 2.7/3.1
addSkip = getattr(super(GTKTestResult, self), 'addSkip', None)
if callable(addSkip):
addSkip(test, reason)
self.update_counts()
def addExpectedFailure(self, test, err):
# addExpectedFailure is new in Python 2.7/3.1
addExpectedFailure = getattr(super(GTKTestResult, self),
'addExpectedFailure', None)
if callable(addExpectedFailure):
addExpectedFailure(test, err)
self.update_counts()
def addUnexpectedSuccess(self, test):
# addUnexpectedSuccess is new in Python 2.7/3.1
addUnexpectedSuccess = getattr(super(GTKTestResult, self),
'addUnexpectedSuccess', None)
if callable(addUnexpectedSuccess):
addUnexpectedSuccess(test)
self.update_counts()
def progress(self, offset, whence):
if whence == PROGRESS_PUSH:
self.progress_model.push()
elif whence == PROGRESS_POP:
self.progress_model.pop()
elif whence == PROGRESS_SET:
self.total_tests = offset
self.progress_model.set_width(offset)
else:
self.total_tests += offset
self.progress_model.adjust_width(offset)
def time(self, a_datetime):
# We don't try to estimate completion yet.
pass
def update_counts(self):
self.run_label.set_text(str(self.testsRun))
bad = len(self.failures + self.errors)
self.ok_label.set_text(str(self.testsRun - bad))
self.not_ok_label.set_text(str(bad))
class GIOProtocolTestCase(object):
def __init__(self, stream, result, on_finish):
self.stream = stream
self.schedule_read()
self.hup_id = gobject.io_add_watch(stream, gobject.IO_HUP, self.hup)
self.protocol = TestProtocolServer(result)
self.on_finish = on_finish
def read(self, source, condition, all=False):
#NB: \o/ actually blocks
line = source.readline()
if not line:
self.protocol.lostConnection()
self.on_finish()
return False
self.protocol.lineReceived(line)
# schedule more IO shortly - if we say we're willing to do it
# immediately we starve things.
if not all:
source_id = gobject.timeout_add(1, self.schedule_read)
return False
else:
return True
def schedule_read(self):
self.read_id = gobject.io_add_watch(self.stream, gobject.IO_IN, self.read)
def hup(self, source, condition):
while self.read(source, condition, all=True): pass
self.protocol.lostConnection()
gobject.source_remove(self.read_id)
self.on_finish()
return False
result = GTKTestResult()
test = GIOProtocolTestCase(sys.stdin, result, result.stopTestRun)
gtk.main()
if result.wasSuccessful():
exit_code = 0
else:
exit_code = 1
sys.exit(exit_code)
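
The progress handling above boils down to a few ProgressModel calls: set_width()/adjust_width() when PROGRESS_SET or relative progress instructions arrive, advance() from stopTest, and pos()/width() to derive the bar fraction (with pulse() as the fallback while the width is still zero). A small sketch of that bookkeeping without any GTK, using only the ProgressModel methods already exercised above:

from subunit.progress_model import ProgressModel

model = ProgressModel()
model.set_width(4)                 # as after a PROGRESS_SET announcing 4 tests
for _ in range(4):
    model.advance()                # one completed test, as in stopTest
    if model.width() == 0:
        print("width unknown yet - pulse the bar")
    else:
        print("fraction: %.2f" % (model.pos() / float(model.width())))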

@@ -0,0 +1,65 @@
#!/usr/bin/env python
# subunit: extensions to python unittest to get test results from subprocesses.
# Copyright (C) 2009 Robert Collins <robertc@robertcollins.net>
#
# Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
# license at the users choice. A copy of both licenses are available in the
# project source as Apache-2.0 and BSD. You may not use this file except in
# compliance with one of these two licences.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# license you chose for the specific language governing permissions and
# limitations under that license.
#
"""Filter a subunit stream to get aggregate statistics."""
from optparse import OptionParser
import sys
import unittest
from subunit import DiscardStream, ProtocolTestCase
try:
from junitxml import JUnitXmlResult
except ImportError:
sys.stderr.write("python-junitxml (https://launchpad.net/pyjunitxml or "
"http://pypi.python.org/pypi/junitxml) is required for this filter.")
raise
parser = OptionParser(description=__doc__)
parser.add_option("--no-passthrough", action="store_true",
help="Hide all non subunit input.", default=False, dest="no_passthrough")
parser.add_option("-o", "--output-to",
help="Output the XML to this path rather than stdout.")
parser.add_option("-f", "--forward", action="store_true", default=False,
help="Forward subunit stream on stdout.")
(options, args) = parser.parse_args()
if options.output_to is None:
output_to = sys.stdout
else:
output_to = file(options.output_to, 'wb')
try:
result = JUnitXmlResult(output_to)
if options.no_passthrough:
passthrough_stream = DiscardStream()
else:
passthrough_stream = None
if options.forward:
forward_stream = sys.stdout
else:
forward_stream = None
test = ProtocolTestCase(sys.stdin, passthrough=passthrough_stream,
forward=forward_stream)
result.startTestRun()
test.run(result)
result.stopTestRun()
finally:
if options.output_to is not None:
output_to.close()
if result.wasSuccessful():
exit_code = 0
else:
exit_code = 1
sys.exit(exit_code)

@@ -0,0 +1,48 @@
#!/usr/bin/env python
# subunit: extensions to python unittest to get test results from subprocesses.
# Copyright (C) 2009 Robert Collins <robertc@robertcollins.net>
#
# Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
# license at the users choice. A copy of both licenses are available in the
# project source as Apache-2.0 and BSD. You may not use this file except in
# compliance with one of these two licences.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# license you chose for the specific language governing permissions and
# limitations under that license.
#
"""Display a subunit stream through python's unittest test runner."""
from optparse import OptionParser
import sys
import unittest
from subunit import DiscardStream, ProtocolTestCase, TestProtocolServer
parser = OptionParser(description=__doc__)
parser.add_option("--no-passthrough", action="store_true",
help="Hide all non subunit input.", default=False, dest="no_passthrough")
parser.add_option("--progress", action="store_true",
help="Use bzrlib's test reporter (requires bzrlib)",
default=False)
(options, args) = parser.parse_args()
if options.no_passthrough:
passthrough_stream = DiscardStream()
else:
passthrough_stream = None
test = ProtocolTestCase(sys.stdin, passthrough=passthrough_stream)
if options.progress:
from bzrlib.tests import TextTestRunner
from bzrlib import ui
ui.ui_factory = ui.make_ui_for_terminal(None, sys.stdout, sys.stderr)
runner = TextTestRunner()
else:
runner = unittest.TextTestRunner(verbosity=2)
if runner.run(test).wasSuccessful():
exit_code = 0
else:
exit_code = 1
sys.exit(exit_code)

lib/subunit/perl/Makefile.PL.in Executable file
@@ -0,0 +1,20 @@
use ExtUtils::MakeMaker;
WriteMakefile(
'INSTALL_BASE' => '@prefix@',
'NAME' => 'Subunit',
'VERSION' => '@SUBUNIT_VERSION@',
'test' => { 'TESTS' => 'tests/*.pl' },
'PMLIBDIRS' => [ 'lib' ],
'EXE_FILES' => [ '@abs_srcdir@/subunit-diff' ],
);
sub MY::postamble {
<<'EOT';
check: # test
uninstall_distcheck:
rm -fr $(DESTINSTALLARCHLIB)
VPATH = @srcdir@
.PHONY: uninstall_distcheck
EOT
}

@@ -0,0 +1,162 @@
# Perl module for parsing and generating the Subunit protocol
# Copyright (C) 2008-2009 Jelmer Vernooij <jelmer@samba.org>
#
# Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
# license at the users choice. A copy of both licenses are available in the
# project source as Apache-2.0 and BSD. You may not use this file except in
# compliance with one of these two licences.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# license you chose for the specific language governing permissions and
# limitations under that license.
package Subunit;
use POSIX;
require Exporter;
@ISA = qw(Exporter);
@EXPORT_OK = qw(parse_results $VERSION);
use vars qw ( $VERSION );
$VERSION = '0.0.2';
use strict;
sub parse_results($$$)
{
my ($msg_ops, $statistics, $fh) = @_;
my $expected_fail = 0;
my $unexpected_fail = 0;
my $unexpected_err = 0;
my $open_tests = [];
while(<$fh>) {
if (/^test: (.+)\n/) {
$msg_ops->control_msg($_);
$msg_ops->start_test($1);
push (@$open_tests, $1);
} elsif (/^time: (\d+)-(\d+)-(\d+) (\d+):(\d+):(\d+)Z\n/) {
$msg_ops->report_time(mktime($6, $5, $4, $3, $2 - 1, $1 - 1900)); # mktime takes a 0-based month
} elsif (/^(success|successful|failure|fail|skip|knownfail|error|xfail): (.*?)( \[)?([ \t]*)\n/) {
$msg_ops->control_msg($_);
my $result = $1;
my $testname = $2;
my $reason = undef;
if ($3) {
$reason = "";
# reason may be specified in next lines
my $terminated = 0;
while(<$fh>) {
$msg_ops->control_msg($_);
if ($_ eq "]\n") { $terminated = 1; last; } else { $reason .= $_; }
}
unless ($terminated) {
$statistics->{TESTS_ERROR}++;
$msg_ops->end_test($testname, "error", 1, "reason ($result) interrupted");
return 1;
}
}
if ($result eq "success" or $result eq "successful") {
pop(@$open_tests); #FIXME: Check that popped value == $testname
$statistics->{TESTS_EXPECTED_OK}++;
$msg_ops->end_test($testname, $result, 0, $reason);
} elsif ($result eq "xfail" or $result eq "knownfail") {
pop(@$open_tests); #FIXME: Check that popped value == $testname
$statistics->{TESTS_EXPECTED_FAIL}++;
$msg_ops->end_test($testname, $result, 0, $reason);
$expected_fail++;
} elsif ($result eq "failure" or $result eq "fail") {
pop(@$open_tests); #FIXME: Check that popped value == $testname
$statistics->{TESTS_UNEXPECTED_FAIL}++;
$msg_ops->end_test($testname, $result, 1, $reason);
$unexpected_fail++;
} elsif ($result eq "skip") {
$statistics->{TESTS_SKIP}++;
my $last = pop(@$open_tests);
if (defined($last) and $last ne $testname) {
push (@$open_tests, $testname);
}
$msg_ops->end_test($testname, $result, 0, $reason);
} elsif ($result eq "error") {
$statistics->{TESTS_ERROR}++;
pop(@$open_tests); #FIXME: Check that popped value == $testname
$msg_ops->end_test($testname, $result, 1, $reason);
$unexpected_err++;
}
} else {
$msg_ops->output_msg($_);
}
}
while ($#$open_tests+1 > 0) {
$msg_ops->end_test(pop(@$open_tests), "error", 1,
"was started but never finished!");
$statistics->{TESTS_ERROR}++;
$unexpected_err++;
}
return 1 if $unexpected_err > 0;
return 1 if $unexpected_fail > 0;
return 0;
}
sub start_test($)
{
my ($testname) = @_;
print "test: $testname\n";
}
sub end_test($$;$)
{
my $name = shift;
my $result = shift;
my $reason = shift;
if ($reason) {
print "$result: $name [\n";
print "$reason";
print "]\n";
} else {
print "$result: $name\n";
}
}
sub skip_test($;$)
{
my $name = shift;
my $reason = shift;
end_test($name, "skip", $reason);
}
sub fail_test($;$)
{
my $name = shift;
my $reason = shift;
end_test($name, "fail", $reason);
}
sub success_test($;$)
{
my $name = shift;
my $reason = shift;
end_test($name, "success", $reason);
}
sub xfail_test($;$)
{
my $name = shift;
my $reason = shift;
end_test($name, "xfail", $reason);
}
sub report_time($)
{
my ($time) = @_;
my ($sec, $min, $hour, $mday, $mon, $year, $wday, $yday, $isdst) = localtime($time);
printf "time: %04d-%02d-%02d %02d:%02d:%02dZ\n", $year+1900, $mon, $mday, $hour, $min, $sec;
}
1;
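
The helper subs at the bottom of the module emit exactly the line-oriented protocol that parse_results consumes: "test: NAME" opens a test, an outcome line such as "success: NAME" closes it, and a trailing " [" starts a free-form reason that runs until a lone "]". A made-up sketch of the same emitters in Python, purely to show the wire format (test names and reason invented):

import sys

def start_test(name):
    sys.stdout.write("test: %s\n" % name)

def end_test(name, result, reason=None):
    # Same shapes as Subunit.pm's end_test: bare outcome line, or a
    # bracketed multi-line reason terminated by "]" on its own line.
    if reason:
        sys.stdout.write("%s: %s [\n%s\n]\n" % (result, name, reason))
    else:
        sys.stdout.write("%s: %s\n" % (result, name))

start_test("samba4.rpc.echo")
end_test("samba4.rpc.echo", "success")
start_test("samba4.ldap.paged")
end_test("samba4.ldap.paged", "failure", "unexpected NT_STATUS_INVALID_PARAMETER")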

@@ -0,0 +1,85 @@
#!/usr/bin/perl
# Diff two subunit streams
# Copyright (C) Jelmer Vernooij <jelmer@samba.org>
#
# Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
# license at the users choice. A copy of both licenses are available in the
# project source as Apache-2.0 and BSD. You may not use this file except in
# compliance with one of these two licences.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# license you chose for the specific language governing permissions and
# limitations under that license.
package Subunit::Diff;
use strict;
use Subunit qw(parse_results);
sub control_msg() { }
sub report_time($$) { }
sub output_msg($$)
{
my ($self, $msg) = @_;
# No output for now, perhaps later diff this as well ?
}
sub start_test($$)
{
my ($self, $testname) = @_;
}
sub end_test($$$$$)
{
my ($self, $testname, $result, $unexpected, $reason) = @_;
$self->{$testname} = $result;
}
sub new {
my ($class) = @_;
my $self = {
};
bless($self, $class);
}
sub from_file($)
{
my ($path) = @_;
my $statistics = {
TESTS_UNEXPECTED_OK => 0,
TESTS_EXPECTED_OK => 0,
TESTS_UNEXPECTED_FAIL => 0,
TESTS_EXPECTED_FAIL => 0,
TESTS_ERROR => 0,
TESTS_SKIP => 0,
};
my $ret = new Subunit::Diff();
open(IN, $path) or return;
parse_results($ret, $statistics, *IN);
close(IN);
return $ret;
}
sub diff($$)
{
my ($old, $new) = @_;
my $ret = {};
foreach my $testname (keys %$old) {
if ($new->{$testname} ne $old->{$testname}) {
$ret->{$testname} = [$old->{$testname}, $new->{$testname}];
}
}
return $ret;
}
1;
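
After parsing, a Subunit::Diff object is just a hash from test name to last recorded result, and diff() reports every test whose result changed between the two streams. The same comparison as a quick Python dict sketch (test names and results invented):

def diff(old, new):
    # Mirror Subunit::Diff::diff: keep tests whose result differs.
    changed = {}
    for name, old_result in old.items():
        if new.get(name) != old_result:
            changed[name] = (old_result, new.get(name))
    return changed

old = {"ldap.bind": "success", "smb2.lock": "failure"}
new = {"ldap.bind": "success", "smb2.lock": "success"}
print(diff(old, new))   # {'smb2.lock': ('failure', 'success')}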

lib/subunit/perl/subunit-diff Executable file
@@ -0,0 +1,31 @@
#!/usr/bin/perl
# Diff two subunit streams
# Copyright (C) Jelmer Vernooij <jelmer@samba.org>
#
# Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
# license at the users choice. A copy of both licenses are available in the
# project source as Apache-2.0 and BSD. You may not use this file except in
# compliance with one of these two licences.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# license you chose for the specific language governing permissions and
# limitations under that license.
use Getopt::Long;
use strict;
use FindBin qw($RealBin $Script);
use lib "$RealBin/lib";
use Subunit::Diff;
my $old = Subunit::Diff::from_file($ARGV[0]);
my $new = Subunit::Diff::from_file($ARGV[1]);
my $ret = Subunit::Diff::diff($old, $new);
foreach my $e (sort(keys %$ret)) {
printf "%s: %s -> %s\n", $e, $ret->{$e}[0], $ret->{$e}[1];
}
0;

@@ -3,10 +3,11 @@
TARGETDIR="`dirname $0`"
WORKDIR="`mktemp -d`"
bzr export "$WORKDIR/subunit" lp:subunit
bzr export "$WORKDIR/testtools" lp:testtools
-for p in python/ filters/tap2subunit;
+for p in python/ filters/ perl/
do
rsync -avz --delete "$WORKDIR/subunit/$p" "$TARGETDIR/$p"
done