1
0
mirror of https://github.com/samba-team/samba.git synced 2025-01-11 05:18:09 +03:00

Remove bundled testtools.

Change-Id: Ic6ddb352e403c9591cbe4ad3fd36758ffcc2ddb9
Signed-off-by: Jelmer Vernooij <jelmer@samba.org>
Reviewed-by: Andrew Bartlett <abartlet@samba.org>
This commit is contained in:
Jelmer Vernooij 2014-12-14 20:29:12 +00:00 committed by Andrew Bartlett
parent da04eb9c3a
commit 8918481a84
81 changed files with 0 additions and 19182 deletions

View File

@ -1,4 +0,0 @@
[DEFAULT]
test_command=${PYTHON:-python} -m subunit.run discover . $LISTOPT $IDOPTION
test_id_option=--load-list $IDFILE
test_list_option=--list

View File

@ -1,58 +0,0 @@
Copyright (c) 2008-2011 Jonathan M. Lange <jml@mumak.net> and the testtools
authors.
The testtools authors are:
* Canonical Ltd
* Twisted Matrix Labs
* Jonathan Lange
* Robert Collins
* Andrew Bennetts
* Benjamin Peterson
* Jamu Kakar
* James Westby
* Martin [gz]
* Michael Hudson-Doyle
* Aaron Bentley
* Christian Kampka
* Gavin Panella
* Martin Pool
* Vincent Ladeuil
and are collectively referred to as "testtools developers".
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
Some code in testtools/run.py taken from Python's unittest module:
Copyright (c) 1999-2003 Steve Purcell
Copyright (c) 2003-2010 Python Software Foundation
This module is free software, and you may redistribute it and/or modify
it under the same terms as Python itself, so long as this copyright message
and disclaimer are retained in their original form.
IN NO EVENT SHALL THE AUTHOR BE LIABLE TO ANY PARTY FOR DIRECT, INDIRECT,
SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OF
THIS CODE, EVEN IF THE AUTHOR HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
DAMAGE.
THE AUTHOR SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
PARTICULAR PURPOSE. THE CODE PROVIDED HEREUNDER IS ON AN "AS IS" BASIS,
AND THERE IS NO OBLIGATION WHATSOEVER TO PROVIDE MAINTENANCE,
SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.

View File

@ -1,10 +0,0 @@
include LICENSE
include Makefile
include MANIFEST.in
include NEWS
include README
include .bzrignore
graft doc
graft doc/_static
graft doc/_templates
prune doc/_build

View File

@ -1,56 +0,0 @@
# See README for copyright and licensing details.
PYTHON=python
SOURCES=$(shell find testtools -name "*.py")
check:
PYTHONPATH=$(PWD) $(PYTHON) -m testtools.run testtools.tests.test_suite
TAGS: ${SOURCES}
ctags -e -R testtools/
tags: ${SOURCES}
ctags -R testtools/
clean: clean-sphinx
rm -f TAGS tags
find testtools -name "*.pyc" -exec rm '{}' \;
prerelease:
# An existing MANIFEST breaks distutils sometimes. Avoid that.
-rm MANIFEST
release:
./setup.py sdist upload --sign
$(PYTHON) scripts/_lp_release.py
snapshot: prerelease
./setup.py sdist
### Documentation ###
apidocs:
# pydoctor emits deprecation warnings under Ubuntu 10.10 LTS
PYTHONWARNINGS='ignore::DeprecationWarning' \
pydoctor --make-html --add-package testtools \
--docformat=restructuredtext --project-name=testtools \
--project-url=https://launchpad.net/testtools
doc/news.rst:
ln -s ../NEWS doc/news.rst
docs: doc/news.rst docs-sphinx
rm doc/news.rst
docs-sphinx: html-sphinx
# Clean out generated documentation
clean-sphinx:
cd doc && make clean
# Build the html docs using Sphinx.
html-sphinx:
cd doc && make html
.PHONY: apidocs docs-sphinx clean-sphinx html-sphinx docs
.PHONY: check clean prerelease release

File diff suppressed because it is too large Load Diff

View File

@ -1,89 +0,0 @@
=========
testtools
=========
testtools is a set of extensions to the Python standard library's unit testing
framework.
These extensions have been derived from years of experience with unit testing
in Python and come from many different sources.
Documentation
-------------
If you would like to learn more about testtools, consult our documentation in
the 'doc/' directory. You might like to start at 'doc/overview.rst' or
'doc/for-test-authors.rst'.
Licensing
---------
This project is distributed under the MIT license and copyright is owned by
Jonathan M. Lange and the testtools authors. See LICENSE for details.
Some code in 'testtools/run.py' is taken from Python's unittest module, and is
copyright Steve Purcell and the Python Software Foundation, it is distributed
under the same license as Python, see LICENSE for details.
Required Dependencies
---------------------
* Python 2.6+ or 3.0+
If you would like to use testtools for earlier Pythons, please use testtools
0.9.15.
Optional Dependencies
---------------------
If you would like to use our undocumented, unsupported Twisted support, then
you will need Twisted.
If you want to use ``fixtures`` then you can either install fixtures (e.g. from
https://launchpad.net/python-fixtures or http://pypi.python.org/pypi/fixtures)
or alternatively just make sure your fixture objects obey the same protocol.
Bug reports and patches
-----------------------
Please report bugs using Launchpad at <https://bugs.launchpad.net/testtools>.
Patches can also be submitted via Launchpad, or mailed to the author. You can
mail the author directly at jml@mumak.net.
There's no mailing list for this project yet, however the testing-in-python
mailing list may be a useful resource:
* Address: testing-in-python@lists.idyll.org
* Subscription link: http://lists.idyll.org/listinfo/testing-in-python
History
-------
testtools used to be called 'pyunit3k'. The name was changed to avoid
conflating the library with the Python 3.0 release (commonly referred to as
'py3k').
Thanks
------
* Canonical Ltd
* Bazaar
* Twisted Matrix Labs
* Robert Collins
* Andrew Bennetts
* Benjamin Peterson
* Jamu Kakar
* James Westby
* Martin [gz]
* Michael Hudson-Doyle
* Aaron Bentley
* Christian Kampka
* Gavin Panella
* Martin Pool

View File

@ -1,89 +0,0 @@
# Makefile for Sphinx documentation
#
# You can set these variables from the command line.
SPHINXOPTS =
SPHINXBUILD = sphinx-build
PAPER =
BUILDDIR = _build
# Internal variables.
PAPEROPT_a4 = -D latex_paper_size=a4
PAPEROPT_letter = -D latex_paper_size=letter
ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
.PHONY: help clean html dirhtml pickle json htmlhelp qthelp latex changes linkcheck doctest
help:
@echo "Please use \`make <target>' where <target> is one of"
@echo " html to make standalone HTML files"
@echo " dirhtml to make HTML files named index.html in directories"
@echo " pickle to make pickle files"
@echo " json to make JSON files"
@echo " htmlhelp to make HTML files and a HTML help project"
@echo " qthelp to make HTML files and a qthelp project"
@echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
@echo " changes to make an overview of all changed/added/deprecated items"
@echo " linkcheck to check all external links for integrity"
@echo " doctest to run all doctests embedded in the documentation (if enabled)"
clean:
-rm -rf $(BUILDDIR)/*
html:
$(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
@echo
@echo "Build finished. The HTML pages are in $(BUILDDIR)/html."
dirhtml:
$(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml
@echo
@echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml."
pickle:
$(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle
@echo
@echo "Build finished; now you can process the pickle files."
json:
$(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json
@echo
@echo "Build finished; now you can process the JSON files."
htmlhelp:
$(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp
@echo
@echo "Build finished; now you can run HTML Help Workshop with the" \
".hhp project file in $(BUILDDIR)/htmlhelp."
qthelp:
$(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp
@echo
@echo "Build finished; now you can run "qcollectiongenerator" with the" \
".qhcp project file in $(BUILDDIR)/qthelp, like this:"
@echo "# qcollectiongenerator $(BUILDDIR)/qthelp/testtools.qhcp"
@echo "To view the help file:"
@echo "# assistant -collectionFile $(BUILDDIR)/qthelp/testtools.qhc"
latex:
$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
@echo
@echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex."
@echo "Run \`make all-pdf' or \`make all-ps' in that directory to" \
"run these through (pdf)latex."
changes:
$(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes
@echo
@echo "The overview file is in $(BUILDDIR)/changes."
linkcheck:
$(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck
@echo
@echo "Link check complete; look for any errors in the above output " \
"or in $(BUILDDIR)/linkcheck/output.txt."
doctest:
$(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest
@echo "Testing of doctests in the sources finished, look at the " \
"results in $(BUILDDIR)/doctest/output.txt."

View File

@ -1,194 +0,0 @@
# -*- coding: utf-8 -*-
#
# testtools documentation build configuration file, created by
# sphinx-quickstart on Sun Nov 28 13:45:40 2010.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.append(os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'testtools'
copyright = u'2010, The testtools authors'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = 'VERSION'
# The full version, including alpha/beta/rc tags.
release = 'VERSION'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'testtoolsdoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'testtools.tex', u'testtools Documentation',
u'The testtools authors', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True

View File

@ -1,238 +0,0 @@
============================
testtools for framework folk
============================
Introduction
============
In addition to having many features :doc:`for test authors
<for-test-authors>`, testtools also has many bits and pieces that are useful
for folk who write testing frameworks.
If you are the author of a test runner, are working on a very large
unit-tested project, are trying to get one testing framework to play nicely
with another or are hacking away at getting your test suite to run in parallel
over a heterogeneous cluster of machines, this guide is for you.
This manual is a summary. You can get details by consulting the `testtools
API docs`_.
Extensions to TestCase
======================
Custom exception handling
-------------------------
testtools provides a way to control how test exceptions are handled. To do
this, add a new exception to ``self.exception_handlers`` on a
``testtools.TestCase``. For example::
>>> self.exception_handlers.insert(-1, (ExceptionClass, handler)).
Having done this, if any of ``setUp``, ``tearDown``, or the test method raise
``ExceptionClass``, ``handler`` will be called with the test case, test result
and the raised exception.
Use this if you want to add a new kind of test result, that is, if you think
that ``addError``, ``addFailure`` and so forth are not enough for your needs.
Controlling test execution
--------------------------
If you want to control more than just how exceptions are raised, you can
provide a custom ``RunTest`` to a ``TestCase``. The ``RunTest`` object can
change everything about how the test executes.
To work with ``testtools.TestCase``, a ``RunTest`` must have a factory that
takes a test and an optional list of exception handlers. Instances returned
by the factory must have a ``run()`` method that takes an optional ``TestResult``
object.
The default is ``testtools.runtest.RunTest``, which calls ``setUp``, the test
method, ``tearDown`` and clean ups (see :ref:`addCleanup`) in the normal, vanilla
way that Python's standard unittest_ does.
To specify a ``RunTest`` for all the tests in a ``TestCase`` class, do something
like this::
class SomeTests(TestCase):
run_tests_with = CustomRunTestFactory
To specify a ``RunTest`` for a specific test in a ``TestCase`` class, do::
class SomeTests(TestCase):
@run_test_with(CustomRunTestFactory, extra_arg=42, foo='whatever')
def test_something(self):
pass
In addition, either of these can be overridden by passing a factory in to the
``TestCase`` constructor with the optional ``runTest`` argument.
Test renaming
-------------
``testtools.clone_test_with_new_id`` is a function to copy a test case
instance to one with a new name. This is helpful for implementing test
parameterization.
Test placeholders
=================
Sometimes, it's useful to be able to add things to a test suite that are not
actually tests. For example, you might wish to represent import failures
that occur during test discovery as tests, so that your test result object
doesn't have to do special work to handle them nicely.
testtools provides two such objects, called "placeholders": ``PlaceHolder``
and ``ErrorHolder``. ``PlaceHolder`` takes a test id and an optional
description. When it's run, it succeeds. ``ErrorHolder`` takes a test id,
and error and an optional short description. When it's run, it reports that
error.
These placeholders are best used to log events that occur outside the test
suite proper, but are still very relevant to its results.
e.g.::
>>> suite = TestSuite()
>>> suite.add(PlaceHolder('I record an event'))
>>> suite.run(TextTestResult(verbose=True))
I record an event [OK]
Extensions to TestResult
========================
TestResult.addSkip
------------------
This method is called on result objects when a test skips. The
``testtools.TestResult`` class records skips in its ``skip_reasons`` instance
dict. They can be reported on in much the same way as successful tests.
TestResult.time
---------------
This method controls the time used by a ``TestResult``, permitting accurate
timing of test results gathered on different machines or in different threads.
See pydoc testtools.TestResult.time for more details.
ThreadsafeForwardingResult
--------------------------
A ``TestResult`` which forwards activity to another test result, but synchronises
on a semaphore to ensure that all the activity for a single test arrives in a
batch. This allows simple TestResults which do not expect concurrent test
reporting to be fed the activity from multiple test threads, or processes.
Note that when you provide multiple errors for a single test, the target sees
each error as a distinct complete test.
MultiTestResult
---------------
A test result that dispatches its events to many test results. Use this
to combine multiple different test result objects into one test result object
that can be passed to ``TestCase.run()`` or similar. For example::
a = TestResult()
b = TestResult()
combined = MultiTestResult(a, b)
combined.startTestRun() # Calls a.startTestRun() and b.startTestRun()
Each of the methods on ``MultiTestResult`` will return a tuple of whatever the
component test results return.
TestResultDecorator
-------------------
Not strictly a ``TestResult``, but something that implements the extended
``TestResult`` interface of testtools. It can be subclassed to create objects
that wrap ``TestResults``.
TextTestResult
--------------
A ``TestResult`` that provides a text UI very similar to the Python standard
library UI. Key differences are that its supports the extended outcomes and
details API, and is completely encapsulated into the result object, permitting
it to be used without a 'TestRunner' object. Not all the Python 2.7 outcomes
are displayed (yet). It is also a 'quiet' result with no dots or verbose mode.
These limitations will be corrected soon.
ExtendedToOriginalDecorator
---------------------------
Adapts legacy ``TestResult`` objects, such as those found in older Pythons, to
meet the testtools ``TestResult`` API.
Test Doubles
------------
In testtools.testresult.doubles there are three test doubles that testtools
uses for its own testing: ``Python26TestResult``, ``Python27TestResult``,
``ExtendedTestResult``. These TestResult objects implement a single variation of
the TestResult API each, and log activity to a list ``self._events``. These are
made available for the convenience of people writing their own extensions.
startTestRun and stopTestRun
----------------------------
Python 2.7 added hooks ``startTestRun`` and ``stopTestRun`` which are called
before and after the entire test run. 'stopTestRun' is particularly useful for
test results that wish to produce summary output.
``testtools.TestResult`` provides default ``startTestRun`` and ``stopTestRun``
methods, and the default testtools runner will call these methods
appropriately.
The ``startTestRun`` method will reset any errors, failures and so forth on
the result, making the result object look as if no tests have been run.
Extensions to TestSuite
=======================
ConcurrentTestSuite
-------------------
A TestSuite for parallel testing. This is used in conjunction with a helper that
runs a single suite in some parallel fashion (for instance, forking, handing
off to a subprocess, to a compute cloud, or simple threads).
ConcurrentTestSuite uses the helper to get a number of separate runnable
objects with a run(result), runs them all in threads using the
ThreadsafeForwardingResult to coalesce their activity.
FixtureSuite
------------
A test suite that sets up a fixture_ before running any tests, and then tears
it down after all of the tests are run. The fixture is *not* made available to
any of the tests.
sorted_tests
------------
Given the composite structure of TestSuite / TestCase, sorting tests is
problematic - you can't tell what functionality is embedded into custom Suite
implementations. In order to deliver consistent test orders when using test
discovery (see http://bugs.python.org/issue16709), testtools flattens and
sorts tests that have the standard TestSuite, defines a new method sort_tests,
which can be used by non-standard TestSuites to know when they should sort
their tests.
.. _`testtools API docs`: http://mumak.net/testtools/apidocs/
.. _unittest: http://docs.python.org/library/unittest.html
.. _fixture: http://pypi.python.org/pypi/fixtures

File diff suppressed because it is too large Load Diff

View File

@ -1,153 +0,0 @@
=========================
Contributing to testtools
=========================
Coding style
------------
In general, follow `PEP 8`_ except where consistency with the standard
library's unittest_ module would suggest otherwise.
testtools currently supports Python 2.6 and later, including Python 3.
Copyright assignment
--------------------
Part of testtools' raison d'être is to provide Python with improvements to the
testing code it ships. For that reason we require all contributions (that are
non-trivial) to meet one of the following rules:
* be inapplicable for inclusion in Python.
* be able to be included in Python without further contact with the contributor.
* be copyright assigned to Jonathan M. Lange.
Please pick one of these and specify it when contributing code to testtools.
Licensing
---------
All code that is not copyright assigned to Jonathan M. Lange (see Copyright
Assignment above) needs to be licensed under the `MIT license`_ that testtools
uses, so that testtools can ship it.
Testing
-------
Please write tests for every feature. This project ought to be a model
example of well-tested Python code!
Take particular care to make sure the *intent* of each test is clear.
You can run tests with ``make check``.
By default, testtools hides many levels of its own stack when running tests.
This is for the convenience of users, who do not care about how, say, assert
methods are implemented. However, when writing tests for testtools itself, it
is often useful to see all levels of the stack. To do this, add
``run_tests_with = FullStackRunTest`` to the top of a test's class definition.
Documentation
-------------
Documents are written using the Sphinx_ variant of reStructuredText_. All
public methods, functions, classes and modules must have API documentation.
When changing code, be sure to check the API documentation to see if it could
be improved. Before submitting changes to trunk, look over them and see if
the manuals ought to be updated.
Source layout
-------------
The top-level directory contains the ``testtools/`` package directory, and
miscellaneous files like ``README`` and ``setup.py``.
The ``testtools/`` directory is the Python package itself. It is separated
into submodules for internal clarity, but all public APIs should be “promoted”
into the top-level package by importing them in ``testtools/__init__.py``.
Users of testtools should never import a submodule in order to use a stable
API. Unstable APIs like ``testtools.matchers`` and
``testtools.deferredruntest`` should be exported as submodules.
Tests belong in ``testtools/tests/``.
Committing to trunk
-------------------
Testtools is maintained using bzr, with its trunk at lp:testtools. This gives
every contributor the ability to commit their work to their own branches.
However permission must be granted to allow contributors to commit to the trunk
branch.
Commit access to trunk is obtained by joining the testtools-committers
Launchpad team. Membership in this team is contingent on obeying the testtools
contribution policy, see `Copyright Assignment`_ above.
Code Review
-----------
All code must be reviewed before landing on trunk. The process is to create a
branch in launchpad, and submit it for merging to lp:testtools. It will then
be reviewed before it can be merged to trunk. It will be reviewed by someone:
* not the author
* a committer (member of the `~testtools-committers`_ team)
As a special exception, while the testtools committers team is small and prone
to blocking, a merge request from a committer that has not been reviewed after
24 hours may be merged by that committer. When the team is larger this policy
will be revisited.
Code reviewers should look for the quality of what is being submitted,
including conformance with this HACKING file.
Changes which all users should be made aware of should be documented in NEWS.
NEWS management
---------------
The file NEWS is structured as a sorted list of releases. Each release can have
a free-form description and one or more sections with bullet point items.
Sections in use today are 'Improvements' and 'Changes'. To ease merging between
branches, the bullet points are kept alphabetically sorted. The release NEXT is
permanently present at the top of the list.
Release tasks
-------------
#. Choose a version number, say X.Y.Z
#. Branch from trunk to testtools-X.Y.Z
#. In testtools-X.Y.Z, ensure __init__ has version ``(X, Y, Z, 'final', 0)``
#. Replace NEXT in NEWS with the version number X.Y.Z, adjusting the reST.
#. Possibly write a blurb into NEWS.
#. Replace any additional references to NEXT with the version being
released. (There should be none other than the ones in these release tasks
which should not be replaced).
#. Commit the changes.
#. Tag the release, bzr tag testtools-X.Y.Z
#. Run 'make release', this:
#. Creates a source distribution and uploads to PyPI
#. Ensures all Fix Committed bugs are in the release milestone
#. Makes a release on Launchpad and uploads the tarball
#. Marks all the Fix Committed bugs as Fix Released
#. Creates a new milestone
#. Merge the release branch testtools-X.Y.Z into trunk. Before the commit,
add a NEXT heading to the top of NEWS and bump the version in __init__.py
e.g. to ``(X, Y, Z+1, 'dev', 0)``.
#. Push trunk to Launchpad
#. If a new series has been created (e.g. 0.10.0), make the series on Launchpad.
.. _PEP 8: http://www.python.org/dev/peps/pep-0008/
.. _unittest: http://docs.python.org/library/unittest.html
.. _~testtools-committers: https://launchpad.net/~testtools-committers
.. _MIT license: http://www.opensource.org/licenses/mit-license.php
.. _Sphinx: http://sphinx.pocoo.org/
.. _restructuredtext: http://docutils.sourceforge.net/rst.html

View File

@ -1,36 +0,0 @@
.. testtools documentation master file, created by
sphinx-quickstart on Sun Nov 28 13:45:40 2010.
You can adapt this file completely to your liking, but it should at least
contain the root `toctree` directive.
testtools: tasteful testing for Python
======================================
testtools is a set of extensions to the Python standard library's unit testing
framework. These extensions have been derived from many years of experience
with unit testing in Python and come from many different sources. testtools
also ports recent unittest changes all the way back to Python 2.4. The next
release of testtools will change that to support versions that are maintained
by the Python community instead, to allow the use of modern language features
within testtools.
Contents:
.. toctree::
:maxdepth: 1
overview
for-test-authors
for-framework-folk
hacking
Changes to testtools <news>
API reference documentation <http://mumak.net/testtools/apidocs/>
Indices and tables
==================
* :ref:`genindex`
* :ref:`modindex`
* :ref:`search`

View File

@ -1,113 +0,0 @@
@ECHO OFF
REM Command file for Sphinx documentation

set SPHINXBUILD=sphinx-build
set BUILDDIR=_build
set ALLSPHINXOPTS=-d %BUILDDIR%/doctrees %SPHINXOPTS% .
if NOT "%PAPER%" == "" (
	set ALLSPHINXOPTS=-D latex_paper_size=%PAPER% %ALLSPHINXOPTS%
)

if "%1" == "" goto help

if "%1" == "help" (
	:help
	echo.Please use `make ^<target^>` where ^<target^> is one of
	echo.  html      to make standalone HTML files
	echo.  dirhtml   to make HTML files named index.html in directories
	echo.  pickle    to make pickle files
	echo.  json      to make JSON files
	echo.  htmlhelp  to make HTML files and a HTML help project
	echo.  qthelp    to make HTML files and a qthelp project
	echo.  latex     to make LaTeX files, you can set PAPER=a4 or PAPER=letter
	echo.  changes   to make an overview over all changed/added/deprecated items
	echo.  linkcheck to check all external links for integrity
	echo.  doctest   to run all doctests embedded in the documentation if enabled
	goto end
)

if "%1" == "clean" (
	for /d %%i in (%BUILDDIR%\*) do rmdir /q /s %%i
	del /q /s %BUILDDIR%\*
	goto end
)

if "%1" == "html" (
	%SPHINXBUILD% -b html %ALLSPHINXOPTS% %BUILDDIR%/html
	echo.
	echo.Build finished. The HTML pages are in %BUILDDIR%/html.
	goto end
)

if "%1" == "dirhtml" (
	%SPHINXBUILD% -b dirhtml %ALLSPHINXOPTS% %BUILDDIR%/dirhtml
	echo.
	echo.Build finished. The HTML pages are in %BUILDDIR%/dirhtml.
	goto end
)

if "%1" == "pickle" (
	%SPHINXBUILD% -b pickle %ALLSPHINXOPTS% %BUILDDIR%/pickle
	echo.
	echo.Build finished; now you can process the pickle files.
	goto end
)

if "%1" == "json" (
	%SPHINXBUILD% -b json %ALLSPHINXOPTS% %BUILDDIR%/json
	echo.
	echo.Build finished; now you can process the JSON files.
	goto end
)

if "%1" == "htmlhelp" (
	%SPHINXBUILD% -b htmlhelp %ALLSPHINXOPTS% %BUILDDIR%/htmlhelp
	echo.
	echo.Build finished; now you can run HTML Help Workshop with the ^
.hhp project file in %BUILDDIR%/htmlhelp.
	goto end
)

if "%1" == "qthelp" (
	%SPHINXBUILD% -b qthelp %ALLSPHINXOPTS% %BUILDDIR%/qthelp
	echo.
	echo.Build finished; now you can run "qcollectiongenerator" with the ^
.qhcp project file in %BUILDDIR%/qthelp, like this:
	echo.^> qcollectiongenerator %BUILDDIR%\qthelp\testtools.qhcp
	echo.To view the help file:
	REM Qt Assistant collections use the .qhc extension; the original said
	REM ".ghc", which is a typo from old sphinx-quickstart templates.
	echo.^> assistant -collectionFile %BUILDDIR%\qthelp\testtools.qhc
	goto end
)

if "%1" == "latex" (
	%SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex
	echo.
	echo.Build finished; the LaTeX files are in %BUILDDIR%/latex.
	goto end
)

if "%1" == "changes" (
	%SPHINXBUILD% -b changes %ALLSPHINXOPTS% %BUILDDIR%/changes
	echo.
	echo.The overview file is in %BUILDDIR%/changes.
	goto end
)

if "%1" == "linkcheck" (
	%SPHINXBUILD% -b linkcheck %ALLSPHINXOPTS% %BUILDDIR%/linkcheck
	echo.
	echo.Link check complete; look for any errors in the above output ^
or in %BUILDDIR%/linkcheck/output.txt.
	goto end
)

if "%1" == "doctest" (
	%SPHINXBUILD% -b doctest %ALLSPHINXOPTS% %BUILDDIR%/doctest
	echo.
	echo.Testing of doctests in the sources finished, look at the ^
results in %BUILDDIR%/doctest/output.txt.
	goto end
)

:end

View File

@ -1,102 +0,0 @@
======================================
testtools: tasteful testing for Python
======================================
testtools is a set of extensions to the Python standard library's unit testing
framework. These extensions have been derived from many years of experience
with unit testing in Python and come from many different sources. testtools
supports Python versions all the way back to Python 2.4. The next release of
testtools will change that to support versions that are maintained by the
Python community instead, to allow the use of modern language features within
testtools.
What better way to start than with a contrived code snippet?::
from testtools import TestCase
from testtools.content import Content
from testtools.content_type import UTF8_TEXT
from testtools.matchers import Equals
from myproject import SillySquareServer
class TestSillySquareServer(TestCase):
def setUp(self):
super(TestSillySquareServer, self).setUp()
self.server = self.useFixture(SillySquareServer())
self.addCleanup(self.attach_log_file)
def attach_log_file(self):
self.addDetail(
'log-file',
Content(UTF8_TEXT,
lambda: open(self.server.logfile, 'r').readlines()))
def test_server_is_cool(self):
self.assertThat(self.server.temperature, Equals("cool"))
def test_square(self):
self.assertThat(self.server.silly_square_of(7), Equals(49))
Why use testtools?
==================
Better assertion methods
------------------------
The standard assertion methods that come with unittest aren't as helpful as
they could be, and there aren't quite enough of them. testtools adds
``assertIn``, ``assertIs``, ``assertIsInstance`` and their negatives.
Matchers: better than assertion methods
---------------------------------------
Of course, in any serious project you want to be able to have assertions that
are specific to that project and the particular problem that it is addressing.
Rather than forcing you to define your own assertion methods and maintain your
own inheritance hierarchy of ``TestCase`` classes, testtools lets you write
your own "matchers", custom predicates that can be plugged into a unit test::
def test_response_has_bold(self):
# The response has bold text.
response = self.server.getResponse()
self.assertThat(response, HTMLContains(Tag('bold', 'b')))
More debugging info, when you need it
--------------------------------------
testtools makes it easy to add arbitrary data to your test result. If you
want to know what's in a log file when a test fails, or what the load was on
the computer when a test started, or what files were open, you can add that
information with ``TestCase.addDetail``, and it will appear in the test
results if that test fails.
Extend unittest, but stay compatible and re-usable
--------------------------------------------------
testtools goes to great lengths to allow serious test authors and test
*framework* authors to do whatever they like with their tests and their
extensions while staying compatible with the standard library's unittest.
testtools has completely parametrized how exceptions raised in tests are
mapped to ``TestResult`` methods and how tests are actually executed (ever
wanted ``tearDown`` to be called regardless of whether ``setUp`` succeeds?)
It also provides many simple but handy utilities, like the ability to clone a
test, a ``MultiTestResult`` object that lets many result objects get the
results from one test suite, adapters to bring legacy ``TestResult`` objects
into our new golden age.
Cross-Python compatibility
--------------------------
testtools gives you the very latest in unit testing technology in a way that
will work with Python 2.6, 2.7 and 3.1.
If you wish to use testtools with Python 2.4 or 2.5, then please use testtools
0.9.15.

View File

@ -1,3 +0,0 @@
These are scripts to help with building, maintaining and releasing testtools.
There is little here for anyone except a testtools contributor.

View File

@ -1,230 +0,0 @@
#!/usr/bin/python
"""Release testtools on Launchpad.
Steps:
1. Make sure all "Fix committed" bugs are assigned to 'next'
2. Rename 'next' to the new version
3. Release the milestone
4. Upload the tarball
5. Create a new 'next' milestone
6. Mark all "Fix committed" bugs in the milestone as "Fix released"
Assumes that NEWS is in the parent directory, that the release sections are
underlined with '~' and the subsections are underlined with '-'.
Assumes that this file is in the 'scripts' directory of a testtools tree that has
already had a tarball built and uploaded with 'python setup.py sdist upload
--sign'.
"""
from datetime import datetime, timedelta, tzinfo
import logging
import os
import sys
from launchpadlib.launchpad import Launchpad
from launchpadlib import uris
APP_NAME = 'testtools-lp-release'
CACHE_DIR = os.path.expanduser('~/.launchpadlib/cache')
SERVICE_ROOT = uris.LPNET_SERVICE_ROOT
FIX_COMMITTED = u"Fix Committed"
FIX_RELEASED = u"Fix Released"
# Launchpad file type for a tarball upload.
CODE_RELEASE_TARBALL = 'Code Release Tarball'
PROJECT_NAME = 'testtools'
NEXT_MILESTONE_NAME = 'next'
class _UTC(tzinfo):
"""UTC"""
def utcoffset(self, dt):
return timedelta(0)
def tzname(self, dt):
return "UTC"
def dst(self, dt):
return timedelta(0)
UTC = _UTC()
def configure_logging():
    """Build and return the application's stderr logger.

    The logger is named after APP_NAME, emits INFO-and-above records, and
    formats each record as "LEVEL: message".
    """
    level = logging.INFO
    app_log = logging.getLogger(APP_NAME)
    app_log.setLevel(level)
    stream_handler = logging.StreamHandler()
    stream_handler.setLevel(level)
    stream_handler.setFormatter(
        logging.Formatter("%(levelname)s: %(message)s"))
    app_log.addHandler(stream_handler)
    return app_log


# Shared logger for the whole script.
LOG = configure_logging()
def get_path(relpath):
    """Return the absolute path of ``relpath`` relative to the tree root.

    The tree root is taken to be the parent of the directory containing
    this script.
    """
    tree_root = os.path.dirname(os.path.dirname(__file__))
    return os.path.abspath(os.path.join(tree_root, relpath))
def assign_fix_committed_to_next(testtools, next_milestone):
    """Ensure every 'Fix Committed' bug task is targeted at 'next'.

    ``testtools`` is the launchpadlib project object; tasks already on
    ``next_milestone`` are left untouched.
    """
    fixed_tasks = list(testtools.searchTasks(status=FIX_COMMITTED))
    for task in fixed_tasks:
        LOG.debug("%s" % (task.title,))
        if task.milestone != next_milestone:
            task.milestone = next_milestone
            LOG.info("Re-assigning %s" % (task.title,))
            task.lp_save()
def rename_milestone(next_milestone, new_name):
    """Rename ``next_milestone`` to ``new_name`` and save it on Launchpad."""
    LOG.info("Renaming %s to %s" % (next_milestone.name, new_name))
    next_milestone.name = new_name
    next_milestone.lp_save()
def get_release_notes_and_changelog(news_path):
    """Parse NEWS and return (milestone_name, release_notes, changelog).

    Expects the first release section to be underlined with '~' and its
    subsections to be underlined with '-'.
    """
    release_notes = []
    changelog = []
    state = None
    last_line = None

    def is_heading_marker(line, marker_char):
        # A heading marker is a non-empty line made up solely of marker_char.
        return line and line == marker_char * len(line)

    LOG.debug("Loading NEWS from %s" % (news_path,))
    with open(news_path, 'r') as news:
        for line in news:
            line = line.strip()
            if state is None:
                # Scanning for the first '~' underline; the line before it
                # is the milestone name.
                if is_heading_marker(line, '~'):
                    milestone_name = last_line
                    state = 'release-notes'
                else:
                    last_line = line
            elif state == 'title':
                # The line after the title is a heading marker line, so we
                # ignore it and change state.  That which follows are the
                # release notes.
                # NOTE(review): nothing in this function ever sets state to
                # 'title', so this branch looks unreachable -- confirm
                # before removing.
                state = 'release-notes'
            elif state == 'release-notes':
                if is_heading_marker(line, '-'):
                    state = 'changelog'
                    # Last line in the release notes is actually the first
                    # line of the changelog.
                    changelog = [release_notes.pop(), line]
                else:
                    release_notes.append(line)
            elif state == 'changelog':
                if is_heading_marker(line, '~'):
                    # Last line in changelog is actually the first line of
                    # the next section.
                    changelog.pop()
                    break
                else:
                    changelog.append(line)
            else:
                raise ValueError("Couldn't parse NEWS")
    release_notes = '\n'.join(release_notes).strip() + '\n'
    changelog = '\n'.join(changelog).strip() + '\n'
    return milestone_name, release_notes, changelog
def release_milestone(milestone, release_notes, changelog):
    """Create a ProductRelease for ``milestone`` and deactivate it.

    :return: the launchpadlib release object.
    """
    released_at = datetime.now(tz=UTC)
    LOG.info(
        "Releasing milestone: %s, date %s" % (milestone.name, released_at))
    release = milestone.createProductRelease(
        date_released=released_at,
        changelog=changelog,
        release_notes=release_notes,
        )
    # A released milestone should no longer accept new targeted bugs.
    milestone.is_active = False
    milestone.lp_save()
    return release
def create_milestone(series, name):
    """Create and return a new milestone named ``name`` on ``series``."""
    LOG.info("Creating milestone %s in series %s" % (name, series.name))
    return series.newMilestone(name=name)
def close_fixed_bugs(milestone):
    """Mark Fix Committed tasks on ``milestone`` as Fix Released.

    Tasks in any other state are detached from the milestone with a
    warning.
    """
    for task in list(milestone.searchTasks()):
        LOG.debug("Found %s" % (task.title,))
        if task.status == FIX_COMMITTED:
            LOG.info("Closing %s" % (task.title,))
            task.status = FIX_RELEASED
        else:
            LOG.warning(
                "Bug not fixed, removing from milestone: %s" % (task.title,))
            task.milestone = None
        task.lp_save()
def upload_tarball(release, tarball_path):
    """Attach the tarball at ``tarball_path`` and its GPG signature to
    ``release``.

    The signature is expected to live next to the tarball with an '.asc'
    suffix.
    """
    # Read both files in binary mode: a gzipped tarball is not text, and
    # text mode would corrupt it (and fail outright on Python 3).  The
    # original opened them in text mode and relied on platform behaviour.
    with open(tarball_path, 'rb') as tarball:
        tarball_content = tarball.read()
    sig_path = tarball_path + '.asc'
    with open(sig_path, 'rb') as sig:
        sig_content = sig.read()
    tarball_name = os.path.basename(tarball_path)
    LOG.info("Uploading tarball: %s" % (tarball_path,))
    release.add_file(
        file_type=CODE_RELEASE_TARBALL,
        file_content=tarball_content, filename=tarball_name,
        signature_content=sig_content,
        # Upload the signature under its base name, matching how the
        # tarball is named; the original passed the full filesystem path.
        signature_filename=os.path.basename(sig_path),
        content_type="application/x-gzip; charset=binary")
def release_project(launchpad, project_name, next_milestone_name):
    """Perform a full release of ``project_name`` on Launchpad.

    :return: 0 on success, 1 if the pre-flight sanity checks fail.
    """
    testtools = launchpad.projects[project_name]
    next_milestone = testtools.getMilestone(name=next_milestone_name)
    release_name, release_notes, changelog = get_release_notes_and_changelog(
        get_path('NEWS'))
    LOG.info("Releasing %s %s" % (project_name, release_name))
    # Since reversing these operations is hard, and inspecting errors from
    # Launchpad is also difficult, do some looking before leaping.
    errors = []
    tarball_path = get_path('dist/%s-%s.tar.gz' % (project_name, release_name,))
    if not os.path.isfile(tarball_path):
        errors.append("%s does not exist" % (tarball_path,))
    if not os.path.isfile(tarball_path + '.asc'):
        errors.append("%s does not exist" % (tarball_path + '.asc',))
    if testtools.getMilestone(name=release_name):
        errors.append("Milestone %s exists on %s" % (release_name, project_name))
    if errors:
        for error in errors:
            LOG.error(error)
        return 1
    # All checks passed: perform the release steps in order.
    assign_fix_committed_to_next(testtools, next_milestone)
    rename_milestone(next_milestone, release_name)
    release = release_milestone(next_milestone, release_notes, changelog)
    upload_tarball(release, tarball_path)
    create_milestone(next_milestone.series_target, next_milestone_name)
    close_fixed_bugs(next_milestone)
    return 0
def main(args):
    """Log in to Launchpad and release the project.

    :return: a process exit status (0 for success).
    """
    launchpad = Launchpad.login_with(APP_NAME, SERVICE_ROOT, CACHE_DIR)
    return release_project(launchpad, PROJECT_NAME, NEXT_MILESTONE_NAME)


if __name__ == '__main__':
    sys.exit(main(sys.argv))

View File

@ -1,93 +0,0 @@
#!/usr/bin/python
"""Run the testtools test suite for all supported Pythons.
Prints output as a subunit test suite. If anything goes to stderr, that is
treated as a test error. If a Python is not available, then it is skipped.
"""
from datetime import datetime
import os
import subprocess
import sys
import subunit
from subunit import (
iso8601,
_make_stream_binary,
TestProtocolClient,
TestProtocolServer,
)
from testtools import (
PlaceHolder,
TestCase,
)
from testtools.compat import BytesIO
from testtools.content import text_content
ROOT = os.path.dirname(os.path.dirname(__file__))
def run_for_python(version, result, tests):
    """Run ``tests`` under the Python of the given ``version`` string.

    Reports to ``result`` via the subunit protocol.  If the interpreter
    is not installed, a skip is recorded instead.  Anything the child
    writes to stderr is reported as a test error.
    """
    if not tests:
        tests = ['testtools.tests.test_suite']
    # XXX: This could probably be broken up and put into subunit.
    python = 'python%s' % (version,)
    # XXX: Correct API, but subunit doesn't support it. :(
    # result.tags(set(python), set())
    result.time(now())
    test = PlaceHolder(''.join(c for c in python if c != '.'))
    # Probe whether the interpreter exists at all before running the suite.
    probe = subprocess.Popen(
        '%s -c pass' % (python,), shell=True,
        stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    probe.communicate()
    if probe.returncode:
        result.startTest(test)
        result.addSkip(test, reason='%s not available' % (python,))
        result.stopTest(test)
        return
    env = os.environ.copy()
    if env.get('PYTHONPATH', None):
        env['PYTHONPATH'] = os.pathsep.join([ROOT, env['PYTHONPATH']])
    else:
        env['PYTHONPATH'] = ROOT
    result.time(now())
    protocol = TestProtocolServer(result)
    subunit_path = os.path.join(os.path.dirname(subunit.__file__), 'run.py')
    cmd = [
        python,
        '-W', 'ignore:Module testtools was already imported',
        subunit_path]
    cmd.extend(tests)
    runner = subprocess.Popen(
        cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=env)
    _make_stream_binary(runner.stdout)
    _make_stream_binary(runner.stderr)
    # XXX: This buffers everything.  Bad for memory, bad for getting
    # progress on jenkins.
    output, error = runner.communicate()
    protocol.readFrom(BytesIO(output))
    if error:
        result.startTest(test)
        result.addError(test, details={
            'stderr': text_content(error),
            })
        result.stopTest(test)
    result.time(now())
    # XXX: Correct API, but subunit doesn't support it. :(
    #result.tags(set(), set(python))
def now():
    """Return the current UTC time as a timezone-aware datetime."""
    return datetime.utcnow().replace(tzinfo=iso8601.Utc())
if __name__ == '__main__':
    # Make the testtools tree importable, then run the suite under each
    # candidate interpreter, streaming subunit to stdout.
    sys.path.append(ROOT)
    result = TestProtocolClient(sys.stdout)
    for version in '2.6 2.7 3.0 3.1 3.2'.split():
        run_for_python(version, result, sys.argv[1:])

View File

@ -1,11 +0,0 @@
#!/usr/bin/python
from StringIO import StringIO
from urllib2 import urlopen
# URL that triggers a rebuild of the testtools docs on readthedocs.org.
WEB_HOOK = 'http://readthedocs.org/build/588'


if __name__ == '__main__':
    # POSTing any non-empty body to the web hook kicks off a build.
    urlopen(WEB_HOOK, data=' ')

View File

@ -1,4 +0,0 @@
[test]
test_module = testtools.tests
buffer=1
catch=1

View File

@ -1,85 +0,0 @@
#!/usr/bin/env python
"""Distutils installer for testtools."""
from distutils.core import setup
import email
import os
import testtools
def get_revno():
    """Return the bzr revision number of this tree, or None.

    None is returned when this file is not inside a bzr working tree.
    """
    import bzrlib.errors
    import bzrlib.workingtree
    try:
        tree = bzrlib.workingtree.WorkingTree.open_containing(__file__)[0]
    except (bzrlib.errors.NotBranchError, bzrlib.errors.NoWorkingTree):
        return None
    else:
        return tree.branch.revno()
def get_version_from_pkg_info():
    """Get the version from the PKG-INFO file if we can.

    :return: the 'Version' header value, or None when PKG-INFO is absent
        or unparseable.
    """
    pkg_info_path = os.path.join(os.path.dirname(__file__), 'PKG-INFO')
    try:
        pkg_info_file = open(pkg_info_path, 'r')
    except (IOError, OSError):
        return None
    try:
        try:
            pkg_info = email.message_from_file(pkg_info_file)
        except email.MessageError:
            return None
    finally:
        # The original never closed this handle; make sure we do.
        pkg_info_file.close()
    return pkg_info.get('Version', None)
def get_version():
    """Return the version of testtools that we are building."""
    version = '.'.join(
        str(component) for component in testtools.__version__[0:3])
    phase = testtools.__version__[3]
    if phase == 'final':
        return version
    pkg_info_version = get_version_from_pkg_info()
    if pkg_info_version:
        return pkg_info_version
    revno = get_revno()
    if revno is None:
        # Apparently if we just say "snapshot" then distribute won't accept
        # it as satisfying versioned dependencies.  This is a problem for
        # the daily build version.
        return "snapshot-%s" % (version,)
    if phase == 'alpha':
        # No idea what the next version will be.
        return 'next-r%s' % revno
    # Preserve the version number but give it a revno suffix.
    return version + '-r%s' % revno
def get_long_description():
    """Read and return doc/overview.rst, used as the long description.

    :raises IOError/OSError: if the document is missing.
    """
    manual_path = os.path.join(
        os.path.dirname(__file__), 'doc/overview.rst')
    # Use a context manager so the file handle is not leaked (the original
    # left it open).
    with open(manual_path) as manual:
        return manual.read()
# Register testtools with distutils.  Version, long description and the
# custom 'test' command are computed by the helpers defined above in this
# file; 'zip_safe=False' keeps the package installed as plain files.
setup(name='testtools',
author='Jonathan M. Lange',
author_email='jml+testtools@mumak.net',
url='https://launchpad.net/testtools',
description=('Extensions to the Python standard library unit testing '
'framework'),
long_description=get_long_description(),
version=get_version(),
classifiers=["License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3",
],
packages=[
'testtools',
'testtools.matchers',
'testtools.testresult',
'testtools.tests',
'testtools.tests.matchers',
],
cmdclass={'test': testtools.TestCommand},
zip_safe=False)

View File

@ -1,89 +0,0 @@
# Copyright (c) 2008-2012 testtools developers. See LICENSE for details.
"""Extensions to the standard Python unittest library."""
__all__ = [
'clone_test_with_new_id',
'ConcurrentTestSuite',
'ErrorHolder',
'ExpectedException',
'ExtendedToOriginalDecorator',
'FixtureSuite',
'iterate_tests',
'MultipleExceptions',
'MultiTestResult',
'PlaceHolder',
'run_test_with',
'Tagger',
'TestCase',
'TestCommand',
'TestByTestResult',
'TestResult',
'TestResultDecorator',
'TextTestResult',
'RunTest',
'skip',
'skipIf',
'skipUnless',
'ThreadsafeForwardingResult',
'try_import',
'try_imports',
]
from testtools.helpers import (
try_import,
try_imports,
)
from testtools.matchers._impl import (
Matcher,
)
# Shut up, pyflakes. We are importing for documentation, not for namespacing.
Matcher
from testtools.runtest import (
MultipleExceptions,
RunTest,
)
from testtools.testcase import (
ErrorHolder,
ExpectedException,
PlaceHolder,
TestCase,
clone_test_with_new_id,
run_test_with,
skip,
skipIf,
skipUnless,
)
from testtools.testresult import (
ExtendedToOriginalDecorator,
MultiTestResult,
Tagger,
TestByTestResult,
TestResult,
TestResultDecorator,
TextTestResult,
ThreadsafeForwardingResult,
)
from testtools.testsuite import (
ConcurrentTestSuite,
FixtureSuite,
iterate_tests,
)
from testtools.distutilscmd import (
TestCommand,
)
# same format as sys.version_info: "A tuple containing the five components of
# the version number: major, minor, micro, releaselevel, and serial. All
# values except releaselevel are integers; the release level is 'alpha',
# 'beta', 'candidate', or 'final'. The version_info value corresponding to the
# Python version 2.0 is (2, 0, 0, 'final', 0)." Additionally we use a
# releaselevel of 'dev' for unreleased under-development code.
#
# If the releaselevel is 'alpha' then the major/minor/micro components are not
# established at this point, and setup.py will use a version of next-$(revno).
# If the releaselevel is 'final', then the tarball will be major.minor.micro.
# Otherwise it is major.minor.micro~$(revno).
__version__ = (0, 9, 25, 'dev', 0)

View File

@ -1,17 +0,0 @@
# Copyright (c) 2011 testtools developers. See LICENSE for details.
"""Compatibility helpers that are valid syntax in Python 2.x.
Only add things here if they *only* work in Python 2.x or are Python 2
alternatives to things that *only* work in Python 3.x.
"""
__all__ = [
'reraise',
]
def reraise(exc_class, exc_obj, exc_tb, _marker=object()):
"""Re-raise an exception received from sys.exc_info() or similar."""
raise exc_class, exc_obj, exc_tb

View File

@ -1,17 +0,0 @@
# Copyright (c) 2011 testtools developers. See LICENSE for details.
"""Compatibility helpers that are valid syntax in Python 3.x.
Only add things here if they *only* work in Python 3.x or are Python 3
alternatives to things that *only* work in Python 2.x.
"""
__all__ = [
'reraise',
]
def reraise(exc_class, exc_obj, exc_tb, _marker=object()):
    """Re-raise an exception received from sys.exc_info() or similar.

    Re-attaches the captured traceback so the re-raised exception reports
    where it was originally raised.
    """
    raise exc_obj.with_traceback(exc_tb)

View File

@ -1,316 +0,0 @@
# Copyright (c) 2010 testtools developers. See LICENSE for details.
"""Evil reactor-spinning logic for running Twisted tests.
This code is highly experimental, liable to change and not to be trusted. If
you couldn't write this yourself, you should not be using it.
"""
__all__ = [
'DeferredNotFired',
'extract_result',
'NoResultError',
'not_reentrant',
'ReentryError',
'Spinner',
'StaleJunkError',
'TimeoutError',
'trap_unhandled_errors',
]
import signal
from testtools.monkey import MonkeyPatcher
from twisted.internet import defer
from twisted.internet.base import DelayedCall
from twisted.internet.interfaces import IReactorThreads
from twisted.python.failure import Failure
from twisted.python.util import mergeFunctionMetadata
class ReentryError(Exception):
    """Raised when we try to re-enter a function that forbids it."""

    def __init__(self, function):
        # Fixed grammar: the original message read "in not re-entrant".
        Exception.__init__(self,
            "%r is not re-entrant but was called within a call to itself."
            % (function,))
def not_reentrant(function, _calls={}):
    """Decorate ``function`` as not being re-entrant.

    The decorated function will raise ReentryError if called from within
    itself.  ``_calls`` is a shared table recording which functions are
    currently executing.
    """
    def decorated(*args, **kwargs):
        if _calls.get(function, False):
            raise ReentryError(function)
        _calls[function] = True
        try:
            return function(*args, **kwargs)
        finally:
            # Always clear the flag, even if the call raised.
            _calls[function] = False
    return mergeFunctionMetadata(function, decorated)
class DeferredNotFired(Exception):
    """Raised on extracting a result from a Deferred that has not fired."""
def extract_result(deferred):
    """Return the result of a fired ``deferred``, or raise its failure.

    It can happen that you have an API that returns Deferreds for
    compatibility with Twisted code, but is in fact synchronous, i.e. the
    Deferreds it returns have always fired by the time it returns.  In
    this case, you can use this function to convert the result back into
    the usual form for a synchronous API, i.e. the result itself or a
    raised exception.

    It would be very bad form to use this as some way of checking if a
    Deferred has fired.

    :raises DeferredNotFired: if ``deferred`` has not fired yet.
    """
    errors = []
    values = []
    deferred.addCallbacks(values.append, errors.append)
    if len(errors) == 1:
        errors[0].raiseException()
    elif len(values) == 1:
        return values[0]
    raise DeferredNotFired("%r has not fired yet." % (deferred,))
def trap_unhandled_errors(function, *args, **kwargs):
    """Run ``function``, trapping any unhandled errors in Deferreds.

    Assumes that 'function' will have handled any errors in Deferreds by
    the time it is complete.  This is almost never true of any Twisted
    code, since you can never tell when someone has added an errback to a
    Deferred.

    If 'function' raises, then don't bother doing any unhandled error
    jiggery-pokery, since something horrible has probably happened anyway.

    :return: A tuple of '(result, error)', where 'result' is the value
        returned by 'function' and 'error' is a list of 'defer.DebugInfo'
        objects that have unhandled errors in Deferreds.
    """
    real_DebugInfo = defer.DebugInfo
    captured_infos = []

    def recording_DebugInfo():
        # Stand-in constructor that remembers every DebugInfo created.
        info = real_DebugInfo()
        captured_infos.append(info)
        return info

    defer.DebugInfo = recording_DebugInfo
    try:
        result = function(*args, **kwargs)
    finally:
        defer.DebugInfo = real_DebugInfo
    errors = []
    for info in captured_infos:
        if info.failResult is not None:
            errors.append(info)
            # Disable the destructor that logs to error.  We are already
            # catching the error here.
            info.__del__ = lambda: None
    return result, errors
class TimeoutError(Exception):
    """Raised when run_in_reactor takes too long to run a function."""

    def __init__(self, function, timeout):
        message = "%r took longer than %s seconds" % (function, timeout)
        Exception.__init__(self, message)
class NoResultError(Exception):
    """Raised when the reactor has stopped but we don't have any result."""

    def __init__(self):
        Exception.__init__(self,
            "Tried to get test's result from Deferred when no result is "
            "available. Probably means we received SIGINT or similar.")
class StaleJunkError(Exception):
    """Raised when there's junk in the spinner from a previous run."""

    def __init__(self, junk):
        Exception.__init__(self,
            "There was junk in the spinner from a previous run. "
            "Use clear_junk() to clear it out: %r" % (junk,))
class Spinner(object):
    """Spin the reactor until a function is done.

    This class emulates the behaviour of twisted.trial in that it
    grotesquely and horribly spins the Twisted reactor while a function is
    running, and then kills the reactor when that function is complete and
    all the callbacks in its chains are done.
    """

    _UNSET = object()

    # Signals that we save and restore for each spin.
    _PRESERVED_SIGNALS = [
        'SIGINT',
        'SIGTERM',
        'SIGCHLD',
        ]

    # There are many APIs within Twisted itself where a Deferred fires but
    # leaves cleanup work scheduled for the reactor to do.  Arguably, many
    # of these are bugs.  As such, we provide a facility to iterate the
    # reactor event loop a number of times after every call, in order to
    # shake out these buggy-but-commonplace events.  The default is 0,
    # because that is the ideal, and it actually works for many cases.
    _OBLIGATORY_REACTOR_ITERATIONS = 0

    def __init__(self, reactor, debug=False):
        """Construct a Spinner.

        :param reactor: A Twisted reactor.
        :param debug: Whether or not to enable Twisted's debugging.
            Defaults to False.
        """
        self._reactor = reactor
        self._timeout_call = None
        self._success = self._UNSET
        self._failure = self._UNSET
        self._saved_signals = []
        self._junk = []
        self._debug = debug

    def _cancel_timeout(self):
        # Cancel the pending timeout call, if one is outstanding.
        if self._timeout_call:
            self._timeout_call.cancel()

    def _get_result(self):
        # Raise the stored failure, return the stored success, or complain
        # that neither ever arrived.
        if self._failure is not self._UNSET:
            self._failure.raiseException()
        if self._success is not self._UNSET:
            return self._success
        raise NoResultError()

    def _got_failure(self, result):
        self._cancel_timeout()
        self._failure = result

    def _got_success(self, result):
        self._cancel_timeout()
        self._success = result

    def _stop_reactor(self, ignored=None):
        """Stop the reactor!"""
        self._reactor.crash()

    def _timed_out(self, function, timeout):
        e = TimeoutError(function, timeout)
        self._failure = Failure(e)
        self._stop_reactor()

    def _clean(self):
        """Clean up any junk in the reactor.

        Will always iterate the reactor a number of times equal to
        ``Spinner._OBLIGATORY_REACTOR_ITERATIONS``.  This is to work
        around bugs in various Twisted APIs where a Deferred fires but
        still leaves work (e.g. cancelling a call, actually closing a
        connection) for the reactor to do.
        """
        for i in range(self._OBLIGATORY_REACTOR_ITERATIONS):
            self._reactor.iterate(0)
        junk = []
        for delayed_call in self._reactor.getDelayedCalls():
            delayed_call.cancel()
            junk.append(delayed_call)
        for selectable in self._reactor.removeAll():
            # Twisted sends a 'KILL' signal to selectables that provide
            # IProcessTransport.  Since only _dumbwin32proc processes do
            # this, we aren't going to bother.
            junk.append(selectable)
        if IReactorThreads.providedBy(self._reactor):
            if self._reactor.threadpool is not None:
                self._reactor._stopThreadPool()
        self._junk.extend(junk)
        return junk

    def clear_junk(self):
        """Clear out our recorded junk.

        :return: Whatever junk was there before.
        """
        junk = self._junk
        self._junk = []
        return junk

    def get_junk(self):
        """Return any junk that has been found on the reactor."""
        return self._junk

    def _save_signals(self):
        # Remember the current handlers for the preserved signals so that
        # spinning the reactor cannot permanently change them.
        available_signals = [
            getattr(signal, name, None) for name in self._PRESERVED_SIGNALS]
        self._saved_signals = [
            (sig, signal.getsignal(sig)) for sig in available_signals if sig]

    def _restore_signals(self):
        for sig, hdlr in self._saved_signals:
            signal.signal(sig, hdlr)
        self._saved_signals = []

    @not_reentrant
    def run(self, timeout, function, *args, **kwargs):
        """Run 'function' in a reactor.

        If 'function' returns a Deferred, the reactor will keep spinning
        until the Deferred fires and its chain completes or until the
        timeout is reached -- whichever comes first.

        :raise TimeoutError: If 'timeout' is reached before the Deferred
            returned by 'function' has completed its callback chain.
        :raise NoResultError: If the reactor is somehow interrupted before
            the Deferred returned by 'function' has completed its callback
            chain.
        :raise StaleJunkError: If there's junk in the spinner from a
            previous run.
        :return: Whatever is at the end of the function's callback chain.
            If it's an error, then raise that.
        """
        debug = MonkeyPatcher()
        if self._debug:
            debug.add_patch(defer.Deferred, 'debug', True)
            debug.add_patch(DelayedCall, 'debug', True)
        debug.patch()
        try:
            junk = self.get_junk()
            if junk:
                raise StaleJunkError(junk)
            self._save_signals()
            self._timeout_call = self._reactor.callLater(
                timeout, self._timed_out, function, timeout)
            # Calling 'stop' on the reactor will make it impossible to
            # re-start the reactor.  Since the default signal handlers for
            # TERM, BREAK and INT all call reactor.stop(), we'll patch it
            # over with crash.  XXX: It might be a better idea to either
            # install custom signal handlers or to override the methods
            # that are Twisted's signal handlers.
            stop, self._reactor.stop = self._reactor.stop, self._reactor.crash

            def run_function():
                d = defer.maybeDeferred(function, *args, **kwargs)
                d.addCallbacks(self._got_success, self._got_failure)
                d.addBoth(self._stop_reactor)

            try:
                self._reactor.callWhenRunning(run_function)
                self._reactor.run()
            finally:
                self._reactor.stop = stop
                self._restore_signals()
            try:
                return self._get_result()
            finally:
                self._clean()
        finally:
            debug.restore()

View File

@ -1,393 +0,0 @@
# Copyright (c) 2008-2011 testtools developers. See LICENSE for details.
"""Compatibility support for python 2 and 3."""
__metaclass__ = type
__all__ = [
'_b',
'_u',
'advance_iterator',
'all',
'BytesIO',
'classtypes',
'isbaseexception',
'istext',
'str_is_unicode',
'StringIO',
'reraise',
'unicode_output_stream',
]
import codecs
import linecache
import locale
import os
import re
import sys
import traceback
import unicodedata
from testtools.helpers import try_imports
BytesIO = try_imports(['StringIO.StringIO', 'io.BytesIO'])
StringIO = try_imports(['StringIO.StringIO', 'io.StringIO'])
try:
from testtools import _compat2x as _compat
except SyntaxError:
from testtools import _compat3x as _compat
reraise = _compat.reraise
__u_doc = """A function version of the 'u' prefix.
This is needed because the u prefix is not usable in Python 3 but is required
in Python 2 to get a unicode object.
To migrate code that was written as u'\u1234' in Python 2 to 2+3 change
it to be _u('\u1234'). The Python 3 interpreter will decode it
appropriately and the no-op _u for Python 3 lets it through, in Python
2 we then call unicode-escape in the _u function.
"""
if sys.version_info > (3, 0):
import builtins
def _u(s):
return s
_r = ascii
def _b(s):
"""A byte literal."""
return s.encode("latin-1")
advance_iterator = next
# GZ 2011-08-24: Seems istext() is easy to misuse and makes for bad code.
def istext(x):
return isinstance(x, str)
def classtypes():
return (type,)
str_is_unicode = True
else:
import __builtin__ as builtins
def _u(s):
# The double replace mangling going on prepares the string for
# unicode-escape - \foo is preserved, \u and \U are decoded.
return (s.replace("\\", "\\\\").replace("\\\\u", "\\u")
.replace("\\\\U", "\\U").decode("unicode-escape"))
_r = repr
def _b(s):
return s
advance_iterator = lambda it: it.next()
def istext(x):
return isinstance(x, basestring)
def classtypes():
import types
return (type, types.ClassType)
str_is_unicode = sys.platform == "cli"
_u.__doc__ = __u_doc
# Python 2.5+ has the ``all`` builtin and a stable BaseException repr;
# earlier versions need hand-rolled equivalents.
if sys.version_info > (2, 5):
    all = all
    _error_repr = BaseException.__repr__

    def isbaseexception(exception):
        """Return whether exception inherits from BaseException only"""
        return (isinstance(exception, BaseException)
                and not isinstance(exception, Exception))
else:
    def all(iterable):
        """If contents of iterable all evaluate as boolean True"""
        for item in iterable:
            if not item:
                return False
        return True

    def _error_repr(exception):
        """Format an exception instance as Python 2.5 and later do"""
        return exception.__class__.__name__ + repr(exception.args)

    def isbaseexception(exception):
        """Return whether exception would inherit from BaseException only

        This approximates the hierarchy in Python 2.5 and later: only
        KeyboardInterrupt and SystemExit were re-parented directly under
        BaseException.  Compare the diagrams at the bottom of:
        <http://docs.python.org/release/2.4.4/lib/module-exceptions.html>
        <http://docs.python.org/release/2.5.4/lib/module-exceptions.html>
        """
        return isinstance(exception, (KeyboardInterrupt, SystemExit))
# GZ 2011-08-24: Using isinstance checks like this encourages bad interfaces,
# there should be better ways to write code needing this.
if not issubclass(getattr(builtins, "bytes", str), str):
def _isbytes(x):
return isinstance(x, bytes)
else:
# Never return True on Pythons that provide the name but not the real type
def _isbytes(x):
return False
def _slow_escape(text):
"""Escape unicode ``text`` leaving printable characters unmodified
The behaviour emulates the Python 3 implementation of repr, see
unicode_repr in unicodeobject.c and isprintable definition.
Because this iterates over the input a codepoint at a time, it's slow, and
does not handle astral characters correctly on Python builds with 16 bit
rather than 32 bit unicode type.
"""
output = []
for c in text:
o = ord(c)
if o < 256:
if o < 32 or 126 < o < 161:
output.append(c.encode("unicode-escape"))
elif o == 92:
# Separate due to bug in unicode-escape codec in Python 2.4
output.append("\\\\")
else:
output.append(c)
else:
# To get correct behaviour would need to pair up surrogates here
if unicodedata.category(c)[0] in "CZ":
output.append(c.encode("unicode-escape"))
else:
output.append(c)
return "".join(output)
def text_repr(text, multiline=None):
    """Rich repr for ``text``: unicode result, triple quoted if ``multiline``."""
    on_py3 = sys.version_info > (3, 0)
    newline = _isbytes(text) and bytes((0xA,)) or "\n"
    if multiline is None:
        multiline = newline in text
    if not multiline and (on_py3 or not str_is_unicode and type(text) is str):
        # Normal repr already does the right thing for a single line of
        # unicode on Python 3, or for bytes.
        return repr(text)
    prefix = repr(text[:0])[:-2]
    if not multiline:
        escaped = _slow_escape(text)
        # Choose the quote character (and whether to backslash it) the same
        # way Python's repr() does for strings.
        quote = "'"
        if "'" in text:
            if '"' in text:
                escaped = escaped.replace("'", "\\'")
            else:
                quote = '"'
        return "".join([prefix, quote, escaped, quote])
    # Multiline: escape each line separately so that quotes themselves stay
    # unescaped.
    if on_py3:
        offset = len(prefix) + 1
        lines = []
        for segment in text.split(newline):
            r = repr(segment)
            q = r[-1]
            lines.append(r[offset:-1].replace("\\" + q, q))
    elif not str_is_unicode and isinstance(text, str):
        lines = [segment.encode("string-escape").replace("\\'", "'")
            for segment in text.split("\n")]
    else:
        lines = [_slow_escape(segment) for segment in text.split("\n")]
    # Join the escaped lines, append two of the closing quotes, then walk
    # the result to backslash any embedded triple quotes.
    joined = "\n".join(lines) + "''"
    pos = 0
    while True:
        pos = joined.find("'''", pos)
        if pos == -1:
            break
        joined = "\\".join([joined[:pos], joined[pos:]])
        pos += 2
    return "".join([prefix, "'''\\\n", joined, "'"])
def unicode_output_stream(stream):
    """Get wrapper for given stream that writes any unicode without exception

    Characters that can't be coerced to the encoding of the stream, or
    'ascii' if no valid encoding is found, will be replaced.  The original
    stream may be returned when a wrapper is determined unneeded.

    The wrapper only allows unicode to be written, not non-ascii
    bytestrings, which is a good thing to ensure sanity and sanitation.
    """
    if sys.platform == "cli":
        # IronPython is happiest when nothing is pre-encoded.
        return stream
    try:
        writer = codecs.getwriter(stream.encoding or "")
    except (AttributeError, LookupError):
        # GZ 2010-06-16: Python 3 StringIO ends up here, but probably needs
        # different handling as it doesn't want bytestrings
        return codecs.getwriter("ascii")(stream, "replace")
    if writer.__module__.rsplit(".", 1)[1].startswith("utf"):
        # A unicode encoding can represent every codepoint, so no error
        # handler is needed.
        if sys.version_info > (3, 0):
            return stream
        return writer(stream)
    if sys.version_info > (3, 0):
        # Python 3 doesn't seem to make this easy; rebuild the wrapper with
        # a 'replace' handler for the common TextIOWrapper case.
        try:
            return stream.__class__(stream.buffer, stream.encoding, "replace",
                stream.newlines, stream.line_buffering)
        except AttributeError:
            pass
    return writer(stream, "replace")
# The default source encoding is actually "iso-8859-1" until Python 2.5 but
# using non-ascii causes a deprecation warning in 2.4 and it's cleaner to
# treat all versions the same way
_default_source_encoding = "ascii"
# Pattern specified in <http://www.python.org/dev/peps/pep-0263/>
_cookie_search=re.compile("coding[:=]\s*([-\w.]+)").search
def _detect_encoding(lines):
"""Get the encoding of a Python source file from a list of lines as bytes
This function does less than tokenize.detect_encoding added in Python 3 as
it does not attempt to raise a SyntaxError when the interpreter would, it
just wants the encoding of a source file Python has already compiled and
determined is valid.
"""
if not lines:
return _default_source_encoding
if lines[0].startswith("\xef\xbb\xbf"):
# Source starting with UTF-8 BOM is either UTF-8 or a SyntaxError
return "utf-8"
# Only the first two lines of the source file are examined
magic = _cookie_search("".join(lines[:2]))
if magic is None:
return _default_source_encoding
encoding = magic.group(1)
try:
codecs.lookup(encoding)
except LookupError:
# Some codecs raise something other than LookupError if they don't
# support the given error handler, but not the text ones that could
# actually be used for Python source code
return _default_source_encoding
return encoding
class _EncodingTuple(tuple):
    """A tuple type that can have an encoding attribute smuggled on"""


def _get_source_encoding(filename):
    """Detect, cache and return the encoding of Python source at filename

    The detected encoding is cached by swapping the linecache entry for an
    ``_EncodingTuple`` carrying an ``encoding`` attribute.
    """
    try:
        return linecache.cache[filename].encoding
    except (AttributeError, KeyError):
        encoding = _detect_encoding(linecache.getlines(filename))
        if filename in linecache.cache:
            cached = _EncodingTuple(linecache.cache[filename])
            cached.encoding = encoding
            linecache.cache[filename] = cached
        return encoding
def _get_exception_encoding():
"""Return the encoding we expect messages from the OS to be encoded in"""
if os.name == "nt":
# GZ 2010-05-24: Really want the codepage number instead, the error
# handling of standard codecs is more deterministic
return "mbcs"
# GZ 2010-05-23: We need this call to be after initialisation, but there's
# no benefit in asking more than once as it's a global
# setting that can change after the message is formatted.
return locale.getlocale(locale.LC_MESSAGES)[1] or "ascii"
def _exception_to_text(evalue):
"""Try hard to get a sensible text value out of an exception instance"""
try:
return unicode(evalue)
except KeyboardInterrupt:
raise
except:
# Apparently this is what traceback._some_str does. Sigh - RBC 20100623
pass
try:
return str(evalue).decode(_get_exception_encoding(), "replace")
except KeyboardInterrupt:
raise
except:
# Apparently this is what traceback._some_str does. Sigh - RBC 20100623
pass
# Okay, out of ideas, let higher level handle it
return None
# GZ 2010-05-23: This function is huge and horrible and I welcome suggestions
# on the best way to break it up
_TB_HEADER = _u('Traceback (most recent call last):\n')


def _format_exc_info(eclass, evalue, tb, limit=None):
    """Format a stack trace and the exception information as unicode

    Compatibility function for Python 2 which ensures each component of a
    traceback is correctly decoded according to its origins.

    Based on traceback.format_exception and related functions.
    """
    fs_enc = sys.getfilesystemencoding()
    if tb:
        parts = [_TB_HEADER]
        extracted = []
        for filename, lineno, name, line in traceback.extract_tb(tb, limit):
            extracted.append((
                filename.decode(fs_enc, "replace"),
                lineno,
                name.decode("ascii", "replace"),
                line and line.decode(
                    _get_source_encoding(filename), "replace")))
        parts.extend(traceback.format_list(extracted))
    else:
        parts = []
    if evalue is None:
        # Is a (deprecated) string exception
        parts.append((eclass + "\n").decode("ascii", "replace"))
        return parts
    if isinstance(evalue, SyntaxError):
        # Avoid duplicating SyntaxError's special formatting here; instead
        # create a new instance with unicode filename and line.  Potentially
        # gives duff spacing, but that's a pre-existing issue.
        try:
            msg, (filename, lineno, offset, line) = evalue
        except (TypeError, ValueError):
            pass  # Strange exception instance, fall through to generic code
        else:
            # Errors during parsing give the line from buffer encoded as
            # latin-1 or utf-8 or the encoding of the file depending on the
            # coding and whether the patch for issue #1031213 is applied, so
            # give up on trying to decode it and just read the file again
            if line:
                bytestr = linecache.getline(filename, lineno)
                if bytestr:
                    if lineno == 1 and bytestr.startswith("\xef\xbb\xbf"):
                        bytestr = bytestr[3:]
                    line = bytestr.decode(
                        _get_source_encoding(filename), "replace")
                    del linecache.cache[filename]
                else:
                    line = line.decode("ascii", "replace")
            if filename:
                filename = filename.decode(fs_enc, "replace")
            evalue = eclass(msg, (filename, lineno, offset, line))
        parts.extend(traceback.format_exception_only(eclass, evalue))
        return parts
    sclass = eclass.__name__
    svalue = _exception_to_text(evalue)
    if svalue:
        parts.append("%s: %s\n" % (sclass, svalue))
    elif svalue is None:
        # GZ 2010-05-24: Not a great fallback message, but keep for the moment
        parts.append("%s: <unprintable %s object>\n" % (sclass, sclass))
    else:
        parts.append("%s\n" % sclass)
    return parts

View File

@ -1,324 +0,0 @@
# Copyright (c) 2009-2012 testtools developers. See LICENSE for details.
"""Content - a MIME-like Content object."""
__all__ = [
'attach_file',
'Content',
'content_from_file',
'content_from_stream',
'text_content',
'TracebackContent',
]
import codecs
import json
import os
import sys
import traceback
from testtools import try_import
from testtools.compat import _b, _format_exc_info, str_is_unicode, _u
from testtools.content_type import ContentType, JSON, UTF8_TEXT
functools = try_import('functools')
_join_b = _b("").join
DEFAULT_CHUNK_SIZE = 4096
STDOUT_LINE = '\nStdout:\n%s'
STDERR_LINE = '\nStderr:\n%s'
def _iter_chunks(stream, chunk_size, seek_offset=None, seek_whence=0):
"""Read 'stream' in chunks of 'chunk_size'.
:param stream: A file-like object to read from.
:param chunk_size: The size of each read from 'stream'.
:param seek_offset: If non-None, seek before iterating.
:param seek_whence: Pass through to the seek call, if seeking.
"""
if seek_offset is not None:
stream.seek(seek_offset, seek_whence)
chunk = stream.read(chunk_size)
while chunk:
yield chunk
chunk = stream.read(chunk_size)
class Content(object):
    """A MIME-like Content object.

    Content objects can be serialised to bytes using the iter_bytes method.
    If the Content-Type is recognised by other code, they are welcome to
    look for richer contents than mere byte serialisation - for example in
    memory object graphs etc.  However, such code MUST be prepared to
    receive a generic Content object that has been reconstructed from a
    byte stream.

    :ivar content_type: The content type of this Content.
    """

    def __init__(self, content_type, get_bytes):
        """Create a Content object.

        :param content_type: The ContentType describing the payload.
        :param get_bytes: A callable returning an iterable of bytestrings.
        :raises ValueError: If either argument is None.
        """
        if None in (content_type, get_bytes):
            raise ValueError("None not permitted in %r, %r" % (
                content_type, get_bytes))
        self.content_type = content_type
        self._get_bytes = get_bytes

    def __eq__(self, other):
        # Compare by fully serialising both sides.
        return (self.content_type == other.content_type and
                _join_b(self.iter_bytes()) == _join_b(other.iter_bytes()))

    def as_text(self):
        """Return all of the content as text.

        This is only valid where ``iter_text`` is.  It will load all of the
        content into memory.  Where this is a concern, use ``iter_text``
        instead.
        """
        return _u('').join(self.iter_text())

    def iter_bytes(self):
        """Iterate over bytestrings of the serialised content."""
        return self._get_bytes()

    def iter_text(self):
        """Iterate over the text of the serialised content.

        This is only valid for text MIME types, and will use ISO-8859-1 if
        no charset parameter is present in the MIME type.  (This is somewhat
        arbitrary, but consistent with RFC2617 3.7.1).

        :raises ValueError: If the content type is not text/\*.
        """
        if self.content_type.type != "text":
            raise ValueError("Not a text type %r" % self.content_type)
        return self._iter_text()

    def _iter_text(self):
        """Worker for iter_text - does the decoding."""
        encoding = self.content_type.parameters.get('charset', 'ISO-8859-1')
        try:
            # Incremental decoding is available on 2.5+.
            decoder = codecs.getincrementaldecoder(encoding)()
            for data in self.iter_bytes():
                yield decoder.decode(data)
            tail = decoder.decode(_b(''), True)
            if tail:
                yield tail
        except AttributeError:
            # < 2.5: decode everything in one go.
            data = ''.join(self.iter_bytes())
            yield data.decode(encoding)

    def __repr__(self):
        return "<Content type=%r, value=%r>" % (
            self.content_type, _join_b(self.iter_bytes()))
class TracebackContent(Content):
    """Content object for tracebacks.

    This adapts an exc_info tuple to the Content interface.
    text/x-traceback;language=python is used for the mime type, in order to
    provide room for other languages to format their tracebacks differently.
    """

    # Whether or not to hide layers of the stack trace that are
    # unittest/testtools internal code.  Defaults to True since the
    # system-under-test is rarely unittest or testtools.
    HIDE_INTERNAL_STACK = True

    def __init__(self, err, test):
        """Create a TracebackContent for err.

        :param err: A sys.exc_info() style tuple.
        :param test: The test the error occurred in.
        :raises ValueError: If err is None.
        """
        if err is None:
            raise ValueError("err may not be None")
        content_type = ContentType('text', 'x-traceback',
            {"language": "python", "charset": "utf8"})
        text = self._exc_info_to_unicode(err, test)
        super(TracebackContent, self).__init__(
            content_type, lambda: [text.encode("utf8")])

    def _exc_info_to_unicode(self, err, test):
        """Converts a sys.exc_info()-style tuple of values into a string.

        Copied from Python 2.7's unittest.TestResult._exc_info_to_string.
        """
        exctype, value, tb = err
        # Skip test runner traceback levels
        if self.HIDE_INTERNAL_STACK:
            while tb and self._is_relevant_tb_level(tb):
                tb = tb.tb_next
        # testtools customization. When str is unicode (e.g. IronPython,
        # Python 3), traceback.format_exception returns unicode. For
        # Python 2, it returns bytes. We need to guarantee unicode.
        if str_is_unicode:
            format_exception = traceback.format_exception
        else:
            format_exception = _format_exc_info
        if (self.HIDE_INTERNAL_STACK and test.failureException
            and isinstance(value, test.failureException)):
            # Skip assert*() traceback levels
            length = self._count_relevant_tb_levels(tb)
            msgLines = format_exception(exctype, value, tb, length)
        else:
            msgLines = format_exception(exctype, value, tb)
        if getattr(self, 'buffer', None):
            output = sys.stdout.getvalue()
            error = sys.stderr.getvalue()
            if output:
                if not output.endswith('\n'):
                    output += '\n'
                msgLines.append(STDOUT_LINE % output)
            if error:
                if not error.endswith('\n'):
                    error += '\n'
                msgLines.append(STDERR_LINE % error)
        return ''.join(msgLines)

    def _is_relevant_tb_level(self, tb):
        # Frames whose module defines ``__unittest`` are runner internals.
        return '__unittest' in tb.tb_frame.f_globals

    def _count_relevant_tb_levels(self, tb):
        depth = 0
        while tb and not self._is_relevant_tb_level(tb):
            depth += 1
            tb = tb.tb_next
        return depth
def json_content(json_data):
    """Create a JSON `Content` object from JSON-encodeable data."""
    serialised = json.dumps(json_data)
    if str_is_unicode:
        # The json module perversely returns native str not bytes
        serialised = serialised.encode('utf8')
    return Content(JSON, lambda: [serialised])
def text_content(text):
    """Create a `Content` object from some text.

    This is useful for adding details which are short strings.
    """
    return Content(UTF8_TEXT, lambda: [text.encode('utf8')])
def maybe_wrap(wrapper, func):
    """Merge metadata for func into wrapper if functools is present."""
    if functools is None:
        return wrapper
    return functools.update_wrapper(wrapper, func)
def content_from_file(path, content_type=None, chunk_size=DEFAULT_CHUNK_SIZE,
                      buffer_now=False, seek_offset=None, seek_whence=0):
    """Create a `Content` object from a file on disk.

    Note that unless 'buffer_now' is explicitly passed in as True, the file
    will only be read from when ``iter_bytes`` is called.

    :param path: The path to the file to be used as content.
    :param content_type: The type of content.  If not specified, defaults
        to UTF8-encoded text/plain.
    :param chunk_size: The size of chunks to read from the file.
        Defaults to ``DEFAULT_CHUNK_SIZE``.
    :param buffer_now: If True, read the file from disk now and keep it in
        memory.  Otherwise, only read when the content is serialized.
    :param seek_offset: If non-None, seek within the stream before reading it.
    :param seek_whence: If supplied, pass to stream.seek() when seeking.
    """
    if content_type is None:
        content_type = UTF8_TEXT

    def reader():
        # This should be try:finally:, but python2.4 makes that hard.  When
        # older python support is dropped this can become a context manager
        # for maximum simplicity.
        stream = open(path, 'rb')
        for chunk in _iter_chunks(stream, chunk_size, seek_offset, seek_whence):
            yield chunk
        stream.close()

    return content_from_reader(reader, content_type, buffer_now)
def content_from_stream(stream, content_type=None,
                        chunk_size=DEFAULT_CHUNK_SIZE, buffer_now=False,
                        seek_offset=None, seek_whence=0):
    """Create a `Content` object from a file-like stream.

    Note that the stream will only be read from when ``iter_bytes`` is
    called.

    :param stream: A file-like object to read the content from.  The stream
        is not closed by this function or the content object it returns.
    :param content_type: The type of content.  If not specified, defaults
        to UTF8-encoded text/plain.
    :param chunk_size: The size of chunks to read from the file.
        Defaults to ``DEFAULT_CHUNK_SIZE``.
    :param buffer_now: If True, reads from the stream right now.  Otherwise,
        only reads when the content is serialized.  Defaults to False.
    :param seek_offset: If non-None, seek within the stream before reading it.
    :param seek_whence: If supplied, pass to stream.seek() when seeking.
    """
    if content_type is None:
        content_type = UTF8_TEXT
    reader = lambda: _iter_chunks(stream, chunk_size, seek_offset, seek_whence)
    return content_from_reader(reader, content_type, buffer_now)
def content_from_reader(reader, content_type, buffer_now):
    """Create a Content object that will obtain the content from reader.

    :param reader: A callback to read the content.  Should return an
        iterable of bytestrings.
    :param content_type: The content type to create.
    :param buffer_now: If True the reader is evaluated immediately and
        buffered.
    """
    if content_type is None:
        content_type = UTF8_TEXT
    if buffer_now:
        buffered = list(reader())
        reader = lambda: buffered
    return Content(content_type, reader)
def attach_file(detailed, path, name=None, content_type=None,
                chunk_size=DEFAULT_CHUNK_SIZE, buffer_now=True):
    """Attach a file to this test as a detail.

    This is a convenience method wrapping around ``addDetail``.

    Note that unless 'buffer_now' is True (the default), the file *must*
    still exist when the test result is called with the results of this
    test, after the test has been torn down.

    :param detailed: An object with details
    :param path: The path to the file to attach.
    :param name: The name to give to the detail for the attached file.
    :param content_type: The content type of the file.  If not provided,
        defaults to UTF8-encoded text/plain.
    :param chunk_size: The size of chunks to read from the file.  Defaults
        to something sensible.
    :param buffer_now: If False the file content is read when the content
        object is evaluated rather than when attach_file is called.
        Note that this may be after any cleanups that obj_with_details has,
        so if the file is a temporary file disabling buffer_now may cause
        the file to be read after it is deleted.  To handle those cases,
        using attach_file as a cleanup is recommended because it guarantees
        a sequence for when the attach_file call is made::

            detailed.addCleanup(attach_file, detailed, 'foo.txt')
    """
    if name is None:
        name = os.path.basename(path)
    content_object = content_from_file(
        path, content_type, chunk_size, buffer_now)
    detailed.addDetail(name, content_object)

View File

@ -1,41 +0,0 @@
# Copyright (c) 2009-2012 testtools developers. See LICENSE for details.
"""ContentType - a MIME Content Type."""
class ContentType(object):
    """A content type from http://www.iana.org/assignments/media-types/

    :ivar type: The primary type, e.g. "text" or "application"
    :ivar subtype: The subtype, e.g. "plain" or "octet-stream"
    :ivar parameters: A dict of additional parameters specific to the
        content type.
    """

    def __init__(self, primary_type, sub_type, parameters=None):
        """Create a ContentType.

        :raises ValueError: If primary_type or sub_type is None.
        """
        if None in (primary_type, sub_type):
            raise ValueError("None not permitted in %r, %r" % (
                primary_type, sub_type))
        self.type = primary_type
        self.subtype = sub_type
        self.parameters = parameters or {}

    def __eq__(self, other):
        if type(other) != ContentType:
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        # Python 2 does not derive != from __eq__, so define it explicitly.
        return not self.__eq__(other)

    def __hash__(self):
        # Defining __eq__ alone makes instances unhashable on Python 3.
        # Hash only the immutable primary/sub type: instances that differ
        # only in (mutable) parameters may collide, which is legal.
        return hash((self.type, self.subtype))

    def __repr__(self):
        if self.parameters:
            params = '; '
            params += ', '.join(
                sorted('%s="%s"' % (k, v) for k, v in self.parameters.items()))
        else:
            params = ''
        return "%s/%s%s" % (self.type, self.subtype, params)


JSON = ContentType('application', 'json')

UTF8_TEXT = ContentType('text', 'plain', {'charset': 'utf8'})

View File

@ -1,336 +0,0 @@
# Copyright (c) 2010 testtools developers. See LICENSE for details.
"""Individual test case execution for tests that return Deferreds.
This module is highly experimental and is liable to change in ways that cause
subtle failures in tests. Use at your own peril.
"""
__all__ = [
'assert_fails_with',
'AsynchronousDeferredRunTest',
'AsynchronousDeferredRunTestForBrokenTwisted',
'SynchronousDeferredRunTest',
]
import sys
from testtools.compat import StringIO
from testtools.content import (
Content,
text_content,
)
from testtools.content_type import UTF8_TEXT
from testtools.runtest import RunTest
from testtools._spinner import (
extract_result,
NoResultError,
Spinner,
TimeoutError,
trap_unhandled_errors,
)
from twisted.internet import defer
from twisted.python import log
from twisted.trial.unittest import _LogObserver
class _DeferredRunTest(RunTest):
    """Base for tests that return Deferreds."""

    def _got_user_failure(self, failure, tb_label='traceback'):
        """Record a failure that came from user code.

        Unpacks the Twisted ``failure`` into an exc_info-style tuple and
        forwards it to the standard exception reporting path.
        """
        exc_info = (failure.type, failure.value, failure.getTracebackObject())
        return self._got_user_exception(exc_info, tb_label=tb_label)
class SynchronousDeferredRunTest(_DeferredRunTest):
    """Runner for tests that return synchronous Deferreds."""

    def _run_user(self, function, *args):
        # Any failure is routed through the standard reporting path before
        # the (already-fired) Deferred's result is extracted.
        deferred = defer.maybeDeferred(function, *args)
        deferred.addErrback(self._got_user_failure)
        return extract_result(deferred)
def run_with_log_observers(observers, function, *args, **kwargs):
    """Run 'function' with the given Twisted log observers.

    The currently registered observers are removed for the duration of the
    call and restored afterwards.
    """
    previous = list(log.theLogPublisher.observers)
    for observer in previous:
        log.theLogPublisher.removeObserver(observer)
    for observer in observers:
        log.theLogPublisher.addObserver(observer)
    try:
        return function(*args, **kwargs)
    finally:
        for observer in observers:
            log.theLogPublisher.removeObserver(observer)
        for observer in previous:
            log.theLogPublisher.addObserver(observer)
# Observer of the Twisted log that we install during tests.
_log_observer = _LogObserver()
class AsynchronousDeferredRunTest(_DeferredRunTest):
    """Runner for tests that return Deferreds that fire asynchronously.

    That is, this test runner assumes that the Deferreds will only fire if
    the reactor is left to spin for a while.

    Do not rely too heavily on the nuances of the behaviour of this class.
    What it does to the reactor is black magic, and if we can find nicer
    ways of doing it we will gladly break backwards compatibility.

    This is highly experimental code.  Use at your own risk.
    """

    def __init__(self, case, handlers=None, reactor=None, timeout=0.005,
                 debug=False):
        """Construct an `AsynchronousDeferredRunTest`.

        :param case: The `TestCase` to run.
        :param handlers: A list of exception handlers (ExceptionType,
            handler) where 'handler' is a callable that takes a `TestCase`,
            a ``testtools.TestResult`` and the exception raised.
        :param reactor: The Twisted reactor to use.  If not given, we use
            the default reactor.
        :param timeout: The maximum time allowed for running a test.  The
            default is 0.005s.
        :param debug: Whether or not to enable Twisted's debugging.  Use
            this to get information about unhandled Deferreds and left-over
            DelayedCalls.  Defaults to False.
        """
        super(AsynchronousDeferredRunTest, self).__init__(case, handlers)
        if reactor is None:
            from twisted.internet import reactor
        self._reactor = reactor
        self._timeout = timeout
        self._debug = debug

    @classmethod
    def make_factory(cls, reactor=None, timeout=0.005, debug=False):
        """Make a factory that conforms to the RunTest factory interface."""
        # This is horrible, but it means that the return value of the method
        # will be able to be assigned to a class variable *and* also be
        # invoked directly.
        class AsynchronousDeferredRunTestFactory:
            def __call__(self, case, handlers=None):
                return cls(case, handlers, reactor, timeout, debug)
        return AsynchronousDeferredRunTestFactory()

    @defer.deferredGenerator
    def _run_cleanups(self):
        """Run the cleanups on the test case.

        We expect that the cleanups on the test case can also return
        asynchronous Deferreds.  As such, we take the responsibility for
        running the cleanups, rather than letting TestCase do it.

        Yields the last exception raised by a cleanup, or None when every
        cleanup succeeded.
        """
        # BUG FIX: last_exception must be initialized; otherwise the final
        # yield raises UnboundLocalError whenever no cleanup fails.
        last_exception = None
        while self.case._cleanups:
            f, args, kwargs = self.case._cleanups.pop()
            d = defer.maybeDeferred(f, *args, **kwargs)
            thing = defer.waitForDeferred(d)
            yield thing
            try:
                thing.getResult()
            except Exception:
                exc_info = sys.exc_info()
                self.case._report_traceback(exc_info)
                last_exception = exc_info[1]
        yield last_exception

    def _make_spinner(self):
        """Make the `Spinner` to be used to run the tests."""
        return Spinner(self._reactor, debug=self._debug)

    def _run_deferred(self):
        """Run the test, assuming everything in it is Deferred-returning.

        This should return a Deferred that fires with True if the test was
        successful and False if the test was not successful.  It should
        *not* call addSuccess on the result, because there's reactor clean
        up that needs to be done afterwards.
        """
        fails = []

        def fail_if_exception_caught(exception_caught):
            if self.exception_caught == exception_caught:
                fails.append(None)

        def clean_up(ignored=None):
            """Run the cleanups."""
            d = self._run_cleanups()
            def clean_up_done(result):
                if result is not None:
                    self._exceptions.append(result)
                    fails.append(None)
            return d.addCallback(clean_up_done)

        def set_up_done(exception_caught):
            """Set up is done, either clean up or run the test."""
            if self.exception_caught == exception_caught:
                fails.append(None)
                return clean_up()
            else:
                d = self._run_user(self.case._run_test_method, self.result)
                d.addCallback(fail_if_exception_caught)
                d.addBoth(tear_down)
                return d

        def tear_down(ignored):
            d = self._run_user(self.case._run_teardown, self.result)
            d.addCallback(fail_if_exception_caught)
            d.addBoth(clean_up)
            return d

        d = self._run_user(self.case._run_setup, self.result)
        d.addCallback(set_up_done)
        # The test passed iff nothing was recorded in 'fails'.
        d.addBoth(lambda ignored: len(fails) == 0)
        return d

    def _log_user_exception(self, e):
        """Raise 'e' and report it as a user exception."""
        try:
            raise e
        except e.__class__:
            self._got_user_exception(sys.exc_info())

    def _blocking_run_deferred(self, spinner):
        try:
            return trap_unhandled_errors(
                spinner.run, self._timeout, self._run_deferred)
        except NoResultError:
            # We didn't get a result at all!  This could be for any number
            # of reasons, but most likely someone hit Ctrl-C during the
            # test.
            raise KeyboardInterrupt
        except TimeoutError:
            # The function took too long to run.
            self._log_user_exception(TimeoutError(self.case, self._timeout))
            return False, []

    def _run_core(self):
        # Add an observer to trap all logged errors.
        self.case.reactor = self._reactor
        error_observer = _log_observer
        full_log = StringIO()
        full_observer = log.FileLogObserver(full_log)
        spinner = self._make_spinner()
        successful, unhandled = run_with_log_observers(
            [error_observer.gotEvent, full_observer.emit],
            self._blocking_run_deferred, spinner)

        self.case.addDetail(
            'twisted-log', Content(UTF8_TEXT, full_log.readlines))

        logged_errors = error_observer.flushErrors()
        for logged_error in logged_errors:
            successful = False
            self._got_user_failure(logged_error, tb_label='logged-error')

        if unhandled:
            successful = False
            for debug_info in unhandled:
                f = debug_info.failResult
                info = debug_info._getDebugTracebacks()
                if info:
                    self.case.addDetail(
                        'unhandled-error-in-deferred-debug',
                        text_content(info))
                self._got_user_failure(f, 'unhandled-error-in-deferred')

        junk = spinner.clear_junk()
        if junk:
            successful = False
            self._log_user_exception(UncleanReactorError(junk))

        if successful:
            self.result.addSuccess(self.case, details=self.case.getDetails())

    def _run_user(self, function, *args):
        """Run a user-supplied function.

        This just makes sure that it returns a Deferred, regardless of how
        the user wrote it.
        """
        d = defer.maybeDeferred(function, *args)
        return d.addErrback(self._got_user_failure)
class AsynchronousDeferredRunTestForBrokenTwisted(AsynchronousDeferredRunTest):
    """Test runner that works around Twisted brokenness re reactor junk.

    There are many APIs within Twisted itself where a Deferred fires but
    leaves cleanup work scheduled for the reactor to do.  Arguably, many of
    these are bugs.  This runner iterates the reactor event loop a number of
    times after every test, in order to shake out these
    buggy-but-commonplace events.
    """

    def _make_spinner(self):
        # Force a couple of extra reactor iterations after each test.
        spinner = super(
            AsynchronousDeferredRunTestForBrokenTwisted, self)._make_spinner()
        spinner._OBLIGATORY_REACTOR_ITERATIONS = 2
        return spinner
def assert_fails_with(d, *exc_types, **kwargs):
    """Assert that 'd' will fail with one of 'exc_types'.

    The normal way to use this is to return the result of
    'assert_fails_with' from your unit test.

    Note that this function is experimental and unstable.  Use at your own
    peril; expect the API to change.

    :param d: A Deferred that is expected to fail.
    :param exc_types: The exception types that the Deferred is expected to
        fail with.
    :param failureException: An optional keyword argument.  If provided,
        will raise that exception instead of
        ``testtools.TestCase.failureException``.
    :return: A Deferred that will fail with an ``AssertionError`` if 'd'
        does not fail with one of the exception types.
    """
    failureException = kwargs.pop('failureException', None)
    if failureException is None:
        # Avoid circular imports.
        from testtools import TestCase
        failureException = TestCase.failureException
    expected_names = ", ".join(exc_type.__name__ for exc_type in exc_types)

    def got_success(result):
        raise failureException(
            "%s not raised (%r returned)" % (expected_names, result))

    def got_failure(failure):
        if failure.check(*exc_types):
            return failure.value
        raise failureException("%s raised instead of %s:\n %s" % (
            failure.type.__name__, expected_names, failure.getTraceback()))

    return d.addCallbacks(got_success, got_failure)
def flush_logged_errors(*error_types):
    """Flush errors of the given types from the module-level ``_log_observer``.

    :return: Whatever ``_log_observer.flushErrors`` returns for these types.
    """
    return _log_observer.flushErrors(*error_types)
class UncleanReactorError(Exception):
    """Raised when the reactor has junk in it."""

    def __init__(self, junk):
        # Render each piece of junk on its own line after the advice text.
        details = ''.join(map(self._get_junk_info, junk))
        Exception.__init__(self,
            "The reactor still thinks it needs to do things. Close all "
            "connections, kill all processes and make sure all delayed "
            "calls have either fired or been cancelled:\n%s" % details)

    def _get_junk_info(self, junk):
        # DelayedCall has a useful str(); anything else gets repr().
        from twisted.internet.base import DelayedCall
        if isinstance(junk, DelayedCall):
            text = str(junk)
        else:
            text = repr(junk)
        return ' %s\n' % (text,)

View File

@ -1,62 +0,0 @@
# Copyright (c) 2010-2011 testtools developers . See LICENSE for details.
"""Extensions to the standard Python unittest library."""
import sys
from distutils.core import Command
from distutils.errors import DistutilsOptionError
from testtools.run import TestProgram, TestToolsTestRunner
class TestCommand(Command):
    """Command to run unit tests with testtools"""

    description = "run unit tests with testtools"

    # Standard distutils option table: (long name, short name, help text).
    user_options = [
        ('catch', 'c', "Catch ctrl-C and display results so far"),
        ('buffer', 'b', "Buffer stdout and stderr during tests"),
        ('failfast', 'f', "Stop on first fail or error"),
        ('test-module=','m', "Run 'test_suite' in specified module"),
        ('test-suite=','s',
            "Test suite to run (e.g. 'some_module.test_suite')")
    ]

    def __init__(self, dist):
        # Route all test output through a testtools runner on stdout.
        Command.__init__(self, dist)
        self.runner = TestToolsTestRunner(sys.stdout)

    def initialize_options(self):
        # distutils hook: set option defaults before parsing.
        self.test_suite = None
        self.test_module = None
        self.catch = None
        self.buffer = None
        self.failfast = None

    def finalize_options(self):
        # distutils hook: validate the options and build the argv list
        # handed to TestProgram.  Exactly one of --test-suite or
        # --test-module must be given.
        if self.test_suite is None:
            if self.test_module is None:
                raise DistutilsOptionError(
                    "You must specify a module or a suite to run tests from")
            else:
                self.test_suite = self.test_module+".test_suite"
        elif self.test_module:
            raise DistutilsOptionError(
                "You may specify a module or a suite, but not both")
        self.test_args = [self.test_suite]
        # Flags are inserted before the suite name, mirroring a CLI call.
        if self.verbose:
            self.test_args.insert(0, '--verbose')
        if self.buffer:
            self.test_args.insert(0, '--buffer')
        if self.catch:
            self.test_args.insert(0, '--catch')
        if self.failfast:
            self.test_args.insert(0, '--failfast')

    def run(self):
        # exit=False: let distutils keep control instead of sys.exit().
        self.program = TestProgram(
            argv=self.test_args, testRunner=self.runner, stdout=sys.stdout,
            exit=False)

View File

@ -1,119 +0,0 @@
# Copyright (c) 2010-2012 testtools developers. See LICENSE for details.
__all__ = [
'safe_hasattr',
'try_import',
'try_imports',
]
import sys
def try_import(name, alternative=None, error_callback=None):
    """Attempt to import ``name``. If it fails, return ``alternative``.

    When supporting multiple versions of Python or optional dependencies,
    it is useful to be able to try to import a module.

    :param name: The name of the object to import, e.g. ``os.path`` or
        ``os.path.join``.
    :param alternative: The value to return if no module can be imported.
        Defaults to None.
    :param error_callback: If non-None, a callable that is passed the
        ImportError when the module cannot be loaded.
    """
    segments = name.split('.')
    last_error = None
    module = None
    # Import the longest importable prefix of the dotted name; trailing
    # segments may be attributes rather than modules.
    while segments:
        try:
            module = __import__('.'.join(segments))
        except ImportError:
            last_error = sys.exc_info()[1]
            del segments[-1]
        else:
            break
    if module is None:
        # Nothing importable at all.
        if last_error is not None and error_callback is not None:
            error_callback(last_error)
        return alternative
    # __import__ returns the top-level package; walk down to the
    # requested attribute.
    missing = object()
    for segment in name.split('.')[1:]:
        module = getattr(module, segment, missing)
        if module is missing:
            if last_error is not None and error_callback is not None:
                error_callback(last_error)
            return alternative
    return module
# Sentinel distinguishing "no alternative supplied" from an explicit None.
_RAISE_EXCEPTION = object()
def try_imports(module_names, alternative=_RAISE_EXCEPTION, error_callback=None):
    """Attempt to import modules.

    Tries to import the first module in ``module_names``. If it can be
    imported, we return it. If not, we go on to the second module and try
    that. The process continues until we run out of modules to try. If none
    of the modules can be imported, either raise an exception or return the
    provided ``alternative`` value.

    :param module_names: A sequence of module names to try to import.
    :param alternative: The value to return if no module can be imported.
        If unspecified, we raise an ImportError.
    :param error_callback: If non-None, called with the ImportError for *each*
        module that fails to load.
    :raises ImportError: If none of the modules can be imported and no
        alternative value was specified.
    """
    module_names = list(module_names)
    for module_name in module_names:
        module = try_import(module_name, error_callback=error_callback)
        if module:
            return module
    if alternative is _RAISE_EXCEPTION:
        raise ImportError(
            "Could not import any of: %s" % ', '.join(module_names))
    return alternative
def safe_hasattr(obj, attr, _marker=object()):
    """Does 'obj' have an attribute 'attr'?

    Use this rather than built-in hasattr, as the built-in swallows
    exceptions in some versions of Python and behaves unpredictably with
    respect to properties.
    """
    found = getattr(obj, attr, _marker)
    return found is not _marker
def map_values(function, dictionary):
    """Map ``function`` across the values of ``dictionary``.

    :return: A dict with the same keys as ``dictionary``, where the value
        of each key ``k`` is ``function(dictionary[k])``.
    """
    result = {}
    for key in dictionary:
        result[key] = function(dictionary[key])
    return result
def filter_values(function, dictionary):
    """Filter ``dictionary`` by its values using ``function``."""
    result = {}
    for key, value in dictionary.items():
        if function(value):
            result[key] = value
    return result
def dict_subtract(a, b):
    """Return the part of ``a`` that's not in ``b``."""
    result = {}
    for key in a:
        if key not in b:
            result[key] = a[key]
    return result
def list_subtract(a, b):
    """Return a list ``a`` without the elements of ``b``.

    Each element of ``b`` removes at most one matching element from the
    result: if a value appears twice in ``a`` and once in ``b``, it will
    appear once in the returned list.
    """
    result = list(a)
    for item in b:
        # list.remove drops only the first occurrence; absent items are
        # simply ignored.
        try:
            result.remove(item)
        except ValueError:
            pass
    return result

View File

@ -1,113 +0,0 @@
# Copyright (c) 2008-2012 testtools developers. See LICENSE for details.
"""All the matchers.
Matchers, a way to express complex assertions outside the testcase.
Inspired by 'hamcrest'.
Matcher provides the abstract API that all matchers need to implement.
Bundled matchers are listed in __all__: a list can be obtained by running
$ python -c 'import testtools.matchers; print testtools.matchers.__all__'
"""
__all__ = [
'AfterPreprocessing',
'AllMatch',
'Annotate',
'Contains',
'ContainsAll',
'ContainedByDict',
'ContainsDict',
'DirContains',
'DirExists',
'DocTestMatches',
'EndsWith',
'Equals',
'FileContains',
'FileExists',
'GreaterThan',
'HasPermissions',
'Is',
'IsInstance',
'KeysEqual',
'LessThan',
'MatchesAll',
'MatchesAny',
'MatchesDict',
'MatchesException',
'MatchesListwise',
'MatchesPredicate',
'MatchesRegex',
'MatchesSetwise',
'MatchesStructure',
'NotEquals',
'Not',
'PathExists',
'Raises',
'raises',
'SamePath',
'StartsWith',
'TarballContains',
]
from ._basic import (
Contains,
EndsWith,
Equals,
GreaterThan,
Is,
IsInstance,
LessThan,
MatchesRegex,
NotEquals,
StartsWith,
)
from ._datastructures import (
ContainsAll,
MatchesListwise,
MatchesSetwise,
MatchesStructure,
)
from ._dict import (
ContainedByDict,
ContainsDict,
KeysEqual,
MatchesDict,
)
from ._doctest import (
DocTestMatches,
)
from ._exception import (
MatchesException,
Raises,
raises,
)
from ._filesystem import (
DirContains,
DirExists,
FileContains,
FileExists,
HasPermissions,
PathExists,
SamePath,
TarballContains,
)
from ._higherorder import (
AfterPreprocessing,
AllMatch,
Annotate,
MatchesAll,
MatchesAny,
MatchesPredicate,
Not,
)
# XXX: These are not explicitly included in __all__. It's unclear how much of
# the public interface they really are.
from ._impl import (
Matcher,
Mismatch,
MismatchError,
)

View File

@ -1,315 +0,0 @@
# Copyright (c) 2009-2012 testtools developers. See LICENSE for details.
__all__ = [
'Contains',
'EndsWith',
'Equals',
'GreaterThan',
'Is',
'IsInstance',
'LessThan',
'MatchesRegex',
'NotEquals',
'StartsWith',
]
import operator
from pprint import pformat
import re
from ..compat import (
_isbytes,
istext,
str_is_unicode,
text_repr,
)
from ..helpers import list_subtract
from ._higherorder import PostfixedMismatch
from ._impl import (
Matcher,
Mismatch,
)
def _format(thing):
    """
    Blocks of text with newlines are formatted as triple-quote
    strings. Everything else is pretty-printed.
    """
    # Text and byte strings go through text_repr (which chooses a quoting
    # style); all other objects are pretty-printed.
    if istext(thing) or _isbytes(thing):
        return text_repr(thing)
    return pformat(thing)
class _BinaryComparison(object):
    """Matcher that compares an object to another object.

    Subclasses provide ``comparator`` (a two-argument predicate) and
    ``mismatch_string`` (the operator text used in mismatch output).
    """

    def __init__(self, expected):
        self.expected = expected

    def __str__(self):
        return "%s(%r)" % (self.__class__.__name__, self.expected)

    def match(self, other):
        # A true comparator result means the match succeeded.
        if not self.comparator(other, self.expected):
            return _BinaryMismatch(self.expected, self.mismatch_string, other)
        return None

    def comparator(self, expected, other):
        raise NotImplementedError(self.comparator)
class _BinaryMismatch(Mismatch):
    """Two things did not match."""

    def __init__(self, expected, mismatch_string, other):
        self.expected = expected
        self._mismatch_string = mismatch_string
        self.other = other

    def describe(self):
        left = repr(self.expected)
        right = repr(self.other)
        # Short values fit on one line; long values get a block layout.
        if len(left) + len(right) <= 70:
            return "%s %s %s" % (left, self._mismatch_string, right)
        return "%s:\nreference = %s\nactual = %s\n" % (
            self._mismatch_string, _format(self.expected),
            _format(self.other))
class Equals(_BinaryComparison):
    """Matches if the items are equal."""

    # Mismatch renders as "<reference> != <actual>" (see _BinaryMismatch).
    comparator = operator.eq
    mismatch_string = '!='
class NotEquals(_BinaryComparison):
    """Matches if the items are not equal.

    In most cases, this is equivalent to ``Not(Equals(foo))``. The difference
    only matters when testing ``__ne__`` implementations.
    """

    comparator = operator.ne
    mismatch_string = '=='
class Is(_BinaryComparison):
    """Matches if the items are identical."""

    comparator = operator.is_
    mismatch_string = 'is not'
class LessThan(_BinaryComparison):
    """Matches if the item is less than the matchers reference object."""

    comparator = operator.__lt__
    # _BinaryMismatch renders reference first: "<reference> is not >
    # <actual>" i.e. the matchee was not less than the reference.
    mismatch_string = 'is not >'
class GreaterThan(_BinaryComparison):
    """Matches if the item is greater than the matchers reference object."""

    comparator = operator.__gt__
    # Rendered reference first: "<reference> is not < <actual>".
    mismatch_string = 'is not <'
class SameMembers(Matcher):
    """Matches if two iterators have the same members.

    This is not the same as set equivalence.  The two iterators must be of
    the same length and have the same repetitions.
    """

    def __init__(self, expected):
        super(SameMembers, self).__init__()
        self.expected = expected

    def __str__(self):
        return '%s(%r)' % (self.__class__.__name__, self.expected)

    def match(self, observed):
        # Elements only on one side, multiplicity respected.
        missing = list_subtract(self.expected, observed)
        extra = list_subtract(observed, self.expected)
        if not missing and not extra:
            return None
        return PostfixedMismatch(
            "\nmissing: %s\nextra: %s" % (
                _format(missing), _format(extra)),
            _BinaryMismatch(self.expected, 'elements differ', observed))
class DoesNotStartWith(Mismatch):
    """Mismatch reported when a string lacks an expected prefix."""

    def __init__(self, matchee, expected):
        """Create a DoesNotStartWith Mismatch.

        :param matchee: the string that did not match.
        :param expected: the string that 'matchee' was expected to start with.
        """
        self.matchee = matchee
        self.expected = expected

    def describe(self):
        return "%s does not start with %s." % (
            text_repr(self.matchee), text_repr(self.expected))
class StartsWith(Matcher):
    """Checks whether one string starts with another."""

    def __init__(self, expected):
        """Create a StartsWith Matcher.

        :param expected: the string that matchees should start with.
        """
        self.expected = expected

    def __str__(self):
        return "StartsWith(%r)" % (self.expected,)

    def match(self, matchee):
        if matchee.startswith(self.expected):
            return None
        return DoesNotStartWith(matchee, self.expected)
class DoesNotEndWith(Mismatch):
    """Mismatch reported when a string lacks an expected suffix."""

    def __init__(self, matchee, expected):
        """Create a DoesNotEndWith Mismatch.

        :param matchee: the string that did not match.
        :param expected: the string that 'matchee' was expected to end with.
        """
        self.matchee = matchee
        self.expected = expected

    def describe(self):
        return "%s does not end with %s." % (
            text_repr(self.matchee), text_repr(self.expected))
class EndsWith(Matcher):
    """Checks whether one string ends with another."""

    def __init__(self, expected):
        """Create a EndsWith Matcher.

        :param expected: the string that matchees should end with.
        """
        self.expected = expected

    def __str__(self):
        return "EndsWith(%r)" % (self.expected,)

    def match(self, matchee):
        if matchee.endswith(self.expected):
            return None
        return DoesNotEndWith(matchee, self.expected)
class IsInstance(object):
    """Matcher that wraps isinstance."""

    def __init__(self, *types):
        self.types = tuple(types)

    def __str__(self):
        names = ', '.join(type.__name__ for type in self.types)
        return "%s(%s)" % (self.__class__.__name__, names)

    def match(self, other):
        if not isinstance(other, self.types):
            return NotAnInstance(other, self.types)
        return None
class NotAnInstance(Mismatch):
    """Mismatch produced by IsInstance."""

    def __init__(self, matchee, types):
        """Create a NotAnInstance Mismatch.

        :param matchee: the thing which is not an instance of any of types.
        :param types: A tuple of the types which were expected.
        """
        self.matchee = matchee
        self.types = types

    def describe(self):
        # One expected type is named directly; several are listed as
        # alternatives.
        if len(self.types) == 1:
            typestr = self.types[0].__name__
        else:
            typestr = 'any of (%s)' % ', '.join(type.__name__ for type in
                    self.types)
        return "'%s' is not an instance of %s" % (self.matchee, typestr)
class DoesNotContain(Mismatch):
    """Mismatch produced by Contains."""

    def __init__(self, matchee, needle):
        """Create a DoesNotContain Mismatch.

        :param matchee: the object that did not contain needle.
        :param needle: the needle that 'matchee' was expected to contain.
        """
        self.matchee = matchee
        self.needle = needle

    def describe(self):
        return "%r not in %r" % (self.needle, self.matchee)
class Contains(Matcher):
    """Checks whether something is contained in another thing."""

    def __init__(self, needle):
        """Create a Contains Matcher.

        :param needle: the thing that needs to be contained by matchees.
        """
        self.needle = needle

    def __str__(self):
        return "Contains(%r)" % (self.needle,)

    def match(self, matchee):
        try:
            present = self.needle in matchee
        except TypeError:
            # e.g. 1 in 2 will raise TypeError
            present = False
        if present:
            return None
        return DoesNotContain(matchee, self.needle)
class MatchesRegex(object):
    """Matches if the matchee is matched by a regular expression."""

    def __init__(self, pattern, flags=0):
        self.pattern = pattern
        self.flags = flags

    def __str__(self):
        args = ['%r' % self.pattern]
        flag_names = []
        # dir() sorts the attribute names, so the flags come out sorted.
        # The single-character re attributes (I, M, S, ...) are the flag
        # aliases.
        for name in dir(re):
            if len(name) == 1 and self.flags & getattr(re, name):
                flag_names.append('re.%s' % name)
        if flag_names:
            args.append('|'.join(flag_names))
        return '%s(%s)' % (self.__class__.__name__, ', '.join(args))

    def match(self, value):
        if re.match(self.pattern, value, self.flags):
            return None
        # Escape the pattern for display; byte patterns are decoded first
        # (Python 2 compatibility: ``unicode`` only referenced when
        # ``str_is_unicode`` is false).
        pattern = self.pattern
        if not isinstance(pattern, str_is_unicode and str or unicode):
            pattern = pattern.decode("latin1")
        pattern = pattern.encode("unicode_escape").decode("ascii")
        return Mismatch("%r does not match /%s/" % (
            value, pattern.replace("\\\\", "\\")))

View File

@ -1,228 +0,0 @@
# Copyright (c) 2009-2012 testtools developers. See LICENSE for details.
__all__ = [
'ContainsAll',
'MatchesListwise',
'MatchesSetwise',
'MatchesStructure',
]
"""Matchers that operate with knowledge of Python data structures."""
from ..helpers import map_values
from ._higherorder import (
Annotate,
MatchesAll,
MismatchesAll,
)
from ._impl import Mismatch
def ContainsAll(items):
    """Make a matcher that checks whether a list of things is contained
    in another thing.

    The matcher effectively checks that the provided sequence is a subset of
    the matchee.
    """
    # Imported lazily so ._basic is not loaded at module import time.
    from ._basic import Contains
    # first_only=False: report every missing item, not just the first.
    return MatchesAll(*map(Contains, items), first_only=False)
class MatchesListwise(object):
    """Matches if each matcher matches the corresponding value.

    More easily explained by example than in words:

    >>> from ._basic import Equals
    >>> MatchesListwise([Equals(1)]).match([1])
    >>> MatchesListwise([Equals(1), Equals(2)]).match([1, 2])
    >>> print (MatchesListwise([Equals(1), Equals(2)]).match([2, 1]).describe())
    Differences: [
    1 != 2
    2 != 1
    ]
    >>> matcher = MatchesListwise([Equals(1), Equals(2)], first_only=True)
    >>> print (matcher.match([3, 4]).describe())
    1 != 3
    """

    def __init__(self, matchers, first_only=False):
        """Construct a MatchesListwise matcher.

        :param matchers: A list of matcher that the matched values must match.
        :param first_only: If True, then only report the first mismatch,
            otherwise report all of them. Defaults to False.
        """
        self.matchers = matchers
        self.first_only = first_only

    def match(self, values):
        from ._basic import Equals
        mismatches = []
        # A length difference is reported as its own annotated mismatch,
        # alongside any per-element differences.
        length_mismatch = Annotate(
            "Length mismatch", Equals(len(self.matchers))).match(len(values))
        if length_mismatch:
            mismatches.append(length_mismatch)
        # zip() stops at the shorter sequence; extra elements on either
        # side only surface through the length check above.
        for matcher, value in zip(self.matchers, values):
            mismatch = matcher.match(value)
            if mismatch:
                if self.first_only:
                    return mismatch
                mismatches.append(mismatch)
        if mismatches:
            return MismatchesAll(mismatches)
class MatchesStructure(object):
    """Matcher that matches an object structurally.

    'Structurally' here means that attributes of the object being matched are
    compared against given matchers.

    `fromExample` allows the creation of a matcher from a prototype object and
    then modified versions can be created with `update`.

    `byEquality` creates a matcher in much the same way as the constructor,
    except that the matcher for each of the attributes is assumed to be
    `Equals`.

    `byMatcher` creates a similar matcher to `byEquality`, but you get to pick
    the matcher, rather than just using `Equals`.
    """

    def __init__(self, **kwargs):
        """Construct a `MatchesStructure`.

        :param kwargs: A mapping of attributes to matchers.
        """
        self.kws = kwargs

    @classmethod
    def byEquality(cls, **kwargs):
        """Matches an object where the attributes equal the keyword values.

        Similar to the constructor, except that the matcher is assumed to be
        Equals.
        """
        from ._basic import Equals
        return cls.byMatcher(Equals, **kwargs)

    @classmethod
    def byMatcher(cls, matcher, **kwargs):
        """Matches an object where the attributes match the keyword values.

        Similar to the constructor, except that the provided matcher is used
        to match all of the values.
        """
        return cls(**map_values(matcher, kwargs))

    @classmethod
    def fromExample(cls, example, *attributes):
        """Build a matcher requiring equality with ``example`` on the named
        ``attributes``."""
        from ._basic import Equals
        kwargs = {}
        for attr in attributes:
            kwargs[attr] = Equals(getattr(example, attr))
        return cls(**kwargs)

    def update(self, **kws):
        """Return a copy of this matcher with ``kws`` merged in.

        A value of None removes that attribute's matcher.
        """
        new_kws = self.kws.copy()
        for attr, matcher in kws.items():
            if matcher is None:
                new_kws.pop(attr, None)
            else:
                new_kws[attr] = matcher
        return type(self)(**new_kws)

    def __str__(self):
        kws = []
        for attr, matcher in sorted(self.kws.items()):
            kws.append("%s=%s" % (attr, matcher))
        return "%s(%s)" % (self.__class__.__name__, ', '.join(kws))

    def match(self, value):
        # Delegate to MatchesListwise over the attribute values, each
        # matcher annotated with its attribute name.
        matchers = []
        values = []
        for attr, matcher in sorted(self.kws.items()):
            matchers.append(Annotate(attr, matcher))
            values.append(getattr(value, attr))
        return MatchesListwise(matchers).match(values)
class MatchesSetwise(object):
    """Matches if all the matchers match elements of the value being matched.

    That is, each element in the 'observed' set must match exactly one matcher
    from the set of matchers, with no matchers left over.

    The difference compared to `MatchesListwise` is that the order of the
    matchings does not matter.
    """

    def __init__(self, *matchers):
        self.matchers = matchers

    def match(self, observed):
        remaining_matchers = set(self.matchers)
        not_matched = []
        # Greedily pair each observed value with the first matcher that
        # accepts it; values that pair with no matcher accumulate in
        # not_matched.
        for value in observed:
            for matcher in remaining_matchers:
                if matcher.match(value) is None:
                    remaining_matchers.remove(matcher)
                    break
            else:
                not_matched.append(value)
        if not_matched or remaining_matchers:
            remaining_matchers = list(remaining_matchers)
            # There are various cases that all should be reported somewhat
            # differently.

            # There are two trivial cases:
            # 1) There are just some matchers left over.
            # 2) There are just some values left over.

            # Then there are three more interesting cases:
            # 3) There are the same number of matchers and values left over.
            # 4) There are more matchers left over than values.
            # 5) There are more values left over than matchers.

            if len(not_matched) == 0:
                if len(remaining_matchers) > 1:
                    msg = "There were %s matchers left over: " % (
                        len(remaining_matchers),)
                else:
                    msg = "There was 1 matcher left over: "
                msg += ', '.join(map(str, remaining_matchers))
                return Mismatch(msg)
            elif len(remaining_matchers) == 0:
                if len(not_matched) > 1:
                    return Mismatch(
                        "There were %s values left over: %s" % (
                            len(not_matched), not_matched))
                else:
                    return Mismatch(
                        "There was 1 value left over: %s" % (
                            not_matched, ))
            else:
                common_length = min(len(remaining_matchers), len(not_matched))
                if common_length == 0:
                    raise AssertionError("common_length can't be 0 here")
                if common_length > 1:
                    msg = "There were %s mismatches" % (common_length,)
                else:
                    msg = "There was 1 mismatch"
                if len(remaining_matchers) > len(not_matched):
                    extra_matchers = remaining_matchers[common_length:]
                    msg += " and %s extra matcher" % (len(extra_matchers), )
                    if len(extra_matchers) > 1:
                        msg += "s"
                    msg += ': ' + ', '.join(map(str, extra_matchers))
                elif len(not_matched) > len(remaining_matchers):
                    extra_values = not_matched[common_length:]
                    msg += " and %s extra value" % (len(extra_values), )
                    if len(extra_values) > 1:
                        msg += "s"
                    msg += ': ' + str(extra_values)
                # Pair the leftover matchers with the leftover values and
                # report their individual mismatches.
                return Annotate(
                    msg, MatchesListwise(remaining_matchers[:common_length])
                    ).match(not_matched[:common_length])

View File

@ -1,259 +0,0 @@
# Copyright (c) 2009-2012 testtools developers. See LICENSE for details.
__all__ = [
'KeysEqual',
]
from ..helpers import (
dict_subtract,
filter_values,
map_values,
)
from ._higherorder import (
AnnotatedMismatch,
PrefixedMismatch,
MismatchesAll,
)
from ._impl import Matcher, Mismatch
def LabelledMismatches(mismatches, details=None):
    """A collection of mismatches, each labelled.

    :param mismatches: mapping of label -> mismatch; rendered in sorted
        label order, each prefixed with its label.
    :param details: accepted for signature compatibility; not used here.
    """
    return MismatchesAll(
        (PrefixedMismatch(k, v) for (k, v) in sorted(mismatches.items())),
        wrap=False)
class MatchesAllDict(Matcher):
    """Matches if all of the matchers it is created with match.

    A lot like ``MatchesAll``, but takes a dict of Matchers and labels any
    mismatches with the key of the dictionary.
    """

    def __init__(self, matchers):
        super(MatchesAllDict, self).__init__()
        self.matchers = matchers

    def __str__(self):
        return 'MatchesAllDict(%s)' % (_format_matcher_dict(self.matchers),)

    def match(self, observed):
        # Run every labelled matcher; None entries are filtered out by
        # _dict_to_mismatch.
        results = dict(
            (label, matcher.match(observed))
            for label, matcher in self.matchers.items())
        return _dict_to_mismatch(results, result_mismatch=LabelledMismatches)
class DictMismatches(Mismatch):
    """A mismatch with a dict of child mismatches."""

    def __init__(self, mismatches, details=None):
        super(DictMismatches, self).__init__(None, details=details)
        self.mismatches = mismatches

    def describe(self):
        # Render as a dict-like literal: one "key: description," line per
        # child mismatch, in sorted key order.
        lines = ['{']
        lines.extend(
            [' %r: %s,' % (key, mismatch.describe())
             for (key, mismatch) in sorted(self.mismatches.items())])
        lines.append('}')
        return '\n'.join(lines)
def _dict_to_mismatch(data, to_mismatch=None,
                      result_mismatch=DictMismatches):
    """Fold a dict of optional mismatches into a single mismatch.

    :param data: mapping of label -> value; values are first transformed
        by ``to_mismatch`` (when given), then filtered for truthiness.
    :param result_mismatch: factory wrapping the surviving mismatches.
    :return: ``result_mismatch(...)`` if anything mismatched, else None.
    """
    transformed = map_values(to_mismatch, data) if to_mismatch else data
    surviving = filter_values(bool, transformed)
    if surviving:
        return result_mismatch(surviving)
    return None
class _MatchCommonKeys(Matcher):
    """Match on keys in a dictionary.

    Given a dictionary where the values are matchers, this will look for
    common keys in the matched dictionary and match if and only if all
    common keys match the given matchers.  Keys present on only one side
    are ignored.
    """

    def __init__(self, dict_of_matchers):
        super(_MatchCommonKeys, self).__init__()
        self._matchers = dict_of_matchers

    def _compare_dicts(self, expected, observed):
        # Only keys present in *both* dicts are checked.
        mismatches = {}
        for key in set(expected.keys()) & set(observed.keys()):
            result = expected[key].match(observed[key])
            if result:
                mismatches[key] = result
        return mismatches

    def match(self, observed):
        differences = self._compare_dicts(self._matchers, observed)
        if differences:
            return DictMismatches(differences)
        return None
class _SubDictOf(Matcher):
    """Matches if the matched dict only has keys that are in given dict."""

    def __init__(self, super_dict, format_value=repr):
        # format_value renders any surplus values in the mismatch output.
        super(_SubDictOf, self).__init__()
        self.super_dict = super_dict
        self.format_value = format_value

    def match(self, observed):
        # Any key in ``observed`` that is not in ``super_dict`` is excess.
        excess = dict_subtract(observed, self.super_dict)
        return _dict_to_mismatch(
            excess, lambda v: Mismatch(self.format_value(v)))
class _SuperDictOf(Matcher):
    """Matches if all of the keys in the given dict are in the matched dict.
    """

    def __init__(self, sub_dict, format_value=repr):
        super(_SuperDictOf, self).__init__()
        self.sub_dict = sub_dict
        self.format_value = format_value

    def match(self, super_dict):
        # Same check as _SubDictOf with the two dicts swapped.
        return _SubDictOf(super_dict, self.format_value).match(self.sub_dict)
def _format_matcher_dict(matchers):
return '{%s}' % (
', '.join(sorted('%r: %s' % (k, v) for k, v in matchers.items())))
class _CombinedMatcher(Matcher):
    """Many matchers labelled and combined into one uber-matcher.

    Subclass this and then specify a dict of matcher factories that take a
    single 'expected' value and return a matcher. The subclass will match
    only if all of the matchers made from factories match.

    Not **entirely** dissimilar from ``MatchesAll``.
    """

    # Mapping of label -> factory; each factory is called with the
    # 'expected' value and must return a matcher.  Supplied by subclasses.
    matcher_factories = {}

    def __init__(self, expected):
        super(_CombinedMatcher, self).__init__()
        self._expected = expected

    def format_expected(self, expected):
        # Hook for subclasses to customise the __str__ rendering.
        return repr(expected)

    def __str__(self):
        return '%s(%s)' % (
            self.__class__.__name__, self.format_expected(self._expected))

    def match(self, observed):
        # Instantiate one labelled matcher per factory; all must match.
        matchers = dict(
            (k, v(self._expected)) for k, v in self.matcher_factories.items())
        return MatchesAllDict(matchers).match(observed)
class MatchesDict(_CombinedMatcher):
    """Match a dictionary exactly, by its keys.

    Specify a dictionary mapping keys (often strings) to matchers. This is
    the 'expected' dict. Any dictionary that matches this must have exactly
    the same keys, and the values must match the corresponding matchers in the
    expected dict.
    """

    # Labels: surplus keys, absent keys, and common keys whose values
    # fail their matchers.
    matcher_factories = {
        'Extra': _SubDictOf,
        'Missing': lambda m: _SuperDictOf(m, format_value=str),
        'Differences': _MatchCommonKeys,
    }

    # Render the expected dict of matchers rather than its plain repr().
    format_expected = lambda self, expected: _format_matcher_dict(expected)
class ContainsDict(_CombinedMatcher):
    """Match a dictionary for that contains a specified sub-dictionary.

    Specify a dictionary mapping keys (often strings) to matchers. This is
    the 'expected' dict. Any dictionary that matches this must have **at
    least** these keys, and the values must match the corresponding matchers
    in the expected dict. Dictionaries that have more keys will also match.

    In other words, any matching dictionary must contain the dictionary given
    to the constructor.

    Does not check for strict sub-dictionary. That is, equal dictionaries
    match.
    """

    # No 'Extra' check: surplus keys in the observed dict are allowed.
    matcher_factories = {
        'Missing': lambda m: _SuperDictOf(m, format_value=str),
        'Differences': _MatchCommonKeys,
    }

    format_expected = lambda self, expected: _format_matcher_dict(expected)
class ContainedByDict(_CombinedMatcher):
    """Match a dictionary for which this is a super-dictionary.

    Specify a dictionary mapping keys (often strings) to matchers. This is
    the 'expected' dict. Any dictionary that matches this must have **only**
    these keys, and the values must match the corresponding matchers in the
    expected dict. Dictionaries that have fewer keys can also match.

    In other words, any matching dictionary must be contained by the
    dictionary given to the constructor.

    Does not check for strict super-dictionary. That is, equal dictionaries
    match.
    """

    # No 'Missing' check: absent keys in the observed dict are allowed.
    matcher_factories = {
        'Extra': _SubDictOf,
        'Differences': _MatchCommonKeys,
    }

    format_expected = lambda self, expected: _format_matcher_dict(expected)
class KeysEqual(Matcher):
    """Checks whether a dict has particular keys."""

    def __init__(self, *expected):
        """Create a `KeysEqual` Matcher.

        :param expected: The keys the dict is expected to have. If a
            single dict is given, we use the keys of that dict; otherwise
            the positional arguments themselves are the expected keys.
        """
        super(KeysEqual, self).__init__()
        # BUG FIX: ``expected`` is always a tuple under ``*expected``, so
        # the original ``expected.keys()`` attribute probe could never
        # succeed and a dict argument was silently treated as a single
        # (unhashable) key.  Unpack a lone dict argument explicitly, as
        # the docstring promises.
        if len(expected) == 1 and isinstance(expected[0], dict):
            self.expected = list(expected[0].keys())
        else:
            self.expected = list(expected)

    def __str__(self):
        return "KeysEqual(%s)" % ', '.join(map(repr, self.expected))

    def match(self, matchee):
        """Return None if ``matchee`` has exactly the expected keys,
        otherwise an annotated mismatch."""
        # Imported lazily to avoid a module-level dependency on ._basic.
        from ._basic import _BinaryMismatch, Equals
        expected = sorted(self.expected)
        matched = Equals(expected).match(sorted(matchee.keys()))
        if matched:
            return AnnotatedMismatch(
                'Keys not equal',
                _BinaryMismatch(expected, 'does not match', matchee))
        return None

View File

@ -1,104 +0,0 @@
# Copyright (c) 2009-2012 testtools developers. See LICENSE for details.
__all__ = [
'DocTestMatches',
]
import doctest
import re
from ..compat import str_is_unicode
from ._impl import Mismatch
class _NonManglingOutputChecker(doctest.OutputChecker):
    """Doctest checker that works with unicode rather than mangling strings

    This is needed because current Python versions have tried to fix string
    encoding related problems, but regressed the default behaviour with
    unicode inputs in the process.

    In Python 2.6 and 2.7 ``OutputChecker.output_difference`` is was changed
    to return a bytestring encoded as per ``sys.stdout.encoding``, or utf-8 if
    that can't be determined. Worse, that encoding process happens in the
    innocent looking `_indent` global function. Because the
    `DocTestMismatch.describe` result may well not be destined for printing to
    stdout, this is no good for us. To get a unicode return as before, the
    method is monkey patched if ``doctest._encoding`` exists.

    Python 3 has a different problem. For some reason both inputs are encoded
    to ascii with 'backslashreplace', making an escaped string matches its
    unescaped form. Overriding the offending ``OutputChecker._toAscii`` method
    is sufficient to revert this.
    """

    def _toAscii(self, s):
        """Return ``s`` unchanged rather than mangling it to ascii"""
        return s

    # Only do this overriding hackery if doctest has a broken _input function
    if getattr(doctest, "_encoding", None) is not None:
        from types import FunctionType as __F
        # Python 2 only: grab the underlying function object and copy its
        # globals so a non-encoding ``_indent`` can be swapped in without
        # touching the real doctest module.
        __f = doctest.OutputChecker.output_difference.im_func
        __g = dict(__f.func_globals)
        def _indent(s, indent=4, _pattern=re.compile("^(?!$)", re.MULTILINE)):
            """Prepend non-empty lines in ``s`` with ``indent`` number of spaces"""
            return _pattern.sub(indent*" ", s)
        __g["_indent"] = _indent
        # Rebuild output_difference as a class attribute bound to the
        # patched globals, then drop the temporaries.
        output_difference = __F(__f.func_code, __g, "output_difference")
        del __F, __f, __g, _indent
class DocTestMatches(object):
    """See if a string matches a doctest example."""

    def __init__(self, example, flags=0):
        """Create a DocTestMatches to match example.

        :param example: The example to match e.g. 'foo bar baz'
        :param flags: doctest comparison flags to match on. e.g.
            doctest.ELLIPSIS.
        """
        if not example.endswith('\n'):
            example += '\n'
        self.want = example # required variable name by doctest.
        self.flags = flags
        self._checker = _NonManglingOutputChecker()

    def __str__(self):
        if self.flags:
            flagstr = ", flags=%d" % self.flags
        else:
            flagstr = ""
        return 'DocTestMatches(%r%s)' % (self.want, flagstr)

    def _with_nl(self, actual):
        # Coerce to the same string type as the expected output and ensure
        # a trailing newline, matching how ``want`` was normalised.
        result = self.want.__class__(actual)
        if not result.endswith('\n'):
            result += '\n'
        return result

    def match(self, actual):
        with_nl = self._with_nl(actual)
        if self._checker.check_output(self.want, with_nl, self.flags):
            return None
        return DocTestMismatch(self, with_nl)

    def _describe_difference(self, with_nl):
        # ``self`` doubles as the doctest "example" object here (the
        # checker only needs its ``want`` attribute).
        return self._checker.output_difference(self, with_nl, self.flags)
class DocTestMismatch(Mismatch):
    """Mismatch object for DocTestMatches."""

    def __init__(self, matcher, with_nl):
        self.matcher = matcher
        self.with_nl = with_nl

    def describe(self):
        s = self.matcher._describe_difference(self.with_nl)
        # On Python 3 (or when already unicode) return the text unchanged.
        if str_is_unicode or isinstance(s, unicode):
            return s
        # GZ 2011-08-24: This is actually pretty bogus, most C0 codes should
        # be escaped, in addition to non-ascii bytes.
        return s.decode("latin1").encode("ascii", "backslashreplace")

View File

@ -1,124 +0,0 @@
# Copyright (c) 2009-2012 testtools developers. See LICENSE for details.
__all__ = [
'MatchesException',
'Raises',
'raises',
]
import sys
from testtools.compat import (
classtypes,
_error_repr,
isbaseexception,
istext,
)
from ._basic import MatchesRegex
from ._higherorder import AfterPreproccessing
from ._impl import (
Matcher,
Mismatch,
)
class MatchesException(Matcher):
    """Match an exc_info tuple against an exception instance or type."""

    def __init__(self, exception, value_re=None):
        """Create a MatchesException that will match exc_info's for exception.

        :param exception: Either an exception instance or type.
            If an instance is given, the type and arguments of the exception
            are checked. If a type is given only the type of the exception is
            checked. If a tuple is given, then as with isinstance, any of the
            types in the tuple matching is sufficient to match.
        :param value_re: If 'exception' is a type, and the matchee exception
            is of the right type, then match against this. If value_re is a
            string, then assume value_re is a regular expression and match
            the str() of the exception against it. Otherwise, assume value_re
            is a matcher, and match the exception against it.
        """
        Matcher.__init__(self)
        self.expected = exception
        if istext(value_re):
            # A plain string is shorthand for "regex over str(exception)".
            value_re = AfterPreproccessing(str, MatchesRegex(value_re), False)
        self.value_re = value_re
        # "Instance" means anything that is not a class object or a tuple of
        # classes; classtypes() covers old- and new-style classes on Python 2.
        self._is_instance = type(self.expected) not in classtypes() + (tuple,)

    def match(self, other):
        # The matchee must be a (type, value, traceback) exc_info tuple.
        if type(other) != tuple:
            return Mismatch('%r is not an exc_info tuple' % other)
        expected_class = self.expected
        if self._is_instance:
            expected_class = expected_class.__class__
        if not issubclass(other[0], expected_class):
            return Mismatch('%r is not a %r' % (other[0], expected_class))
        if self._is_instance:
            # For exception instances, the arguments must match too.
            if other[1].args != self.expected.args:
                return Mismatch('%s has different arguments to %s.' % (
                    _error_repr(other[1]), _error_repr(self.expected)))
        elif self.value_re is not None:
            return self.value_re.match(other[1])

    def __str__(self):
        if self._is_instance:
            return "MatchesException(%s)" % _error_repr(self.expected)
        return "MatchesException(%s)" % repr(self.expected)
class Raises(Matcher):
    """Match if the matchee raises an exception when called.

    Exceptions which are not subclasses of Exception propogate out of the
    Raises.match call unless they are explicitly matched.
    """

    def __init__(self, exception_matcher=None):
        """Create a Raises matcher.

        :param exception_matcher: Optional validator for the exception raised
            by matchee. If supplied the exc_info tuple for the exception raised
            is passed into that matcher. If no exception_matcher is supplied
            then the simple fact of raising an exception is considered enough
            to match on.
        """
        self.exception_matcher = exception_matcher

    def match(self, matchee):
        try:
            result = matchee()
            # No exception at all: that is itself a mismatch.
            return Mismatch('%r returned %r' % (matchee, result))
        # Catch all exceptions: Raises() should be able to match a
        # KeyboardInterrupt or SystemExit.
        except:
            exc_info = sys.exc_info()
            if self.exception_matcher:
                mismatch = self.exception_matcher.match(exc_info)
                if not mismatch:
                    # Drop the exc_info reference promptly to avoid keeping
                    # the traceback (and its frames) alive.
                    del exc_info
                    return
            else:
                mismatch = None
            # The exception did not match, or no explicit matching logic was
            # performed. If the exception is a non-user exception (that is, not
            # a subclass of Exception on Python 2.5+) then propogate it.
            if isbaseexception(exc_info[1]):
                del exc_info
                raise
            return mismatch

    def __str__(self):
        return 'Raises()'
def raises(exception):
    """Make a matcher that checks that a callable raises an exception.

    This is a convenience function, exactly equivalent to::

        return Raises(MatchesException(exception))

    See `Raises` and `MatchesException` for more information.
    """
    exc_info_matcher = MatchesException(exception)
    return Raises(exc_info_matcher)

View File

@ -1,192 +0,0 @@
# Copyright (c) 2009-2012 testtools developers. See LICENSE for details.
"""Matchers for things related to the filesystem."""
__all__ = [
'FileContains',
'DirExists',
'FileExists',
'HasPermissions',
'PathExists',
'SamePath',
'TarballContains',
]
import os
import tarfile
from ._basic import Equals
from ._higherorder import (
MatchesAll,
MatchesPredicate,
)
from ._impl import (
Matcher,
)
def PathExists():
    """Matches if the given path exists.

    Use like this::

      assertThat('/some/path', PathExists())
    """
    message = "%s does not exist."
    return MatchesPredicate(os.path.exists, message)
def DirExists():
    """Matches if the path exists and is a directory."""
    # Check existence first; first_only=True reports only that failure.
    is_directory = MatchesPredicate(os.path.isdir, "%s is not a directory.")
    return MatchesAll(PathExists(), is_directory, first_only=True)
def FileExists():
    """Matches if the given path exists and is a file."""
    # Check existence first; first_only=True reports only that failure.
    is_file = MatchesPredicate(os.path.isfile, "%s is not a file.")
    return MatchesAll(PathExists(), is_file, first_only=True)
class DirContains(Matcher):
    """Matches if the given directory contains files with the given names.

    That is, is the directory listing exactly equal to the given files?
    """

    def __init__(self, filenames=None, matcher=None):
        """Construct a ``DirContains`` matcher.

        Can be used in a basic mode where the whole directory listing is
        matched against an expected directory listing (by passing
        ``filenames``). Can also be used in a more advanced way where the
        whole directory listing is matched against an arbitrary matcher (by
        passing ``matcher`` instead).

        :param filenames: If specified, match the sorted directory listing
            against this list of filenames, sorted.
        :param matcher: If specified, match the sorted directory listing
            against this matcher.
        :raises AssertionError: If neither, or both, of ``filenames`` and
            ``matcher`` are provided.
        """
        # Use identity tests against None rather than the original chained
        # equality (`filenames == matcher == None`), which could misbehave
        # for arguments with unusual __eq__ implementations.
        if filenames is None and matcher is None:
            raise AssertionError(
                "Must provide one of `filenames` or `matcher`.")
        if filenames is not None and matcher is not None:
            raise AssertionError(
                "Must provide either `filenames` or `matcher`, not both.")
        if filenames is None:
            self.matcher = matcher
        else:
            self.matcher = Equals(sorted(filenames))

    def match(self, path):
        """Match if ``path`` is a directory with exactly the expected listing."""
        mismatch = DirExists().match(path)
        if mismatch is not None:
            return mismatch
        return self.matcher.match(sorted(os.listdir(path)))
class FileContains(Matcher):
    """Matches if the given file has the specified contents."""

    def __init__(self, contents=None, matcher=None):
        """Construct a ``FileContains`` matcher.

        Can be used in a basic mode where the file contents are compared for
        equality against the expected file contents (by passing ``contents``).
        Can also be used in a more advanced way where the file contents are
        matched against an arbitrary matcher (by passing ``matcher`` instead).

        :param contents: If specified, match the contents of the file with
            these contents.
        :param matcher: If specified, match the contents of the file against
            this matcher.
        :raises AssertionError: If neither, or both, of ``contents`` and
            ``matcher`` are provided.
        """
        if contents is None and matcher is None:
            raise AssertionError(
                "Must provide one of `contents` or `matcher`.")
        if contents is not None and matcher is not None:
            raise AssertionError(
                "Must provide either `contents` or `matcher`, not both.")
        # Bug fix: remember the raw contents so __str__ can describe the
        # matcher.  The original never assigned self.contents, so __str__
        # always raised AttributeError.
        self.contents = contents
        if matcher is None:
            self.matcher = Equals(contents)
        else:
            self.matcher = matcher

    def match(self, path):
        """Match if the file at ``path`` exists and its contents match."""
        mismatch = PathExists().match(path)
        if mismatch is not None:
            return mismatch
        f = open(path)
        try:
            actual_contents = f.read()
            return self.matcher.match(actual_contents)
        finally:
            f.close()

    def __str__(self):
        # Describe the raw contents when given, otherwise the custom matcher.
        if self.contents is not None:
            description = self.contents
        else:
            description = self.matcher
        return "File at path exists and contains %s" % description
class HasPermissions(Matcher):
    """Matches if a file has the given permissions.

    Permissions are specified and matched as a four-digit octal string.
    """

    def __init__(self, octal_permissions):
        """Construct a HasPermissions matcher.

        :param octal_permissions: A four digit octal string, representing the
            intended access permissions. e.g. '0775' for rwxrwxr-x.
        """
        super(HasPermissions, self).__init__()
        self.octal_permissions = octal_permissions

    def match(self, filename):
        """Match if ``filename``'s permission bits equal the expected string."""
        # Last four characters of the octal mode string, e.g. '0644'.
        actual = oct(os.stat(filename).st_mode)[-4:]
        return Equals(self.octal_permissions).match(actual)
class SamePath(Matcher):
    """Matches if two paths are the same.

    That is, the paths are equal, or they point to the same file but in
    different ways. The paths do not have to exist.
    """

    def __init__(self, path):
        super(SamePath, self).__init__()
        self.path = path

    def match(self, other_path):
        """Match if ``other_path`` canonicalizes to the same location."""
        def canonical(p):
            # Resolve symlinks first, then normalize to an absolute path.
            return os.path.abspath(os.path.realpath(p))
        return Equals(canonical(self.path)).match(canonical(other_path))
class TarballContains(Matcher):
    """Matches if the given tarball contains the given paths.

    Uses TarFile.getnames() to get the paths out of the tarball.
    """

    def __init__(self, paths):
        super(TarballContains, self).__init__()
        self.paths = paths
        self.path_matcher = Equals(sorted(self.paths))

    def match(self, tarball_path):
        """Match if the tarball's member names equal the expected paths."""
        # Open the underlying file ourselves so that it is always closed,
        # working around <http://bugs.python.org/issue10233>.
        fileobj = open(tarball_path, "rb")
        try:
            archive = tarfile.open(tarball_path, fileobj=fileobj)
            try:
                return self.path_matcher.match(sorted(archive.getnames()))
            finally:
                archive.close()
        finally:
            fileobj.close()

View File

@ -1,289 +0,0 @@
# Copyright (c) 2009-2012 testtools developers. See LICENSE for details.
__all__ = [
'AfterPreprocessing',
'AllMatch',
'Annotate',
'MatchesAny',
'MatchesAll',
'Not',
]
import types
from ._impl import (
Matcher,
Mismatch,
MismatchDecorator,
)
class MatchesAny(object):
    """Matches if any of the matchers it is created with match."""

    def __init__(self, *matchers):
        self.matchers = matchers

    def match(self, matchee):
        """Return None on the first successful matcher; else all mismatches."""
        failures = []
        for matcher in self.matchers:
            result = matcher.match(matchee)
            if result is None:
                return None
            failures.append(result)
        return MismatchesAll(failures)

    def __str__(self):
        return "MatchesAny(%s)" % ', '.join(
            str(matcher) for matcher in self.matchers)
class MatchesAll(object):
    """Matches if all of the matchers it is created with match."""

    def __init__(self, *matchers, **options):
        """Construct a MatchesAll matcher.

        Just list the component matchers as arguments in the ``*args``
        style. If you want only the first mismatch to be reported, pass in
        first_only=True as a keyword argument. By default, all mismatches are
        reported.
        """
        self.matchers = matchers
        self.first_only = options.get('first_only', False)

    def __str__(self):
        return 'MatchesAll(%s)' % ', '.join(str(m) for m in self.matchers)

    def match(self, matchee):
        """Return None if everything matches, else the collected mismatches."""
        mismatches = []
        for matcher in self.matchers:
            mismatch = matcher.match(matchee)
            if mismatch is None:
                continue
            if self.first_only:
                return mismatch
            mismatches.append(mismatch)
        if not mismatches:
            return None
        return MismatchesAll(mismatches)
class MismatchesAll(Mismatch):
    """A mismatch with many child mismatches."""

    def __init__(self, mismatches, wrap=True):
        self.mismatches = mismatches
        self._wrap = wrap

    def describe(self):
        """Join child descriptions, optionally wrapped in a bracket header."""
        lines = []
        if self._wrap:
            lines.append("Differences: [")
        for mismatch in self.mismatches:
            lines.append(mismatch.describe())
        if self._wrap:
            lines.append("]")
        return '\n'.join(lines)
class Not(object):
    """Inverts a matcher."""

    def __init__(self, matcher):
        self.matcher = matcher

    def __str__(self):
        return 'Not(%s)' % (self.matcher,)

    def match(self, other):
        """Mismatch when the wrapped matcher matches, and vice versa."""
        if self.matcher.match(other) is None:
            return MatchedUnexpectedly(self.matcher, other)
        return None
class MatchedUnexpectedly(Mismatch):
    """A thing matched when it wasn't supposed to."""

    def __init__(self, matcher, other):
        # Remember the matcher that matched and the offending value.
        self.other = other
        self.matcher = matcher

    def describe(self):
        return "%r matches %s" % (self.other, self.matcher)
class Annotate(object):
    """Annotates a matcher with a descriptive string.

    Mismatches are then described as '<mismatch>: <annotation>'.
    """

    def __init__(self, annotation, matcher):
        self.annotation = annotation
        self.matcher = matcher

    @classmethod
    def if_message(cls, annotation, matcher):
        """Annotate ``matcher`` only if ``annotation`` is non-empty."""
        if annotation:
            return cls(annotation, matcher)
        return matcher

    def __str__(self):
        return 'Annotate(%r, %s)' % (self.annotation, self.matcher)

    def match(self, other):
        """Delegate matching; wrap any mismatch with the annotation."""
        mismatch = self.matcher.match(other)
        if mismatch is None:
            return None
        return AnnotatedMismatch(self.annotation, mismatch)
class PostfixedMismatch(MismatchDecorator):
    """A mismatch annotated with a descriptive string."""

    def __init__(self, annotation, mismatch):
        super(PostfixedMismatch, self).__init__(mismatch)
        # Keep both the annotation and the raw mismatch available.
        self.annotation = annotation
        self.mismatch = mismatch

    def describe(self):
        return '%s: %s' % (self.original.describe(), self.annotation)


# Historical name, kept for backwards compatibility.
AnnotatedMismatch = PostfixedMismatch
class PrefixedMismatch(MismatchDecorator):
    """A mismatch whose description is prefixed with a fixed string."""

    def __init__(self, prefix, mismatch):
        super(PrefixedMismatch, self).__init__(mismatch)
        self.prefix = prefix

    def describe(self):
        return '%s: %s' % (self.prefix, self.original.describe())
class AfterPreprocessing(object):
    """Matches if the value matches after passing through a function.

    This can be used to aid in creating trivial matchers as functions, for
    example::

      def PathHasFileContent(content):
          def _read(path):
              return open(path).read()
          return AfterPreprocessing(_read, Equals(content))
    """

    def __init__(self, preprocessor, matcher, annotate=True):
        """Create an AfterPreprocessing matcher.

        :param preprocessor: A function called with the matchee before
            matching.
        :param matcher: What to match the preprocessed matchee against.
        :param annotate: Whether or not to annotate the matcher with
            something explaining how we transformed the matchee. Defaults
            to True.
        """
        self.preprocessor = preprocessor
        self.matcher = matcher
        self.annotate = annotate

    def _str_preprocessor(self):
        # Plain functions get a friendlier '<function name>' rendering.
        if isinstance(self.preprocessor, types.FunctionType):
            return '<function %s>' % self.preprocessor.__name__
        return str(self.preprocessor)

    def __str__(self):
        return "AfterPreprocessing(%s, %s)" % (
            self._str_preprocessor(), self.matcher)

    def match(self, value):
        """Preprocess ``value`` and match the result."""
        processed = self.preprocessor(value)
        if not self.annotate:
            return self.matcher.match(processed)
        annotated = Annotate(
            "after %s on %r" % (self._str_preprocessor(), value),
            self.matcher)
        return annotated.match(processed)


# This is the old, deprecated. spelling of the name, kept for backwards
# compatibility.
AfterPreproccessing = AfterPreprocessing
class AllMatch(object):
    """Matches if all provided values match the given matcher."""

    def __init__(self, matcher):
        self.matcher = matcher

    def __str__(self):
        return 'AllMatch(%s)' % (self.matcher,)

    def match(self, values):
        """Return None if every value matches; else all the mismatches."""
        failures = [
            result for result in (self.matcher.match(v) for v in values)
            if result]
        if failures:
            return MismatchesAll(failures)
class AnyMatch(object):
    """Matches if any of the provided values match the given matcher."""

    def __init__(self, matcher):
        self.matcher = matcher

    def __str__(self):
        return 'AnyMatch(%s)' % (self.matcher,)

    def match(self, values):
        """Return None at the first matching value; else all the mismatches."""
        failures = []
        for value in values:
            result = self.matcher.match(value)
            if not result:
                return None
            failures.append(result)
        return MismatchesAll(failures)
class MatchesPredicate(Matcher):
    """Match if a given function returns True.

    It is reasonably common to want to make a very simple matcher based on a
    function that you already have that returns True or False given a single
    argument (i.e. a predicate function). This matcher makes it very easy to
    do so. e.g.::

      IsEven = MatchesPredicate(lambda x: x % 2 == 0, '%s is not even')
      self.assertThat(4, IsEven)
    """

    def __init__(self, predicate, message):
        """Create a ``MatchesPredicate`` matcher.

        :param predicate: A function that takes a single argument and returns
            a value that will be interpreted as a boolean.
        :param message: A message to describe a mismatch. It will be formatted
            with '%' and be given whatever was passed to ``match()``. Thus, it
            needs to contain exactly one thing like '%s', '%d' or '%f'.
        """
        self.predicate = predicate
        self.message = message

    def __str__(self):
        return '%s(%r, %r)' % (
            self.__class__.__name__, self.predicate, self.message)

    def match(self, x):
        """Apply the predicate; on failure format the message with ``x``."""
        if self.predicate(x):
            return None
        return Mismatch(self.message % x)

View File

@ -1,175 +0,0 @@
# Copyright (c) 2009-2012 testtools developers. See LICENSE for details.
"""Matchers, a way to express complex assertions outside the testcase.
Inspired by 'hamcrest'.
Matcher provides the abstract API that all matchers need to implement.
Bundled matchers are listed in __all__: a list can be obtained by running
$ python -c 'import testtools.matchers; print testtools.matchers.__all__'
"""
__all__ = [
'Matcher',
'Mismatch',
'MismatchDecorator',
'MismatchError',
]
from testtools.compat import (
_isbytes,
istext,
str_is_unicode,
text_repr
)
class Matcher(object):
    """A pattern matcher.

    A Matcher must implement match and __str__ to be used by
    testtools.TestCase.assertThat. Matcher.match(thing) returns None when
    thing is completely matched, and a Mismatch object otherwise.

    Matchers can be useful outside of test cases, as they are simply a
    pattern matching language expressed as objects.

    testtools.matchers is inspired by hamcrest, but is pythonic rather than
    a Java transcription.
    """

    def match(self, something):
        """Return None if this matcher matches something, a Mismatch otherwise.
        """
        # Abstract: subclasses must override.
        raise NotImplementedError(self.match)

    def __str__(self):
        """Get a sensible human representation of the matcher.

        This should include the parameters given to the matcher and any
        state that would affect the matches operation.
        """
        # Abstract: subclasses must override.
        raise NotImplementedError(self.__str__)
class Mismatch(object):
    """An object describing a mismatch detected by a Matcher."""

    def __init__(self, description=None, details=None):
        """Construct a `Mismatch`.

        :param description: A description to use. If not provided,
            `Mismatch.describe` must be implemented.
        :param details: Extra details about the mismatch. Defaults
            to the empty dict.
        """
        # Only store a description when one was supplied, so that describe()
        # can detect the "not provided" case via AttributeError.
        if description:
            self._description = description
        self._details = details if details is not None else {}

    def describe(self):
        """Describe the mismatch.

        This should be either a human-readable string or castable to a string.
        In particular, is should either be plain ascii or unicode on Python 2,
        and care should be taken to escape control characters.
        """
        try:
            return self._description
        except AttributeError:
            raise NotImplementedError(self.describe)

    def get_details(self):
        """Get extra details about the mismatch.

        This allows the mismatch to provide extra information beyond the basic
        description, including large text or binary files, or debugging
        internals without having to force it to fit in the output of
        'describe'.

        The testtools assertion assertThat will query get_details and attach
        all its values to the test, permitting them to be reported in whatever
        manner the test environment chooses.

        :return: a dict mapping names to Content objects. name is a string to
            name the detail, and the Content object is the detail to add
            to the result. For more information see the API to which items
            from this dict are passed testtools.TestCase.addDetail.
        """
        return getattr(self, '_details', {})

    def __repr__(self):
        return "<testtools.matchers.Mismatch object at %x attributes=%r>" % (
            id(self), self.__dict__)
class MismatchError(AssertionError):
    """Raised when a mismatch occurs."""

    # This class exists to work around
    # <https://bugs.launchpad.net/testtools/+bug/804127>. It provides a
    # guaranteed way of getting a readable exception, no matter what crazy
    # characters are in the matchee, matcher or mismatch.

    def __init__(self, matchee, matcher, mismatch, verbose=False):
        # Have to use old-style upcalling for Python 2.4 and 2.5
        # compatibility.
        AssertionError.__init__(self)
        self.matchee = matchee
        self.matcher = matcher
        self.mismatch = mismatch
        # When verbose, __str__ includes the matchee and matcher as well as
        # the mismatch description.
        self.verbose = verbose

    def __str__(self):
        difference = self.mismatch.describe()
        if self.verbose:
            # GZ 2011-08-24: Smelly API? Better to take any object and special
            # case text inside?
            if istext(self.matchee) or _isbytes(self.matchee):
                matchee = text_repr(self.matchee, multiline=False)
            else:
                matchee = repr(self.matchee)
            return (
                'Match failed. Matchee: %s\nMatcher: %s\nDifference: %s\n'
                % (matchee, self.matcher, difference))
        else:
            return difference

    if not str_is_unicode:
        # On Python 2 the method defined above actually returns unicode, so
        # expose it as __unicode__ and make __str__ return escaped bytes.
        __unicode__ = __str__

        def __str__(self):
            return self.__unicode__().encode("ascii", "backslashreplace")
class MismatchDecorator(object):
    """Decorate a ``Mismatch``.

    Forwards all messages to the original mismatch object. Probably the best
    way to use this is inherit from this class and then provide your own
    custom decoration logic.
    """

    def __init__(self, original):
        """Construct a `MismatchDecorator`.

        :param original: A `Mismatch` object to decorate.
        """
        self.original = original

    def __repr__(self):
        return '<testtools.matchers.MismatchDecorator(%r)>' % (self.original,)

    def describe(self):
        # Forward straight to the wrapped mismatch.
        return self.original.describe()

    def get_details(self):
        # Forward straight to the wrapped mismatch.
        return self.original.get_details()
# Signal that this is part of the testing framework, and that code from this
# should not normally appear in tracebacks.
__unittest = True

View File

@ -1,97 +0,0 @@
# Copyright (c) 2010 testtools developers. See LICENSE for details.
"""Helpers for monkey-patching Python code."""
__all__ = [
'MonkeyPatcher',
'patch',
]
class MonkeyPatcher(object):
    """A set of monkey-patches that can be applied and removed all together.

    Use this to cover up attributes with new objects. Particularly useful for
    testing difficult code.
    """

    # Marker used to indicate that the patched attribute did not exist on the
    # object before we patched it.
    _NO_SUCH_ATTRIBUTE = object()

    def __init__(self, *patches):
        """Construct a `MonkeyPatcher`.

        :param patches: The patches to apply, each should be (obj, name,
            new_value). Providing patches here is equivalent to calling
            `add_patch`.
        """
        # Pending patches, as (obj, name, value) triples.
        self._patches_to_apply = []
        # Original values saved by patch(), as (obj, name, value) triples.
        self._originals = []
        for spec in patches:
            self.add_patch(*spec)

    def add_patch(self, obj, name, value):
        """Add a patch to overwrite 'name' on 'obj' with 'value'.

        The attribute C{name} on C{obj} will be assigned to C{value} when
        C{patch} is called or during C{run_with_patches}.

        You can restore the original values with a call to restore().
        """
        self._patches_to_apply.append((obj, name, value))

    def patch(self):
        """Apply all of the patches that have been specified with `add_patch`.

        Reverse this operation using L{restore}.
        """
        for obj, name, value in self._patches_to_apply:
            # Record what was there before (or a marker if nothing was).
            saved = getattr(obj, name, self._NO_SUCH_ATTRIBUTE)
            self._originals.append((obj, name, saved))
            setattr(obj, name, value)

    def restore(self):
        """Restore all original values to any patched objects.

        If the patched attribute did not exist on an object before it was
        patched, `restore` will delete the attribute so as to return the
        object to its original state.
        """
        # Undo in reverse order so stacked patches unwind correctly.
        while self._originals:
            obj, name, saved = self._originals.pop()
            if saved is self._NO_SUCH_ATTRIBUTE:
                delattr(obj, name)
            else:
                setattr(obj, name, saved)

    def run_with_patches(self, f, *args, **kw):
        """Run 'f' with the given args and kwargs with all patches applied.

        Restores all objects to their original state when finished.
        """
        self.patch()
        try:
            return f(*args, **kw)
        finally:
            self.restore()
def patch(obj, attribute, value):
    """Set 'obj.attribute' to 'value' and return a callable to restore 'obj'.

    If 'attribute' is not set on 'obj' already, then the returned callable
    will delete the attribute when called.

    :param obj: An object to monkey-patch.
    :param attribute: The name of the attribute to patch.
    :param value: The value to set 'obj.attribute' to.
    :return: A nullary callable that, when run, will restore 'obj' to its
        original state.
    """
    patcher = MonkeyPatcher()
    patcher.add_patch(obj, attribute, value)
    patcher.patch()
    return patcher.restore

View File

@ -1,351 +0,0 @@
# Copyright (c) 2009 testtools developers. See LICENSE for details.
"""python -m testtools.run testspec [testspec...]
Run some tests with the testtools extended API.
For instance, to run the testtools test suite.
$ python -m testtools.run testtools.tests.test_suite
"""
import os
import unittest
import sys
from testtools import TextTestResult
from testtools.compat import classtypes, istext, unicode_output_stream
from testtools.testsuite import iterate_tests, sorted_tests
# Prefer the stdlib loader; fall back to the external 'discover' package
# when this Python's unittest has no discovery support (pre-2.7).
defaultTestLoader = unittest.defaultTestLoader
defaultTestLoaderCls = unittest.TestLoader
if getattr(defaultTestLoader, 'discover', None) is None:
    try:
        import discover
        defaultTestLoader = discover.DiscoveringTestLoader()
        defaultTestLoaderCls = discover.DiscoveringTestLoader
        have_discover = True
    except ImportError:
        # No discovery available at all; _do_discovery will refuse to run.
        have_discover = False
else:
    have_discover = True
class TestToolsTestRunner(object):
    """ A thunk object to support unittest.TestProgram."""

    def __init__(self, verbosity=None, failfast=None, buffer=None):
        """Create a TestToolsTestRunner.

        :param verbosity: Ignored.
        :param failfast: Stop running tests at the first failure.
        :param buffer: Ignored.
        """
        self.failfast = failfast

    def run(self, test):
        """Run the given test case or test suite."""
        stream = unicode_output_stream(sys.stdout)
        result = TextTestResult(stream, failfast=self.failfast)
        result.startTestRun()
        try:
            return test.run(result)
        finally:
            result.stopTestRun()
####################
# Taken from python 2.7 and slightly modified for compatibility with
# older versions. Delete when 2.7 is the oldest supported version.
# Modifications:
# - Use have_discover to raise an error if the user tries to use
# discovery on an old version and doesn't have discover installed.
# - If --catch is given check that installHandler is available, as
# it won't be on old python versions.
# - print calls have been been made single-source python3 compatibile.
# - exception handling likewise.
# - The default help has been changed to USAGE_AS_MAIN and USAGE_FROM_MODULE
# removed.
# - A tweak has been added to detect 'python -m *.run' and use a
# better progName in that case.
# - self.module is more comprehensively set to None when being invoked from
# the commandline - __name__ is used as a sentinel value.
# - --list has been added which can list tests (should be upstreamed).
# - --load-list has been added which can reduce the tests used (should be
# upstreamed).
# - The limitation of using getopt is declared to the user.
# - http://bugs.python.org/issue16709 is worked around, by sorting tests when
# discover is used.
# Option help fragments, spliced into the usage text only when the
# corresponding feature has not been disabled (see usageExit).
FAILFAST = " -f, --failfast Stop on first failure\n"
CATCHBREAK = " -c, --catch Catch control-C and display results\n"
BUFFEROUTPUT = " -b, --buffer Buffer stdout and stderr during test runs\n"
# Usage text shown by usageExit; %-formatted with progName and the
# optional fragments above.
USAGE_AS_MAIN = """\
Usage: %(progName)s [options] [tests]
Options:
-h, --help Show this message
-v, --verbose Verbose output
-q, --quiet Minimal output
-l, --list List tests rather than executing them.
--load-list Specifies a file containing test ids, only tests matching
those ids are executed.
%(failfast)s%(catchbreak)s%(buffer)s
Examples:
%(progName)s test_module - run tests from test_module
%(progName)s module.TestClass - run tests from module.TestClass
%(progName)s module.Class.test_method - run specified test method
All options must come before [tests]. [tests] can be a list of any number of
test modules, classes and test methods.
Alternative Usage: %(progName)s discover [options]
Options:
-v, --verbose Verbose output
%(failfast)s%(catchbreak)s%(buffer)s -s directory Directory to start discovery ('.' default)
-p pattern Pattern to match test files ('test*.py' default)
-t directory Top level directory of project (default to
start directory)
-l, --list List tests rather than executing them.
--load-list Specifies a file containing test ids, only tests matching
those ids are executed.
For test discovery all test modules must be importable from the top
level directory of the project.
"""
class TestProgram(object):
"""A command-line program that runs a set of tests; this is primarily
for making test modules conveniently executable.
"""
USAGE = USAGE_AS_MAIN
# defaults for testing
failfast = catchbreak = buffer = progName = None
def __init__(self, module=__name__, defaultTest=None, argv=None,
             testRunner=None, testLoader=defaultTestLoader,
             exit=True, verbosity=1, failfast=None, catchbreak=None,
             buffer=None, stdout=None):
    """Parse arguments, build the test suite, then run or list the tests."""
    # __name__ is used as a sentinel meaning "invoked from the command
    # line": there is then no containing module to load tests from.
    if module == __name__:
        self.module = None
    elif istext(module):
        # Import the dotted module name and walk down to the leaf module.
        self.module = __import__(module)
        for part in module.split('.')[1:]:
            self.module = getattr(self.module, part)
    else:
        self.module = module
    if argv is None:
        argv = sys.argv
    if stdout is None:
        stdout = sys.stdout
    self.exit = exit
    self.failfast = failfast
    self.catchbreak = catchbreak
    self.verbosity = verbosity
    self.buffer = buffer
    self.defaultTest = defaultTest
    self.listtests = False
    self.load_list = None
    self.testRunner = testRunner
    self.testLoader = testLoader
    progName = argv[0]
    # Detect 'python -m package.run' invocation and report a nicer
    # program name ('package.run') than the full path to run.py.
    if progName.endswith('%srun.py' % os.path.sep):
        elements = progName.split(os.path.sep)
        progName = '%s.run' % elements[-2]
    else:
        progName = os.path.basename(argv[0])
    self.progName = progName
    # parseArgs sets self.test (and may set listtests/load_list).
    self.parseArgs(argv)
    if self.load_list:
        # TODO: preserve existing suites (like testresources does in
        # OptimisingTestSuite.add, but with a standard protocol).
        # This is needed because the load_tests hook allows arbitrary
        # suites, even if that is rarely used.
        source = open(self.load_list, 'rb')
        try:
            lines = source.readlines()
        finally:
            source.close()
        test_ids = set(line.strip().decode('utf-8') for line in lines)
        # Keep only the tests whose ids appear in the load list.
        filtered = unittest.TestSuite()
        for test in iterate_tests(self.test):
            if test.id() in test_ids:
                filtered.addTest(test)
        self.test = filtered
    if not self.listtests:
        self.runTests()
    else:
        # --list: print test ids instead of running anything.
        for test in iterate_tests(self.test):
            stdout.write('%s\n' % test.id())
def usageExit(self, msg=None):
    """Print an optional error message plus the usage text, then exit(2)."""
    if msg:
        print(msg)
    # Only advertise the failfast/catchbreak/buffer options when they have
    # not been explicitly disabled (set to False) for this program.
    usage = {'progName': self.progName, 'catchbreak': '', 'failfast': '',
             'buffer': ''}
    if self.failfast != False:
        usage['failfast'] = FAILFAST
    if self.catchbreak != False:
        usage['catchbreak'] = CATCHBREAK
    if self.buffer != False:
        usage['buffer'] = BUFFEROUTPUT
    print(self.USAGE % usage)
    sys.exit(2)
def parseArgs(self, argv):
    """Parse the command line and create the test suite.

    Delegates to _do_discovery when the first argument is 'discover';
    otherwise uses getopt (so all options must precede the test names).
    """
    if len(argv) > 1 and argv[1].lower() == 'discover':
        self._do_discovery(argv[2:])
        return
    import getopt
    long_opts = ['help', 'verbose', 'quiet', 'failfast', 'catch', 'buffer',
                 'list', 'load-list=']
    try:
        options, args = getopt.getopt(argv[1:], 'hHvqfcbl', long_opts)
        for opt, value in options:
            if opt in ('-h','-H','--help'):
                self.usageExit()
            if opt in ('-q','--quiet'):
                self.verbosity = 0
            if opt in ('-v','--verbose'):
                self.verbosity = 2
            if opt in ('-f','--failfast'):
                # Only set when not forced via the constructor.
                if self.failfast is None:
                    self.failfast = True
                # Should this raise an exception if -f is not valid?
            if opt in ('-c','--catch'):
                if self.catchbreak is None:
                    self.catchbreak = True
                # Should this raise an exception if -c is not valid?
            if opt in ('-b','--buffer'):
                if self.buffer is None:
                    self.buffer = True
                # Should this raise an exception if -b is not valid?
            if opt in ('-l', '--list'):
                self.listtests = True
            if opt == '--load-list':
                self.load_list = value
        if len(args) == 0 and self.defaultTest is None:
            # createTests will load tests from self.module
            self.testNames = None
        elif len(args) > 0:
            self.testNames = args
        else:
            self.testNames = (self.defaultTest,)
        self.createTests()
    except getopt.error:
        self.usageExit(sys.exc_info()[1])
def createTests(self):
    """Build self.test from the parsed names, or load the whole module."""
    if self.testNames is not None:
        self.test = self.testLoader.loadTestsFromNames(
            self.testNames, self.module)
    else:
        self.test = self.testLoader.loadTestsFromModule(self.module)
def _do_discovery(self, argv, Loader=defaultTestLoaderCls):
    """Handle the arguments of the 'discover' sub-command.

    Parses discovery options (start directory, pattern, top-level
    directory, list/load-list), runs discovery via ``Loader`` and
    stores the sorted tests in ``self.test``.

    :param argv: Arguments following the 'discover' keyword.
    :param Loader: Factory producing the loader used for discovery.
    """
    # handle command line args for test discovery
    if not have_discover:
        raise AssertionError("Unable to use discovery, must use python 2.7 "
                "or greater, or install the discover package.")
    self.progName = '%s discover' % self.progName
    import optparse
    parser = optparse.OptionParser()
    parser.prog = self.progName
    parser.add_option('-v', '--verbose', dest='verbose', default=False,
                      help='Verbose output', action='store_true')
    # The -f/-c/-b options are only offered when the constructor did not
    # explicitly disable the corresponding feature (False means disabled).
    if self.failfast != False:
        parser.add_option('-f', '--failfast', dest='failfast', default=False,
                          help='Stop on first fail or error',
                          action='store_true')
    if self.catchbreak != False:
        parser.add_option('-c', '--catch', dest='catchbreak', default=False,
                          help='Catch ctrl-C and display results so far',
                          action='store_true')
    if self.buffer != False:
        parser.add_option('-b', '--buffer', dest='buffer', default=False,
                          help='Buffer stdout and stderr during tests',
                          action='store_true')
    parser.add_option('-s', '--start-directory', dest='start', default='.',
                      help="Directory to start discovery ('.' default)")
    parser.add_option('-p', '--pattern', dest='pattern', default='test*.py',
                      help="Pattern to match tests ('test*.py' default)")
    parser.add_option('-t', '--top-level-directory', dest='top', default=None,
                      help='Top level directory of project (defaults to start directory)')
    parser.add_option('-l', '--list', dest='listtests', default=False, action="store_true",
                      help='List tests rather than running them.')
    parser.add_option('--load-list', dest='load_list', default=None,
                      help='Specify a filename containing the test ids to use.')
    options, args = parser.parse_args(argv)
    if len(args) > 3:
        self.usageExit()
    # Positional arguments override -s/-p/-t, in that order.
    for name, value in zip(('start', 'pattern', 'top'), args):
        setattr(options, name, value)
    # only set options from the parsing here
    # if they weren't set explicitly in the constructor
    if self.failfast is None:
        self.failfast = options.failfast
    if self.catchbreak is None:
        self.catchbreak = options.catchbreak
    if self.buffer is None:
        self.buffer = options.buffer
    self.listtests = options.listtests
    self.load_list = options.load_list
    if options.verbose:
        self.verbosity = 2
    start_dir = options.start
    pattern = options.pattern
    top_level_dir = options.top
    loader = Loader()
    # See http://bugs.python.org/issue16709
    # While sorting here is intrusive, its better than being random.
    # Rules for the sort:
    # - standard suites are flattened, and the resulting tests sorted by
    #   id.
    # - non-standard suites are preserved as-is, and sorted into position
    #   by the first test found by iterating the suite.
    # We do this by a DSU process: flatten and grab a key, sort, strip the
    # keys.
    loaded = loader.discover(start_dir, pattern, top_level_dir)
    self.test = sorted_tests(loaded)
def runTests(self):
    """Run ``self.test`` with the configured runner.

    Installs the SIGINT handler when catchbreak is enabled, instantiates
    the runner class (tolerating runners whose constructors do not accept
    the verbosity/failfast/buffer keywords), stores the outcome in
    ``self.result`` and, when ``self.exit`` is set, exits the process with
    a status reflecting success.
    """
    if (self.catchbreak
        and getattr(unittest, 'installHandler', None) is not None):
        unittest.installHandler()
    if self.testRunner is None:
        self.testRunner = TestToolsTestRunner
    if isinstance(self.testRunner, classtypes()):
        # A class was supplied: instantiate it, preferring the richer
        # constructor signature when available.
        try:
            testRunner = self.testRunner(verbosity=self.verbosity,
                                         failfast=self.failfast,
                                         buffer=self.buffer)
        except TypeError:
            # didn't accept the verbosity, buffer or failfast arguments
            testRunner = self.testRunner()
    else:
        # it is assumed to be a TestRunner instance
        testRunner = self.testRunner
    self.result = testRunner.run(self.test)
    if self.exit:
        sys.exit(not self.result.wasSuccessful())
################
def main(argv, stdout):
    """Command-line entry point.

    Constructs a TestProgram wired to TestToolsTestRunner; the
    TestProgram constructor parses argv and runs the tests.

    :param argv: The argument vector, e.g. sys.argv.
    :param stdout: Stream the runner reports to.
    """
    program = TestProgram(argv=argv, testRunner=TestToolsTestRunner,
        stdout=stdout)
# Allow running this module directly as a script.
if __name__ == '__main__':
    main(sys.argv, sys.stdout)

View File

@ -1,205 +0,0 @@
# Copyright (c) 2009-2010 testtools developers. See LICENSE for details.
"""Individual test case execution."""
# Public names exported by this module.
__all__ = [
    'MultipleExceptions',
    'RunTest',
    ]
import sys
from testtools.testresult import ExtendedToOriginalDecorator
class MultipleExceptions(Exception):
    """A container exception bundling several underlying failures.

    :ivar args: The sys.exc_info() tuples for each exception.
    """
class RunTest(object):
    """An object to run a test.

    RunTest objects are used to implement the internal logic involved in
    running a test. TestCase.__init__ stores _RunTest as the class of RunTest
    to execute. Passing the runTest= parameter to TestCase.__init__ allows a
    different RunTest class to be used to execute the test.

    Subclassing or replacing RunTest can be useful to add functionality to the
    way that tests are run in a given project.

    :ivar case: The test case that is to be run.
    :ivar result: The result object a case is reporting to.
    :ivar handlers: A list of (ExceptionClass, handler_function) for
        exceptions that should be caught if raised from the user
        code. Exceptions that are caught are checked against this list in
        first to last order. There is a catch-all of 'Exception' at the end
        of the list, so to add a new exception to the list, insert it at the
        front (which ensures that it will be checked before any existing base
        classes in the list. If you add multiple exceptions some of which are
        subclasses of each other, add the most specific exceptions last (so
        they come before their parent classes in the list).
    :ivar exception_caught: An object returned when _run_user catches an
        exception.
    :ivar _exceptions: A list of caught exceptions, used to do the single
        reporting of error/failure/skip etc.
    """

    def __init__(self, case, handlers=None):
        """Create a RunTest to run a case.

        :param case: A testtools.TestCase test case object.
        :param handlers: Exception handlers for this RunTest. These are stored
            in self.handlers and can be modified later if needed.
        """
        self.case = case
        self.handlers = handlers or []
        # Unique sentinel object: identity/equality against it tells callers
        # whether _run_user swallowed an exception.
        self.exception_caught = object()
        self._exceptions = []

    def run(self, result=None):
        """Run self.case reporting activity to result.

        :param result: Optional testtools.TestResult to report activity to.
        :return: The result object the test was run against.
        """
        if result is None:
            actual_result = self.case.defaultTestResult()
            actual_result.startTestRun()
        else:
            actual_result = result
        try:
            return self._run_one(actual_result)
        finally:
            # Only balance startTestRun with stopTestRun when we created the
            # result ourselves.
            if result is None:
                actual_result.stopTestRun()

    def _run_one(self, result):
        """Run one test reporting to result.

        :param result: A testtools.TestResult to report activity to.
            This result object is decorated with an ExtendedToOriginalDecorator
            to ensure that the latest TestResult API can be used with
            confidence by client code.
        :return: The result object the test was run against.
        """
        return self._run_prepared_result(ExtendedToOriginalDecorator(result))

    def _run_prepared_result(self, result):
        """Run one test reporting to result.

        :param result: A testtools.TestResult to report activity to.
        :return: The result object the test was run against.
        """
        result.startTest(self.case)
        self.result = result
        try:
            self._exceptions = []
            self._run_core()
            if self._exceptions:
                # One or more caught exceptions, now trigger the test's
                # reporting method for just one.
                e = self._exceptions.pop()
                for exc_class, handler in self.handlers:
                    if isinstance(e, exc_class):
                        handler(self.case, self.result, e)
                        break
        finally:
            result.stopTest(self.case)
        return result

    def _run_core(self):
        """Run the user supplied test code."""
        if self.exception_caught == self._run_user(self.case._run_setup,
            self.result):
            # Don't run the test method if we failed getting here.
            self._run_cleanups(self.result)
            return
        # Run everything from here on in. If any of the methods raise an
        # exception we'll have failed.
        failed = False
        try:
            if self.exception_caught == self._run_user(
                self.case._run_test_method, self.result):
                failed = True
        finally:
            try:
                # tearDown runs even when the test method failed.
                if self.exception_caught == self._run_user(
                    self.case._run_teardown, self.result):
                    failed = True
            finally:
                try:
                    # Cleanups run even when tearDown failed.
                    if self.exception_caught == self._run_user(
                        self._run_cleanups, self.result):
                        failed = True
                finally:
                    if not failed:
                        self.result.addSuccess(self.case,
                            details=self.case.getDetails())

    def _run_cleanups(self, result):
        """Run the cleanups that have been added with addCleanup.

        See the docstring for addCleanup for more information.

        :return: None if all cleanups ran without error,
            ``exception_caught`` if there was an error.
        """
        failing = False
        while self.case._cleanups:
            # LIFO order: most recently added cleanup runs first.
            function, arguments, keywordArguments = self.case._cleanups.pop()
            got_exception = self._run_user(
                function, *arguments, **keywordArguments)
            if got_exception == self.exception_caught:
                failing = True
        if failing:
            return self.exception_caught

    def _run_user(self, fn, *args, **kwargs):
        """Run a user supplied function.

        Exceptions are processed by `_got_user_exception`.

        :return: Either whatever 'fn' returns or ``exception_caught`` if
            'fn' raised an exception.
        """
        try:
            return fn(*args, **kwargs)
        except KeyboardInterrupt:
            # Ctrl-C must always abort the run, never be recorded as an
            # outcome.
            raise
        except:
            return self._got_user_exception(sys.exc_info())

    def _got_user_exception(self, exc_info, tb_label='traceback'):
        """Called when user code raises an exception.

        If 'exc_info' is a `MultipleExceptions`, then we recurse into it
        unpacking the errors that it's made up from.

        :param exc_info: A sys.exc_info() tuple for the user error.
        :param tb_label: An optional string label for the error. If
            not specified, will default to 'traceback'.
        :return: 'exception_caught' if we catch one of the exceptions that
            have handlers in 'handlers', otherwise raise the error.
        """
        if exc_info[0] is MultipleExceptions:
            for sub_exc_info in exc_info[1].args:
                self._got_user_exception(sub_exc_info, tb_label)
            return self.exception_caught
        try:
            e = exc_info[1]
            self.case.onException(exc_info, tb_label=tb_label)
        finally:
            # Break the reference cycle exc_info forms with the current frame.
            del exc_info
        for exc_class, handler in self.handlers:
            if isinstance(e, exc_class):
                self._exceptions.append(e)
                return self.exception_caught
        raise e
# Signal that this is part of the testing framework, and that code from this
# should not normally appear in tracebacks.
# (unittest's traceback filtering looks for a true '__unittest' module flag.)
__unittest = True

View File

@ -1,34 +0,0 @@
# Copyright (c) 2012 testtools developers. See LICENSE for details.
"""Tag support."""
class TagContext(object):
    """A scope holding a mutable set of tags, optionally seeded by a parent."""

    def __init__(self, parent=None):
        """Create a new TagContext.

        :param parent: If provided, uses this as the parent context. Any tags
            that are current on the parent at the time of construction are
            current in this context.
        """
        self.parent = parent
        if parent is None:
            self._tags = set()
        else:
            # Snapshot the parent's tags; later parent changes do not
            # propagate here.
            self._tags = set(parent.get_current_tags())

    def get_current_tags(self):
        """Return a copy of the tags currently in effect."""
        return set(self._tags)

    def change_tags(self, new_tags, gone_tags):
        """Change the tags on this context.

        :param new_tags: A set of tags to add to this context.
        :param gone_tags: A set of tags to remove from this context.
        :return: The tags now current on this context.
        """
        self._tags |= set(new_tags)
        self._tags -= set(gone_tags)
        return self.get_current_tags()

View File

@ -1,798 +0,0 @@
# Copyright (c) 2008-2011 testtools developers. See LICENSE for details.
"""Test case related stuff."""
# Make all classes in this module new-style on Python 2.
__metaclass__ = type

# Public names exported by this module.
__all__ = [
    'clone_test_with_new_id',
    'ExpectedException',
    'gather_details',
    'run_test_with',
    'skip',
    'skipIf',
    'skipUnless',
    'TestCase',
    ]
import copy
import itertools
import sys
import types
import unittest
from testtools import (
content,
try_import,
)
from testtools.compat import (
advance_iterator,
reraise,
)
from testtools.matchers import (
Annotate,
Contains,
Equals,
MatchesAll,
MatchesException,
MismatchError,
Is,
IsInstance,
Not,
Raises,
)
from testtools.monkey import patch
from testtools.runtest import RunTest
from testtools.testresult import (
ExtendedToOriginalDecorator,
TestResult,
)
# functools.wraps is absent on very old Pythons; None here selects the
# fallback path in the skip() decorator.
wraps = try_import('functools.wraps')
class TestSkipped(Exception):
    """Raised within TestCase.run() when a test is skipped."""
# Prefer the real SkipTest exception from unittest2/unittest when available,
# so skips raised here are recognised by those frameworks' runners.
# FIX: the first assignment previously bound lowercase 'testSkipped', a name
# nothing reads, so the unittest2 SkipTest was silently discarded; both
# assignments must rebind 'TestSkipped' (stdlib unittest wins when present).
TestSkipped = try_import('unittest2.case.SkipTest', TestSkipped)
TestSkipped = try_import('unittest.case.SkipTest', TestSkipped)
class _UnexpectedSuccess(Exception):
    """An unexpected success was raised.

    Note that this exception is private plumbing in testtools' testcase
    module.
    """
# Prefer the implementation shipped with unittest2/unittest when available
# so isinstance checks agree with those frameworks.
_UnexpectedSuccess = try_import(
    'unittest2.case._UnexpectedSuccess', _UnexpectedSuccess)
_UnexpectedSuccess = try_import(
    'unittest.case._UnexpectedSuccess', _UnexpectedSuccess)
class _ExpectedFailure(Exception):
    """An expected failure occurred.

    Note that this exception is private plumbing in testtools' testcase
    module.
    """
# Prefer the implementation shipped with unittest2/unittest when available
# so isinstance checks agree with those frameworks.
_ExpectedFailure = try_import(
    'unittest2.case._ExpectedFailure', _ExpectedFailure)
_ExpectedFailure = try_import(
    'unittest.case._ExpectedFailure', _ExpectedFailure)
def run_test_with(test_runner, **kwargs):
"""Decorate a test as using a specific ``RunTest``.
e.g.::
@run_test_with(CustomRunner, timeout=42)
def test_foo(self):
self.assertTrue(True)
The returned decorator works by setting an attribute on the decorated
function. `TestCase.__init__` looks for this attribute when deciding on a
``RunTest`` factory. If you wish to use multiple decorators on a test
method, then you must either make this one the top-most decorator, or you
must write your decorators so that they update the wrapping function with
the attributes of the wrapped function. The latter is recommended style
anyway. ``functools.wraps``, ``functools.wrapper`` and
``twisted.python.util.mergeFunctionMetadata`` can help you do this.
:param test_runner: A ``RunTest`` factory that takes a test case and an
optional list of exception handlers. See ``RunTest``.
:param kwargs: Keyword arguments to pass on as extra arguments to
'test_runner'.
:return: A decorator to be used for marking a test as needing a special
runner.
"""
def decorator(function):
# Set an attribute on 'function' which will inform TestCase how to
# make the runner.
function._run_test_with = (
lambda case, handlers=None:
test_runner(case, handlers=handlers, **kwargs))
return function
return decorator
def _copy_content(content_object):
    """Make a copy of the given content object.

    The content within ``content_object`` is iterated and saved. This is
    useful when the source of the content is volatile, a log file in a
    temporary directory for example.

    :param content_object: A `content.Content` instance.
    :return: A `content.Content` instance with the same mime-type as
        ``content_object`` and a non-volatile copy of its content.
    """
    saved_chunks = list(content_object.iter_bytes())
    return content.Content(
        content_object.content_type, lambda: saved_chunks)
def gather_details(source_dict, target_dict):
    """Merge the details from ``source_dict`` into ``target_dict``.

    Name collisions are resolved by appending '-1', '-2', ... to the
    incoming name until it is unique within ``target_dict``.

    :param source_dict: A dictionary of details to be gathered.
    :param target_dict: A dictionary into which details will be gathered.
    """
    for original_name, content_object in source_dict.items():
        candidate = original_name
        suffixes = itertools.count(1)
        while candidate in target_dict:
            candidate = '%s-%d' % (
                original_name, advance_iterator(suffixes))
        # Copy the content so the detail survives its volatile source.
        target_dict[candidate] = _copy_content(content_object)
class TestCase(unittest.TestCase):
    """Extensions to the basic TestCase.

    :ivar exception_handlers: Exceptions to catch from setUp, runTest and
        tearDown. This list is able to be modified at any time and consists of
        (exception_class, handler(case, result, exception_value)) pairs.
    :cvar run_tests_with: A factory to make the ``RunTest`` to run tests with.
        Defaults to ``RunTest``. The factory is expected to take a test case
        and an optional list of exception handlers.
    """

    skipException = TestSkipped

    run_tests_with = RunTest

    def __init__(self, *args, **kwargs):
        """Construct a TestCase.

        :param testMethod: The name of the method to run.
        :keyword runTest: Optional class to use to execute the test. If not
            supplied ``RunTest`` is used. The instance to be used is created
            when run() is invoked, so will be fresh each time. Overrides
            ``TestCase.run_tests_with`` if given.
        """
        runTest = kwargs.pop('runTest', None)
        super(TestCase, self).__init__(*args, **kwargs)
        self._cleanups = []
        self._unique_id_gen = itertools.count(1)
        # Generators to ensure unique traceback ids. Maps traceback label to
        # iterators.
        self._traceback_id_gens = {}
        self.__setup_called = False
        self.__teardown_called = False
        # __details is lazy-initialized so that a constructed-but-not-run
        # TestCase is safe to use with clone_test_with_new_id.
        self.__details = None
        test_method = self._get_test_method()
        if runTest is None:
            # A @run_test_with decoration on the test method takes
            # precedence over the class-level run_tests_with factory.
            runTest = getattr(
                test_method, '_run_test_with', self.run_tests_with)
        self.__RunTest = runTest
        self.__exception_handlers = []
        # Checked first-to-last by RunTest; Exception is the catch-all.
        self.exception_handlers = [
            (self.skipException, self._report_skip),
            (self.failureException, self._report_failure),
            (_ExpectedFailure, self._report_expected_failure),
            (_UnexpectedSuccess, self._report_unexpected_success),
            (Exception, self._report_error),
            ]
        if sys.version_info < (2, 6):
            # Catch old-style string exceptions with None as the instance
            self.exception_handlers.append((type(None), self._report_error))

    def __eq__(self, other):
        eq = getattr(unittest.TestCase, '__eq__', None)
        if eq is not None and not unittest.TestCase.__eq__(self, other):
            return False
        return self.__dict__ == other.__dict__

    def __repr__(self):
        # We add id to the repr because it makes testing testtools easier.
        return "<%s id=0x%0x>" % (self.id(), id(self))

    def addDetail(self, name, content_object):
        """Add a detail to be reported with this test's outcome.

        For more details see pydoc testtools.TestResult.

        :param name: The name to give this detail.
        :param content_object: The content object for this detail. See
            testtools.content for more detail.
        """
        if self.__details is None:
            self.__details = {}
        self.__details[name] = content_object

    def getDetails(self):
        """Get the details dict that will be reported with this test's outcome.

        For more details see pydoc testtools.TestResult.
        """
        if self.__details is None:
            self.__details = {}
        return self.__details

    def patch(self, obj, attribute, value):
        """Monkey-patch 'obj.attribute' to 'value' while the test is running.

        If 'obj' has no attribute, then the monkey-patch will still go ahead,
        and the attribute will be deleted instead of restored to its original
        value.

        :param obj: The object to patch. Can be anything.
        :param attribute: The attribute on 'obj' to patch.
        :param value: The value to set 'obj.attribute' to.
        """
        # patch() returns the undo callable, scheduled as a cleanup.
        self.addCleanup(patch(obj, attribute, value))

    def shortDescription(self):
        return self.id()

    def skipTest(self, reason):
        """Cause this test to be skipped.

        This raises self.skipException(reason). skipException is raised
        to permit a skip to be triggered at any point (during setUp or the
        testMethod itself). The run() method catches skipException and
        translates that into a call to the result objects addSkip method.

        :param reason: The reason why the test is being skipped. This must
            support being cast into a unicode string for reporting.
        """
        raise self.skipException(reason)

    # skipTest is how python2.7 spells this. Sometime in the future
    # This should be given a deprecation decorator - RBC 20100611.
    skip = skipTest

    def _formatTypes(self, classOrIterable):
        """Format a class or a bunch of classes for display in an error."""
        className = getattr(classOrIterable, '__name__', None)
        if className is None:
            className = ', '.join(klass.__name__ for klass in classOrIterable)
        return className

    def addCleanup(self, function, *arguments, **keywordArguments):
        """Add a cleanup function to be called after tearDown.

        Functions added with addCleanup will be called in reverse order of
        adding after tearDown, or after setUp if setUp raises an exception.

        If a function added with addCleanup raises an exception, the error
        will be recorded as a test error, and the next cleanup will then be
        run.

        Cleanup functions are always called before a test finishes running,
        even if setUp is aborted by an exception.
        """
        self._cleanups.append((function, arguments, keywordArguments))

    def addOnException(self, handler):
        """Add a handler to be called when an exception occurs in test code.

        This handler cannot affect what result methods are called, and is
        called before any outcome is called on the result object. An example
        use for it is to add some diagnostic state to the test details dict
        which is expensive to calculate and not interesting for reporting in
        the success case.

        Handlers are called before the outcome (such as addFailure) that
        the exception has caused.

        Handlers are called in first-added, first-called order, and if they
        raise an exception, that will propogate out of the test running
        machinery, halting test processing. As a result, do not call code that
        may unreasonably fail.
        """
        self.__exception_handlers.append(handler)

    def _add_reason(self, reason):
        self.addDetail('reason', content.text_content(reason))

    def assertEqual(self, expected, observed, message=''):
        """Assert that 'expected' is equal to 'observed'.

        :param expected: The expected value.
        :param observed: The observed value.
        :param message: An optional message to include in the error.
        """
        matcher = Equals(expected)
        self.assertThat(observed, matcher, message)

    # Legacy unittest spellings of assertEqual.
    failUnlessEqual = assertEquals = assertEqual

    def assertIn(self, needle, haystack):
        """Assert that needle is in haystack."""
        self.assertThat(haystack, Contains(needle))

    def assertIsNone(self, observed, message=''):
        """Assert that 'observed' is equal to None.

        :param observed: The observed value.
        :param message: An optional message describing the error.
        """
        matcher = Is(None)
        self.assertThat(observed, matcher, message)

    def assertIsNotNone(self, observed, message=''):
        """Assert that 'observed' is not equal to None.

        :param observed: The observed value.
        :param message: An optional message describing the error.
        """
        matcher = Not(Is(None))
        self.assertThat(observed, matcher, message)

    def assertIs(self, expected, observed, message=''):
        """Assert that 'expected' is 'observed'.

        :param expected: The expected value.
        :param observed: The observed value.
        :param message: An optional message describing the error.
        """
        matcher = Is(expected)
        self.assertThat(observed, matcher, message)

    def assertIsNot(self, expected, observed, message=''):
        """Assert that 'expected' is not 'observed'."""
        matcher = Not(Is(expected))
        self.assertThat(observed, matcher, message)

    def assertNotIn(self, needle, haystack):
        """Assert that needle is not in haystack."""
        matcher = Not(Contains(needle))
        self.assertThat(haystack, matcher)

    def assertIsInstance(self, obj, klass, msg=None):
        # A tuple of classes means "instance of any of these".
        if isinstance(klass, tuple):
            matcher = IsInstance(*klass)
        else:
            matcher = IsInstance(klass)
        self.assertThat(obj, matcher, msg)

    def assertRaises(self, excClass, callableObj, *args, **kwargs):
        """Fail unless an exception of class excClass is thrown
        by callableObj when invoked with arguments args and keyword
        arguments kwargs. If a different type of exception is
        thrown, it will not be caught, and the test case will be
        deemed to have suffered an error, exactly as for an
        unexpected exception.
        """
        # Ad-hoc matchers: re-raise anything that is not excClass, and
        # capture the matched exception so it can be returned to the caller.
        class ReRaiseOtherTypes(object):
            def match(self, matchee):
                if not issubclass(matchee[0], excClass):
                    reraise(*matchee)
        class CaptureMatchee(object):
            def match(self, matchee):
                self.matchee = matchee[1]
        capture = CaptureMatchee()
        matcher = Raises(MatchesAll(ReRaiseOtherTypes(),
                MatchesException(excClass), capture))
        our_callable = Nullary(callableObj, *args, **kwargs)
        self.assertThat(our_callable, matcher)
        return capture.matchee

    failUnlessRaises = assertRaises

    def assertThat(self, matchee, matcher, message='', verbose=False):
        """Assert that matchee is matched by matcher.

        :param matchee: An object to match with matcher.
        :param matcher: An object meeting the testtools.Matcher protocol.
        :raises MismatchError: When matcher does not match thing.
        """
        matcher = Annotate.if_message(message, matcher)
        mismatch = matcher.match(matchee)
        if not mismatch:
            return
        existing_details = self.getDetails()
        # NOTE: the loop variable shadows the module-level 'content' import
        # for the duration of this loop.
        for (name, content) in mismatch.get_details().items():
            full_name = name
            suffix = 1
            # Disambiguate colliding detail names with a numeric suffix.
            while full_name in existing_details:
                full_name = "%s-%d" % (name, suffix)
                suffix += 1
            self.addDetail(full_name, content)
        raise MismatchError(matchee, matcher, mismatch, verbose)

    def defaultTestResult(self):
        return TestResult()

    def expectFailure(self, reason, predicate, *args, **kwargs):
        """Check that a test fails in a particular way.

        If the test fails in the expected way, a KnownFailure is caused. If it
        succeeds an UnexpectedSuccess is caused.

        The expected use of expectFailure is as a barrier at the point in a
        test where the test would fail. For example:
        >>> def test_foo(self):
        >>>    self.expectFailure("1 should be 0", self.assertNotEqual, 1, 0)
        >>>    self.assertEqual(1, 0)

        If in the future 1 were to equal 0, the expectFailure call can simply
        be removed. This separation preserves the original intent of the test
        while it is in the expectFailure mode.
        """
        # TODO: implement with matchers.
        self._add_reason(reason)
        try:
            predicate(*args, **kwargs)
        except self.failureException:
            # GZ 2010-08-12: Don't know how to avoid exc_info cycle as the new
            #                unittest _ExpectedFailure wants old traceback
            exc_info = sys.exc_info()
            try:
                self._report_traceback(exc_info)
                raise _ExpectedFailure(exc_info)
            finally:
                del exc_info
        else:
            raise _UnexpectedSuccess(reason)

    def getUniqueInteger(self):
        """Get an integer unique to this test.

        Returns an integer that is guaranteed to be unique to this instance.
        Use this when you need an arbitrary integer in your test, or as a
        helper for custom anonymous factory methods.
        """
        return advance_iterator(self._unique_id_gen)

    def getUniqueString(self, prefix=None):
        """Get a string unique to this test.

        Returns a string that is guaranteed to be unique to this instance. Use
        this when you need an arbitrary string in your test, or as a helper
        for custom anonymous factory methods.

        :param prefix: The prefix of the string. If not provided, defaults
            to the id of the tests.
        :return: A bytestring of '<prefix>-<unique_int>'.
        """
        if prefix is None:
            prefix = self.id()
        return '%s-%d' % (prefix, self.getUniqueInteger())

    def onException(self, exc_info, tb_label='traceback'):
        """Called when an exception propogates from test code.

        :seealso addOnException:
        """
        # Skips/expected failures are reported via their own outcome
        # methods, so no traceback detail is attached for them.
        if exc_info[0] not in [
            TestSkipped, _UnexpectedSuccess, _ExpectedFailure]:
            self._report_traceback(exc_info, tb_label=tb_label)
        for handler in self.__exception_handlers:
            handler(exc_info)

    # The _report_* methods below are declared @staticmethod yet take 'self':
    # RunTest looks them up from the exception_handlers table and calls them
    # as plain functions, passing the case explicitly.
    @staticmethod
    def _report_error(self, result, err):
        result.addError(self, details=self.getDetails())

    @staticmethod
    def _report_expected_failure(self, result, err):
        result.addExpectedFailure(self, details=self.getDetails())

    @staticmethod
    def _report_failure(self, result, err):
        result.addFailure(self, details=self.getDetails())

    @staticmethod
    def _report_skip(self, result, err):
        if err.args:
            reason = err.args[0]
        else:
            reason = "no reason given."
        self._add_reason(reason)
        result.addSkip(self, details=self.getDetails())

    def _report_traceback(self, exc_info, tb_label='traceback'):
        # Multiple tracebacks with the same label get '-1', '-2', ... suffixes.
        id_gen = self._traceback_id_gens.setdefault(
            tb_label, itertools.count(0))
        tb_id = advance_iterator(id_gen)
        if tb_id:
            tb_label = '%s-%d' % (tb_label, tb_id)
        self.addDetail(tb_label, content.TracebackContent(exc_info, self))

    @staticmethod
    def _report_unexpected_success(self, result, err):
        result.addUnexpectedSuccess(self, details=self.getDetails())

    def run(self, result=None):
        return self.__RunTest(self, self.exception_handlers).run(result)

    def _run_setup(self, result):
        """Run the setUp function for this test.

        :param result: A testtools.TestResult to report activity to.
        :raises ValueError: If the base class setUp is not called, a
            ValueError is raised.
        """
        ret = self.setUp()
        if not self.__setup_called:
            raise ValueError(
                "TestCase.setUp was not called. Have you upcalled all the "
                "way up the hierarchy from your setUp? e.g. Call "
                "super(%s, self).setUp() from your setUp()."
                % self.__class__.__name__)
        return ret

    def _run_teardown(self, result):
        """Run the tearDown function for this test.

        :param result: A testtools.TestResult to report activity to.
        :raises ValueError: If the base class tearDown is not called, a
            ValueError is raised.
        """
        ret = self.tearDown()
        if not self.__teardown_called:
            raise ValueError(
                "TestCase.tearDown was not called. Have you upcalled all the "
                "way up the hierarchy from your tearDown? e.g. Call "
                "super(%s, self).tearDown() from your tearDown()."
                % self.__class__.__name__)
        return ret

    def _get_test_method(self):
        absent_attr = object()
        # Python 2.5+
        method_name = getattr(self, '_testMethodName', absent_attr)
        if method_name is absent_attr:
            # Python 2.4
            method_name = getattr(self, '_TestCase__testMethodName')
        return getattr(self, method_name)

    def _run_test_method(self, result):
        """Run the test method for this test.

        :param result: A testtools.TestResult to report activity to.
        :return: None.
        """
        return self._get_test_method()()

    def useFixture(self, fixture):
        """Use fixture in a test case.

        The fixture will be setUp, and self.addCleanup(fixture.cleanUp) called.

        :param fixture: The fixture to use.
        :return: The fixture, after setting it up and scheduling a cleanup for
           it.
        """
        try:
            fixture.setUp()
        except:
            # Preserve whatever diagnostics the fixture gathered before
            # re-raising its setUp failure.
            gather_details(fixture.getDetails(), self.getDetails())
            raise
        else:
            self.addCleanup(fixture.cleanUp)
            self.addCleanup(
                gather_details, fixture.getDetails(), self.getDetails())
            return fixture

    def setUp(self):
        super(TestCase, self).setUp()
        self.__setup_called = True

    def tearDown(self):
        super(TestCase, self).tearDown()
        # NOTE(review): this explicit call duplicates the super() call above
        # whenever the direct base is unittest.TestCase (whose tearDown is a
        # no-op, so it is harmless) — confirm intent before removing.
        unittest.TestCase.tearDown(self)
        self.__teardown_called = True
class PlaceHolder(object):
    """A placeholder test.

    `PlaceHolder` implements much of the same interface as TestCase and is
    particularly suitable for being added to TestResults.
    """

    failureException = None

    def __init__(self, test_id, short_description=None, details=None,
        outcome='addSuccess', error=None):
        """Construct a `PlaceHolder`.

        :param test_id: The id of the placeholder test.
        :param short_description: The short description of the place holder
            test. If not provided, the id will be used instead.
        :param details: Outcome details as accepted by addSuccess etc.
        :param outcome: The outcome to call. Defaults to 'addSuccess'.
        :param error: Optional exc_info tuple, recorded as a 'traceback'
            detail.
        """
        self._test_id = test_id
        self._short_description = short_description
        self._details = details or {}
        self._outcome = outcome
        if error is not None:
            self._details['traceback'] = content.TracebackContent(error, self)

    def __call__(self, result=None):
        return self.run(result=result)

    def __repr__(self):
        internal = [self._outcome, self._test_id, self._details]
        if self._short_description is not None:
            internal.append(self._short_description)
        return "<%s.%s(%s)>" % (
            self.__class__.__module__,
            self.__class__.__name__,
            ", ".join(map(repr, internal)))

    def __str__(self):
        return self.id()

    def countTestCases(self):
        # A placeholder always stands for exactly one test.
        return 1

    def debug(self):
        pass

    def id(self):
        return self._test_id

    def _result(self, result):
        """Wrap (or create) a result object for run() to report to."""
        if result is None:
            return TestResult()
        return ExtendedToOriginalDecorator(result)

    def run(self, result=None):
        result = self._result(result)
        result.startTest(self)
        # Dispatch to the configured outcome method, e.g. addSuccess.
        getattr(result, self._outcome)(self, details=self._details)
        result.stopTest(self)

    def shortDescription(self):
        if self._short_description is None:
            return self.id()
        return self._short_description
def ErrorHolder(test_id, error, short_description=None, details=None):
    """Construct an `ErrorHolder`.

    :param test_id: The id of the test.
    :param error: The exc info tuple that will be used as the test's error.
        This is inserted into the details as 'traceback' - any existing key
        will be overridden.
    :param short_description: An optional short description of the test.
    :param details: Outcome details as accepted by addSuccess etc.
    """
    return PlaceHolder(
        test_id,
        short_description=short_description,
        details=details,
        outcome='addError',
        error=error)
# Python 2.4 did not know how to copy functions.
# Registering FunctionType as 'immutable' makes copy.copy() return functions
# unchanged, which clone_test_with_new_id relies on.
if types.FunctionType not in copy._copy_dispatch:
    copy._copy_dispatch[types.FunctionType] = copy._copy_immutable
def clone_test_with_new_id(test, new_id):
    """Copy a `TestCase`, and give the copied test a new id.

    This is only expected to be used on tests that have been constructed but
    not executed.
    """
    duplicate = copy.copy(test)
    # Shadow the bound id() method with one returning the new id.
    duplicate.id = lambda: new_id
    return duplicate
def skip(reason):
    """A decorator to skip unit tests.

    This is just syntactic sugar so users don't have to change any of their
    unit tests in order to migrate to python 2.7, which provides the
    @unittest.skip decorator.
    """
    def decorator(test_item):
        if wraps is None:
            # No functools.wraps available: fall back to a wrapper that
            # delegates to the test item's own skip().
            def skip_wrapper(test_item):
                test_item.skip(reason)
        else:
            @wraps(test_item)
            def skip_wrapper(*args, **kwargs):
                raise TestCase.skipException(reason)
        return skip_wrapper
    return decorator
def skipIf(condition, reason):
    """Skip a test if the condition is true."""
    if not condition:
        # Condition is false: decorate with the identity function.
        def _id(obj):
            return obj
        return _id
    return skip(reason)
def skipUnless(condition, reason):
    """Skip a test unless the condition is true."""
    if condition:
        # Condition holds: return an identity decorator (no skipping).
        def _id(obj):
            return obj
        return _id
    return skip(reason)
class ExpectedException(object):
    """A context manager to handle expected exceptions.

    In Python 2.5 or later::

      def test_foo(self):
          with ExpectedException(ValueError, 'fo.*'):
              raise ValueError('foo')

    will pass.  If the raised exception has a type other than the specified
    type (subclasses are accepted), it will be re-raised.  If it has a
    'str()' that does not match the given regular expression, an
    AssertionError will be raised.  If no exception is raised, an
    AssertionError will be raised.
    """
    # Note: changed to a new-style class for consistency with Nullary, and
    # the type check now accepts subclasses of the expected exception (as
    # upstream testtools does), which is backward-compatible for exact
    # matches.

    def __init__(self, exc_type, value_re=None):
        """Construct an `ExpectedException`.

        :param exc_type: The type of exception to expect.
        :param value_re: A regular expression to match against the
            'str()' of the raised exception.
        """
        self.exc_type = exc_type
        self.value_re = value_re

    def __enter__(self):
        pass

    def __exit__(self, exc_type, exc_value, traceback):
        if exc_type is None:
            # Nothing was raised inside the 'with' block: that is a failure.
            raise AssertionError('%s not raised.' % self.exc_type.__name__)
        if not issubclass(exc_type, self.exc_type):
            # Wrong kind of exception: let it propagate unchanged.
            return False
        if self.value_re:
            # Check the exception text against the supplied pattern.
            matcher = MatchesException(self.exc_type, self.value_re)
            mismatch = matcher.match((exc_type, exc_value, traceback))
            if mismatch:
                raise AssertionError(mismatch.describe())
        # Expected exception observed: suppress it.
        return True
class Nullary(object):
    """Wrap a callable and its arguments as a zero-argument callable.

    The advantage over ``lambda: f(*args, **kwargs)`` is that the ``repr()``
    of the wrapped callable ``f`` is preserved.
    """

    def __init__(self, callable_object, *args, **kwargs):
        # Capture everything needed to make the call later.
        self._callable_object = callable_object
        self._args = args
        self._kwargs = kwargs

    def __call__(self):
        target = self._callable_object
        return target(*self._args, **self._kwargs)

    def __repr__(self):
        # Delegate so diagnostics show the underlying callable.
        return repr(self._callable_object)
# Signal that this is part of the testing framework, and that code from this
# should not normally appear in tracebacks. (unittest consults the module
# global '__unittest' when trimming traceback frames.)
__unittest = True

View File

@ -1,25 +0,0 @@
# Copyright (c) 2009-2012 testtools developers. See LICENSE for details.
"""Test result objects."""
__all__ = [
'ExtendedToOriginalDecorator',
'MultiTestResult',
'Tagger',
'TestByTestResult',
'TestResult',
'TestResultDecorator',
'TextTestResult',
'ThreadsafeForwardingResult',
]
from testtools.testresult.real import (
ExtendedToOriginalDecorator,
MultiTestResult,
Tagger,
TestByTestResult,
TestResult,
TestResultDecorator,
TextTestResult,
ThreadsafeForwardingResult,
)

View File

@ -1,150 +0,0 @@
# Copyright (c) 2009-2010 testtools developers. See LICENSE for details.
"""Doubles of test result objects, useful for testing unittest code."""
__all__ = [
'Python26TestResult',
'Python27TestResult',
'ExtendedTestResult',
]
from testtools.tags import TagContext
class LoggingBase(object):
    """Basic support for logging of results.

    Subclasses append (method-name, *args) tuples to ``_events`` so tests
    can assert on the exact sequence of result calls.
    """

    def __init__(self):
        # Event log, in call order.
        self._events = []
        # Mirrors the standard TestResult bookkeeping attributes.
        self.testsRun = 0
        self.shouldStop = False
        self._was_successful = True
class Python26TestResult(LoggingBase):
    """A precisely python 2.6 like test result, that logs."""

    def _log(self, event):
        # Record the event tuple in the order received.
        self._events.append(event)

    def addError(self, test, err):
        self._was_successful = False
        self._log(('addError', test, err))

    def addFailure(self, test, err):
        self._was_successful = False
        self._log(('addFailure', test, err))

    def addSuccess(self, test):
        self._log(('addSuccess', test))

    def startTest(self, test):
        self._log(('startTest', test))
        self.testsRun += 1

    def stop(self):
        self.shouldStop = True

    def stopTest(self, test):
        self._log(('stopTest', test))

    def wasSuccessful(self):
        return self._was_successful
class Python27TestResult(Python26TestResult):
    """A precisely python 2.7 like test result, that logs."""

    def __init__(self):
        super(Python27TestResult, self).__init__()
        # 2.7 adds failfast: stop the run on the first error/failure.
        self.failfast = False

    def _stop_if_failfast(self):
        # Honour the failfast flag after a fatal outcome.
        if self.failfast:
            self.stop()

    def addError(self, test, err):
        super(Python27TestResult, self).addError(test, err)
        self._stop_if_failfast()

    def addFailure(self, test, err):
        super(Python27TestResult, self).addFailure(test, err)
        self._stop_if_failfast()

    def addExpectedFailure(self, test, err):
        self._events.append(('addExpectedFailure', test, err))

    def addSkip(self, test, reason):
        self._events.append(('addSkip', test, reason))

    def addUnexpectedSuccess(self, test):
        self._events.append(('addUnexpectedSuccess', test))
        self._stop_if_failfast()

    def startTestRun(self):
        self._events.append(('startTestRun',))

    def stopTestRun(self):
        self._events.append(('stopTestRun',))
class ExtendedTestResult(Python27TestResult):
    """A test result like the proposed extended unittest result API."""

    def __init__(self):
        super(ExtendedTestResult, self).__init__()
        # Tag scope tracking (run-wide vs. per-test), as in the real results.
        self._tags = TagContext()

    def addError(self, test, err=None, details=None):
        self._was_successful = False
        # Extended API: callers supply either exc_info 'err' or 'details'.
        self._events.append(('addError', test, err or details))

    def addFailure(self, test, err=None, details=None):
        self._was_successful = False
        self._events.append(('addFailure', test, err or details))

    def addExpectedFailure(self, test, err=None, details=None):
        self._events.append(('addExpectedFailure', test, err or details))

    def addSkip(self, test, reason=None, details=None):
        self._events.append(('addSkip', test, reason or details))

    def addSuccess(self, test, details=None):
        # Only log details when some were actually supplied.
        if details:
            self._events.append(('addSuccess', test, details))
        else:
            self._events.append(('addSuccess', test))

    def addUnexpectedSuccess(self, test, details=None):
        # Unexpected successes make the run unsuccessful in this API.
        self._was_successful = False
        if details is not None:
            self._events.append(('addUnexpectedSuccess', test, details))
        else:
            self._events.append(('addUnexpectedSuccess', test))

    def progress(self, offset, whence):
        self._events.append(('progress', offset, whence))

    def startTestRun(self):
        super(ExtendedTestResult, self).startTestRun()
        # A new run starts successful, with a fresh tag context.
        self._was_successful = True
        self._tags = TagContext()

    def startTest(self, test):
        super(ExtendedTestResult, self).startTest(test)
        # Push a child tag scope for this test.
        self._tags = TagContext(self._tags)

    def stopTest(self, test):
        # Pop the per-test tag scope.
        self._tags = self._tags.parent
        super(ExtendedTestResult, self).stopTest(test)

    @property
    def current_tags(self):
        # Union of run-wide and per-test tags currently in force.
        return self._tags.get_current_tags()

    def tags(self, new_tags, gone_tags):
        self._tags.change_tags(new_tags, gone_tags)
        self._events.append(('tags', new_tags, gone_tags))

    def time(self, time):
        self._events.append(('time', time))

    def wasSuccessful(self):
        return self._was_successful

View File

@ -1,981 +0,0 @@
# Copyright (c) 2008-2012 testtools developers. See LICENSE for details.
"""Test results and related things."""
__metaclass__ = type
__all__ = [
'ExtendedToOriginalDecorator',
'MultiTestResult',
'Tagger',
'TestResult',
'TestResultDecorator',
'ThreadsafeForwardingResult',
]
import datetime
import sys
import unittest
from testtools.compat import all, str_is_unicode, _u
from testtools.content import (
text_content,
TracebackContent,
)
from testtools.helpers import safe_hasattr
from testtools.tags import TagContext
# From http://docs.python.org/library/datetime.html
_ZERO = datetime.timedelta(0)
# A UTC class.
class UTC(datetime.tzinfo):
    """Coordinated Universal Time: fixed zero offset and no DST."""

    def utcoffset(self, dt):
        return _ZERO

    def dst(self, dt):
        return _ZERO

    def tzname(self, dt):
        return "UTC"

# Module-level singleton used when timestamping results.
utc = UTC()
class TestResult(unittest.TestResult):
    """Subclass of unittest.TestResult extending the protocol for flexability.

    This test result supports an experimental protocol for providing additional
    data to in test outcomes. All the outcome methods take an optional dict
    'details'. If supplied any other detail parameters like 'err' or 'reason'
    should not be provided. The details dict is a mapping from names to
    MIME content objects (see testtools.content). This permits attaching
    tracebacks, log files, or even large objects like databases that were
    part of the test fixture. Until this API is accepted into upstream
    Python it is considered experimental: it may be replaced at any point
    by a newer version more in line with upstream Python. Compatibility would
    be aimed for in this case, but may not be possible.

    :ivar skip_reasons: A dict of skip-reasons -> list of tests. See addSkip.
    """

    def __init__(self, failfast=False):
        # startTestRun resets all attributes, and older clients don't know to
        # call startTestRun, so it is called once here.
        # Because subclasses may reasonably not expect this, we call the
        # specific version we want to run.
        self.failfast = failfast
        TestResult.startTestRun(self)

    def addExpectedFailure(self, test, err=None, details=None):
        """Called when a test has failed in an expected manner.

        Like with addSuccess and addError, testStopped should still be called.

        :param test: The test that has been skipped.
        :param err: The exc_info of the error that was raised.
        :return: None
        """
        # This is the python 2.7 implementation
        self.expectedFailures.append(
            (test, self._err_details_to_string(test, err, details)))

    def addError(self, test, err=None, details=None):
        """Called when an error has occurred. 'err' is a tuple of values as
        returned by sys.exc_info().

        :param details: Alternative way to supply details about the outcome.
            see the class docstring for more information.
        """
        self.errors.append((test,
            self._err_details_to_string(test, err, details)))
        if self.failfast:
            self.stop()

    def addFailure(self, test, err=None, details=None):
        """Called when an error has occurred. 'err' is a tuple of values as
        returned by sys.exc_info().

        :param details: Alternative way to supply details about the outcome.
            see the class docstring for more information.
        """
        self.failures.append((test,
            self._err_details_to_string(test, err, details)))
        if self.failfast:
            self.stop()

    def addSkip(self, test, reason=None, details=None):
        """Called when a test has been skipped rather than running.

        Like with addSuccess and addError, testStopped should still be called.

        This must be called by the TestCase. 'addError' and 'addFailure' will
        not call addSkip, since they have no assumptions about the kind of
        errors that a test can raise.

        :param test: The test that has been skipped.
        :param reason: The reason for the test being skipped. For instance,
            u"pyGL is not available".
        :param details: Alternative way to supply details about the outcome.
            see the class docstring for more information.
        :return: None
        """
        if reason is None:
            # No plain reason given: pull it out of the details dict, with a
            # placeholder string as the final fallback.
            reason = details.get('reason')
            if reason is None:
                reason = 'No reason given'
            else:
                reason = reason.as_text()
        # Group skipped tests by reason in skip_reasons.
        skip_list = self.skip_reasons.setdefault(reason, [])
        skip_list.append(test)

    def addSuccess(self, test, details=None):
        """Called when a test succeeded."""

    def addUnexpectedSuccess(self, test, details=None):
        """Called when a test was expected to fail, but succeed."""
        self.unexpectedSuccesses.append(test)
        if self.failfast:
            self.stop()

    def wasSuccessful(self):
        """Has this result been successful so far?

        If there have been any errors, failures or unexpected successes,
        return False.  Otherwise, return True.

        Note: This differs from standard unittest in that we consider
        unexpected successes to be equivalent to failures, rather than
        successes.
        """
        return not (self.errors or self.failures or self.unexpectedSuccesses)

    def _err_details_to_string(self, test, err=None, details=None):
        """Convert an error in exc_info form or a contents dict to a string."""
        if err is not None:
            return TracebackContent(err, test).as_text()
        return _details_to_str(details, special='traceback')

    def _exc_info_to_unicode(self, err, test):
        # Deprecated. Only present because subunit upcalls to it. See
        # <https://bugs.launchpad.net/testtools/+bug/929063>.
        return TracebackContent(err, test).as_text()

    def _now(self):
        """Return the current 'test time'.

        If the time() method has not been called, this is equivalent to
        datetime.now(), otherwise its the last supplied datestamp given to the
        time() method.
        """
        if self.__now is None:
            return datetime.datetime.now(utc)
        else:
            return self.__now

    def startTestRun(self):
        """Called before a test run starts.

        New in Python 2.7. The testtools version resets the result to a
        pristine condition ready for use in another test run. Note that this
        is different from Python 2.7's startTestRun, which does nothing.
        """
        # failfast is reset by the super __init__, so stash it.
        failfast = self.failfast
        # Note: this deliberately calls unittest.TestResult.__init__ (not
        # startTestRun) to reset the inherited attributes.
        super(TestResult, self).__init__()
        self.skip_reasons = {}
        self.__now = None
        self._tags = TagContext()
        # -- Start: As per python 2.7 --
        self.expectedFailures = []
        self.unexpectedSuccesses = []
        self.failfast = failfast
        # -- End: As per python 2.7 --

    def stopTestRun(self):
        """Called after a test run completes

        New in python 2.7
        """

    def startTest(self, test):
        super(TestResult, self).startTest(test)
        # Open a per-test tag scope.
        self._tags = TagContext(self._tags)

    def stopTest(self, test):
        # Close the per-test tag scope.
        self._tags = self._tags.parent
        super(TestResult, self).stopTest(test)

    @property
    def current_tags(self):
        """The currently set tags."""
        return self._tags.get_current_tags()

    def tags(self, new_tags, gone_tags):
        """Add and remove tags from the test.

        :param new_tags: A set of tags to be added to the stream.
        :param gone_tags: A set of tags to be removed from the stream.
        """
        self._tags.change_tags(new_tags, gone_tags)

    def time(self, a_datetime):
        """Provide a timestamp to represent the current time.

        This is useful when test activity is time delayed, or happening
        concurrently and getting the system time between API calls will not
        accurately represent the duration of tests (or the whole run).

        Calling time() sets the datetime used by the TestResult object.
        Time is permitted to go backwards when using this call.

        :param a_datetime: A datetime.datetime object with TZ information or
            None to reset the TestResult to gathering time from the system.
        """
        self.__now = a_datetime

    def done(self):
        """Called when the test runner is done.

        deprecated in favour of stopTestRun.
        """
class MultiTestResult(TestResult):
    """A test result that dispatches to many test results."""

    def __init__(self, *results):
        # Setup _results first, as the base class __init__ assigns to failfast.
        self._results = list(map(ExtendedToOriginalDecorator, results))
        super(MultiTestResult, self).__init__()

    def __repr__(self):
        return '<%s (%s)>' % (
            self.__class__.__name__, ', '.join(map(repr, self._results)))

    def _dispatch(self, message, *args, **kwargs):
        # Invoke the named method on every wrapped result; collect returns.
        return tuple(
            getattr(result, message)(*args, **kwargs)
            for result in self._results)

    def _get_failfast(self):
        # The first wrapped result's failfast stands in for all of them.
        return getattr(self._results[0], 'failfast', False)
    def _set_failfast(self, value):
        self._dispatch('__setattr__', 'failfast', value)
    failfast = property(_get_failfast, _set_failfast)

    def _get_shouldStop(self):
        # Stop when any child result asks to stop.
        return any(self._dispatch('__getattr__', 'shouldStop'))
    def _set_shouldStop(self, value):
        # Called because we subclass TestResult. Probably should not do that.
        pass
    shouldStop = property(_get_shouldStop, _set_shouldStop)

    def startTest(self, test):
        super(MultiTestResult, self).startTest(test)
        return self._dispatch('startTest', test)

    def stop(self):
        return self._dispatch('stop')

    def stopTest(self, test):
        super(MultiTestResult, self).stopTest(test)
        return self._dispatch('stopTest', test)

    def addError(self, test, error=None, details=None):
        return self._dispatch('addError', test, error, details=details)

    def addExpectedFailure(self, test, err=None, details=None):
        return self._dispatch(
            'addExpectedFailure', test, err, details=details)

    def addFailure(self, test, err=None, details=None):
        return self._dispatch('addFailure', test, err, details=details)

    def addSkip(self, test, reason=None, details=None):
        return self._dispatch('addSkip', test, reason, details=details)

    def addSuccess(self, test, details=None):
        return self._dispatch('addSuccess', test, details=details)

    def addUnexpectedSuccess(self, test, details=None):
        return self._dispatch('addUnexpectedSuccess', test, details=details)

    def startTestRun(self):
        super(MultiTestResult, self).startTestRun()
        return self._dispatch('startTestRun')

    def stopTestRun(self):
        return self._dispatch('stopTestRun')

    def tags(self, new_tags, gone_tags):
        super(MultiTestResult, self).tags(new_tags, gone_tags)
        return self._dispatch('tags', new_tags, gone_tags)

    def time(self, a_datetime):
        return self._dispatch('time', a_datetime)

    def done(self):
        return self._dispatch('done')

    def wasSuccessful(self):
        """Was this result successful?

        Only returns True if every constituent result was successful.
        """
        return all(self._dispatch('wasSuccessful'))
class TextTestResult(TestResult):
    """A TestResult which outputs activity to a text stream."""

    def __init__(self, stream, failfast=False):
        """Construct a TextTestResult writing to stream."""
        super(TextTestResult, self).__init__(failfast=failfast)
        self.stream = stream
        # Separator lines framing each error/failure report.
        self.sep1 = '=' * 70 + '\n'
        self.sep2 = '-' * 70 + '\n'

    def _delta_to_float(self, a_timedelta):
        # Convert a timedelta to float seconds (timedelta.total_seconds()
        # equivalent, kept for pre-2.7 compatibility).
        return (a_timedelta.days * 86400.0 + a_timedelta.seconds +
            a_timedelta.microseconds / 1000000.0)

    def _show_list(self, label, error_list):
        # Write one separator-framed block per (test, output) pair.
        for test, output in error_list:
            self.stream.write(self.sep1)
            self.stream.write("%s: %s\n" % (label, test.id()))
            self.stream.write(self.sep2)
            self.stream.write(output)

    def startTestRun(self):
        super(TextTestResult, self).startTestRun()
        self.__start = self._now()
        self.stream.write("Tests running...\n")

    def stopTestRun(self):
        if self.testsRun != 1:
            plural = 's'
        else:
            plural = ''
        stop = self._now()
        self._show_list('ERROR', self.errors)
        self._show_list('FAIL', self.failures)
        for test in self.unexpectedSuccesses:
            self.stream.write(
                "%sUNEXPECTED SUCCESS: %s\n%s" % (
                    self.sep1, test.id(), self.sep2))
        self.stream.write("\nRan %d test%s in %.3fs\n" %
            (self.testsRun, plural,
             self._delta_to_float(stop - self.__start)))
        if self.wasSuccessful():
            self.stream.write("OK\n")
        else:
            self.stream.write("FAILED (")
            details = []
            # Note: errors and unexpected successes are folded into the
            # 'failures' count on this summary line.
            details.append("failures=%d" % (
                sum(map(len, (
                    self.failures, self.errors, self.unexpectedSuccesses)))))
            self.stream.write(", ".join(details))
            self.stream.write(")\n")
        super(TextTestResult, self).stopTestRun()
class ThreadsafeForwardingResult(TestResult):
    """A TestResult which ensures the target does not receive mixed up calls.

    Multiple ``ThreadsafeForwardingResults`` can forward to the same target
    result, and that target result will only ever receive the complete set of
    events for one test at a time.

    This is enforced using a semaphore, which further guarantees that tests
    will be sent atomically even if the ``ThreadsafeForwardingResults`` are in
    different threads.

    ``ThreadsafeForwardingResult`` is typically used by
    ``ConcurrentTestSuite``, which creates one ``ThreadsafeForwardingResult``
    per thread, each of which wraps of the TestResult that
    ``ConcurrentTestSuite.run()`` is called with.

    target.startTestRun() and target.stopTestRun() are called once for each
    ThreadsafeForwardingResult that forwards to the same target. If the target
    takes special action on these events, it should take care to accommodate
    this.

    time() and tags() calls are batched to be adjacent to the test result and
    in the case of tags() are coerced into test-local scope, avoiding the
    opportunity for bugs around global state in the target.
    """

    def __init__(self, target, semaphore):
        """Create a ThreadsafeForwardingResult forwarding to target.

        :param target: A ``TestResult``.
        :param semaphore: A ``threading.Semaphore`` with limit 1.
        """
        TestResult.__init__(self)
        self.result = ExtendedToOriginalDecorator(target)
        self.semaphore = semaphore
        # Start time of the test currently running through this forwarder.
        self._test_start = None
        # (new, gone) tag pairs at run scope and at test scope respectively.
        self._global_tags = set(), set()
        self._test_tags = set(), set()

    def __repr__(self):
        return '<%s %r>' % (self.__class__.__name__, self.result)

    def _any_tags(self, tags):
        # True when either the new-tags or gone-tags set is non-empty.
        return bool(tags[0] or tags[1])

    def _add_result_with_semaphore(self, method, test, *args, **kwargs):
        # Replay the whole start/time/tags/outcome/stop sequence for one test
        # atomically while holding the semaphore.
        now = self._now()
        self.semaphore.acquire()
        try:
            self.result.time(self._test_start)
            self.result.startTest(test)
            self.result.time(now)
            if self._any_tags(self._global_tags):
                self.result.tags(*self._global_tags)
            if self._any_tags(self._test_tags):
                self.result.tags(*self._test_tags)
            self._test_tags = set(), set()
            try:
                method(test, *args, **kwargs)
            finally:
                self.result.stopTest(test)
        finally:
            self.semaphore.release()
        self._test_start = None

    def addError(self, test, err=None, details=None):
        self._add_result_with_semaphore(self.result.addError,
            test, err, details=details)

    def addExpectedFailure(self, test, err=None, details=None):
        self._add_result_with_semaphore(self.result.addExpectedFailure,
            test, err, details=details)

    def addFailure(self, test, err=None, details=None):
        self._add_result_with_semaphore(self.result.addFailure,
            test, err, details=details)

    def addSkip(self, test, reason=None, details=None):
        self._add_result_with_semaphore(self.result.addSkip,
            test, reason, details=details)

    def addSuccess(self, test, details=None):
        self._add_result_with_semaphore(self.result.addSuccess,
            test, details=details)

    def addUnexpectedSuccess(self, test, details=None):
        self._add_result_with_semaphore(self.result.addUnexpectedSuccess,
            test, details=details)

    def progress(self, offset, whence):
        # Progress reports are not forwarded.
        pass

    def startTestRun(self):
        super(ThreadsafeForwardingResult, self).startTestRun()
        self.semaphore.acquire()
        try:
            self.result.startTestRun()
        finally:
            self.semaphore.release()

    def _get_shouldStop(self):
        self.semaphore.acquire()
        try:
            return self.result.shouldStop
        finally:
            self.semaphore.release()
    def _set_shouldStop(self, value):
        # Another case where we should not subclass TestResult
        pass
    shouldStop = property(_get_shouldStop, _set_shouldStop)

    def stop(self):
        self.semaphore.acquire()
        try:
            self.result.stop()
        finally:
            self.semaphore.release()

    def stopTestRun(self):
        self.semaphore.acquire()
        try:
            self.result.stopTestRun()
        finally:
            self.semaphore.release()

    def done(self):
        self.semaphore.acquire()
        try:
            self.result.done()
        finally:
            self.semaphore.release()

    def startTest(self, test):
        # Record the start time; forwarding happens later, atomically.
        self._test_start = self._now()
        super(ThreadsafeForwardingResult, self).startTest(test)

    def wasSuccessful(self):
        return self.result.wasSuccessful()

    def tags(self, new_tags, gone_tags):
        """See `TestResult`."""
        super(ThreadsafeForwardingResult, self).tags(new_tags, gone_tags)
        # Inside a test, tags are test-local; otherwise they apply run-wide.
        if self._test_start is not None:
            self._test_tags = _merge_tags(
                self._test_tags, (new_tags, gone_tags))
        else:
            self._global_tags = _merge_tags(
                self._global_tags, (new_tags, gone_tags))
def _merge_tags(existing, changed):
new_tags, gone_tags = changed
result_new = set(existing[0])
result_gone = set(existing[1])
result_new.update(new_tags)
result_new.difference_update(gone_tags)
result_gone.update(gone_tags)
result_gone.difference_update(new_tags)
return result_new, result_gone
class ExtendedToOriginalDecorator(object):
    """Permit new TestResult API code to degrade gracefully with old results.

    This decorates an existing TestResult and converts missing outcomes
    such as addSkip to older outcomes such as addSuccess. It also supports
    the extended details protocol. In all cases the most recent protocol
    is attempted first, and fallbacks only occur when the decorated result
    does not support the newer style of calling.
    """

    def __init__(self, decorated):
        self.decorated = decorated
        # Local tag tracking for decorated results that lack tag support.
        self._tags = TagContext()
        # Only used for old TestResults that do not have failfast.
        self._failfast = False

    def __repr__(self):
        return '<%s %r>' % (self.__class__.__name__, self.decorated)

    def __getattr__(self, name):
        # Anything not explicitly handled is forwarded to the decorated
        # result.
        return getattr(self.decorated, name)

    def addError(self, test, err=None, details=None):
        try:
            self._check_args(err, details)
            if details is not None:
                try:
                    # Try the extended details protocol first.
                    return self.decorated.addError(test, details=details)
                except TypeError:
                    # have to convert
                    err = self._details_to_exc_info(details)
            return self.decorated.addError(test, err)
        finally:
            if self.failfast:
                self.stop()

    def addExpectedFailure(self, test, err=None, details=None):
        self._check_args(err, details)
        addExpectedFailure = getattr(
            self.decorated, 'addExpectedFailure', None)
        if addExpectedFailure is None:
            # Old results lack this outcome; report a plain success instead.
            return self.addSuccess(test)
        if details is not None:
            try:
                return addExpectedFailure(test, details=details)
            except TypeError:
                # have to convert
                err = self._details_to_exc_info(details)
        return addExpectedFailure(test, err)

    def addFailure(self, test, err=None, details=None):
        try:
            self._check_args(err, details)
            if details is not None:
                try:
                    return self.decorated.addFailure(test, details=details)
                except TypeError:
                    # have to convert
                    err = self._details_to_exc_info(details)
            return self.decorated.addFailure(test, err)
        finally:
            if self.failfast:
                self.stop()

    def addSkip(self, test, reason=None, details=None):
        self._check_args(reason, details)
        addSkip = getattr(self.decorated, 'addSkip', None)
        if addSkip is None:
            # Old results lack addSkip; record as a success.
            return self.decorated.addSuccess(test)
        if details is not None:
            try:
                return addSkip(test, details=details)
            except TypeError:
                # extract the reason if it's available
                try:
                    reason = details['reason'].as_text()
                except KeyError:
                    reason = _details_to_str(details)
        return addSkip(test, reason)

    def addUnexpectedSuccess(self, test, details=None):
        try:
            outcome = getattr(self.decorated, 'addUnexpectedSuccess', None)
            if outcome is None:
                # Old results lack this outcome; synthesise a failure, since
                # an unexpected success is treated as unsuccessful.
                try:
                    test.fail("")
                except test.failureException:
                    return self.addFailure(test, sys.exc_info())
            if details is not None:
                try:
                    return outcome(test, details=details)
                except TypeError:
                    pass
            return outcome(test)
        finally:
            if self.failfast:
                self.stop()

    def addSuccess(self, test, details=None):
        if details is not None:
            try:
                return self.decorated.addSuccess(test, details=details)
            except TypeError:
                pass
        return self.decorated.addSuccess(test)

    def _check_args(self, err, details):
        # Exactly one of err/details must be supplied.
        param_count = 0
        if err is not None:
            param_count += 1
        if details is not None:
            param_count += 1
        if param_count != 1:
            raise ValueError("Must pass only one of err '%s' and details '%s"
                % (err, details))

    def _details_to_exc_info(self, details):
        """Convert a details dict to an exc_info tuple."""
        return (
            _StringException,
            _StringException(_details_to_str(details, special='traceback')),
            None)

    @property
    def current_tags(self):
        return getattr(
            self.decorated, 'current_tags', self._tags.get_current_tags())

    def done(self):
        try:
            return self.decorated.done()
        except AttributeError:
            # Decorated result predates done(); nothing to do.
            return

    def _get_failfast(self):
        return getattr(self.decorated, 'failfast', self._failfast)
    def _set_failfast(self, value):
        if safe_hasattr(self.decorated, 'failfast'):
            self.decorated.failfast = value
        else:
            # Track locally for results without failfast support.
            self._failfast = value
    failfast = property(_get_failfast, _set_failfast)

    def progress(self, offset, whence):
        method = getattr(self.decorated, 'progress', None)
        if method is None:
            return
        return method(offset, whence)

    @property
    def shouldStop(self):
        return self.decorated.shouldStop

    def startTest(self, test):
        self._tags = TagContext(self._tags)
        return self.decorated.startTest(test)

    def startTestRun(self):
        self._tags = TagContext()
        try:
            return self.decorated.startTestRun()
        except AttributeError:
            return

    def stop(self):
        return self.decorated.stop()

    def stopTest(self, test):
        self._tags = self._tags.parent
        return self.decorated.stopTest(test)

    def stopTestRun(self):
        try:
            return self.decorated.stopTestRun()
        except AttributeError:
            return

    def tags(self, new_tags, gone_tags):
        method = getattr(self.decorated, 'tags', None)
        if method is not None:
            return method(new_tags, gone_tags)
        else:
            # Track locally when the decorated result has no tag support.
            self._tags.change_tags(new_tags, gone_tags)

    def time(self, a_datetime):
        method = getattr(self.decorated, 'time', None)
        if method is None:
            return
        return method(a_datetime)

    def wasSuccessful(self):
        return self.decorated.wasSuccessful()
class TestResultDecorator(object):
    """General pass-through decorator.

    This provides a base that other TestResults can inherit from to
    gain basic forwarding functionality.
    """

    def __init__(self, decorated):
        """Create a TestResultDecorator forwarding to decorated."""
        self.decorated = decorated

    # All methods below forward verbatim to the decorated result; subclasses
    # override the ones they want to intercept.

    def startTest(self, test):
        return self.decorated.startTest(test)

    def startTestRun(self):
        return self.decorated.startTestRun()

    def stopTest(self, test):
        return self.decorated.stopTest(test)

    def stopTestRun(self):
        return self.decorated.stopTestRun()

    def addError(self, test, err=None, details=None):
        return self.decorated.addError(test, err, details=details)

    def addFailure(self, test, err=None, details=None):
        return self.decorated.addFailure(test, err, details=details)

    def addSuccess(self, test, details=None):
        return self.decorated.addSuccess(test, details=details)

    def addSkip(self, test, reason=None, details=None):
        return self.decorated.addSkip(test, reason, details=details)

    def addExpectedFailure(self, test, err=None, details=None):
        return self.decorated.addExpectedFailure(test, err, details=details)

    def addUnexpectedSuccess(self, test, details=None):
        return self.decorated.addUnexpectedSuccess(test, details=details)

    def progress(self, offset, whence):
        return self.decorated.progress(offset, whence)

    def wasSuccessful(self):
        return self.decorated.wasSuccessful()

    @property
    def current_tags(self):
        return self.decorated.current_tags

    @property
    def shouldStop(self):
        return self.decorated.shouldStop

    def stop(self):
        return self.decorated.stop()

    @property
    def testsRun(self):
        return self.decorated.testsRun

    def tags(self, new_tags, gone_tags):
        return self.decorated.tags(new_tags, gone_tags)

    def time(self, a_datetime):
        return self.decorated.time(a_datetime)
class Tagger(TestResultDecorator):
    """Apply a fixed set of tag changes to every test that starts."""

    def __init__(self, decorated, new_tags, gone_tags):
        """Wrap ``decorated`` so that each test is tagged.

        :param new_tags: Tags to be added for each test.
        :param gone_tags: Tags to be removed for each test.
        """
        super(Tagger, self).__init__(decorated)
        self._new_tags = set(new_tags)
        self._gone_tags = set(gone_tags)

    def startTest(self, test):
        """Start ``test``, then immediately apply this tagger's tag changes."""
        super(Tagger, self).startTest(test)
        self.tags(self._new_tags, self._gone_tags)
class TestByTestResult(TestResult):
    """Call something every time a test completes."""

    def __init__(self, on_test):
        """Construct a ``TestByTestResult``.

        :param on_test: A callable that take a test case, a status (one of
            "success", "failure", "error", "skip", or "xfail"), a start time
            (a ``datetime`` with timezone), a stop time, an iterable of tags,
            and a details dict. Is called at the end of each test (i.e. on
            ``stopTest``) with the accumulated values for that test.
        """
        super(TestByTestResult, self).__init__()
        self._on_test = on_test

    def startTest(self, test):
        super(TestByTestResult, self).startTest(test)
        self._start_time = self._now()
        # There's no supported (i.e. tested) behaviour that relies on these
        # being set, but it makes me more comfortable all the same. -- jml
        self._status = None
        self._details = None
        self._stop_time = None

    def stopTest(self, test):
        self._stop_time = self._now()
        # Snapshot tags before the base class pops the per-test tag scope.
        tags = set(self.current_tags)
        super(TestByTestResult, self).stopTest(test)
        self._on_test(
            test=test,
            status=self._status,
            start_time=self._start_time,
            stop_time=self._stop_time,
            tags=tags,
            details=self._details)

    def _err_to_details(self, test, err, details):
        # Prefer explicit details; otherwise synthesise one from exc_info.
        if details:
            return details
        return {'traceback': TracebackContent(err, test)}

    def addSuccess(self, test, details=None):
        super(TestByTestResult, self).addSuccess(test)
        self._status = 'success'
        self._details = details

    def addFailure(self, test, err=None, details=None):
        super(TestByTestResult, self).addFailure(test, err, details)
        self._status = 'failure'
        self._details = self._err_to_details(test, err, details)

    def addError(self, test, err=None, details=None):
        super(TestByTestResult, self).addError(test, err, details)
        self._status = 'error'
        self._details = self._err_to_details(test, err, details)

    def addSkip(self, test, reason=None, details=None):
        super(TestByTestResult, self).addSkip(test, reason, details)
        self._status = 'skip'
        if details is None:
            details = {'reason': text_content(reason)}
        elif reason:
            # XXX: What if details already has 'reason' key?
            details['reason'] = text_content(reason)
        self._details = details

    def addExpectedFailure(self, test, err=None, details=None):
        super(TestByTestResult, self).addExpectedFailure(test, err, details)
        self._status = 'xfail'
        self._details = self._err_to_details(test, err, details)

    def addUnexpectedSuccess(self, test, details=None):
        super(TestByTestResult, self).addUnexpectedSuccess(test, details)
        # Note: reported to on_test as 'success' even though the base class
        # counts it against wasSuccessful().
        self._status = 'success'
        self._details = details
class _StringException(Exception):
    """An exception made from an arbitrary string."""

    if not str_is_unicode:
        # Python 2 only: require unicode input and encode for the
        # byte-string __str__.
        def __init__(self, string):
            if type(string) is not unicode:
                raise TypeError("_StringException expects unicode, got %r" %
                    (string,))
            Exception.__init__(self, string)

        def __str__(self):
            return self.args[0].encode("utf-8")

        def __unicode__(self):
            return self.args[0]
    # For 3.0 and above the default __str__ is fine, so we don't define one.

    def __hash__(self):
        # Identity-based hash; note this is deliberately not consistent with
        # the value-based __eq__ below.
        return id(self)

    def __eq__(self, other):
        try:
            return self.args == other.args
        except AttributeError:
            return False
def _format_text_attachment(name, text):
if '\n' in text:
return "%s: {{{\n%s\n}}}\n" % (name, text)
return "%s: {{{%s}}}" % (name, text)
def _details_to_str(details, special=None):
    """Convert a details dict to a string.

    :param details: A dictionary mapping short names to ``Content`` objects.
    :param special: If specified, an attachment that should have special
        attention drawn to it. The primary attachment. Normally it's the
        traceback that caused the test to fail.
    :return: A formatted string that can be included in text test results.
    """
    # Partition attachments into three buckets: binary (listed by type
    # only), empty text (listed by name only), and non-empty text.
    empty_attachments = []
    binary_attachments = []
    text_attachments = []
    special_content = None
    # sorted is for testing, may want to remove that and use a dict
    # subclass with defined order for items instead.
    for key, content in sorted(details.items()):
        if content.content_type.type != 'text':
            binary_attachments.append((key, content.content_type))
            continue
        text = content.as_text().strip()
        if not text:
            empty_attachments.append(key)
            continue
        # We want the 'special' attachment to be at the bottom.
        if key == special:
            special_content = '%s\n' % (text,)
            continue
        text_attachments.append(_format_text_attachment(key, text))
    # Ensure the text section ends with a newline before the special block.
    if text_attachments and not text_attachments[-1].endswith('\n'):
        text_attachments.append('')
    if special_content:
        text_attachments.append(special_content)
    lines = []
    if binary_attachments:
        lines.append('Binary content:\n')
        for name, content_type in binary_attachments:
            lines.append('  %s (%s)\n' % (name, content_type))
    if empty_attachments:
        lines.append('Empty attachments:\n')
        for name in empty_attachments:
            lines.append('  %s\n' % (name,))
    # Blank separator only when both a summary section and text follow.
    if (binary_attachments or empty_attachments) and text_attachments:
        lines.append('\n')
    lines.append('\n'.join(text_attachments))
    return _u('').join(lines)

View File

@ -1,46 +0,0 @@
"""Tests for testtools itself."""
# See README for copyright and licensing details.
from unittest import TestSuite
def test_suite():
    """Build a TestSuite aggregating the suite of every testtools test module."""
    from testtools.tests import (
        matchers,
        test_compat,
        test_content,
        test_content_type,
        test_deferredruntest,
        test_distutilscmd,
        test_fixturesupport,
        test_helpers,
        test_monkey,
        test_run,
        test_runtest,
        test_spinner,
        test_tags,
        test_testcase,
        test_testresult,
        test_testsuite,
        )
    modules = [
        matchers,
        test_compat,
        test_content,
        test_content_type,
        test_deferredruntest,
        test_distutilscmd,
        test_fixturesupport,
        test_helpers,
        test_monkey,
        test_run,
        test_runtest,
        test_spinner,
        test_tags,
        test_testcase,
        test_testresult,
        test_testsuite,
        ]
    # Collect each module's suite lazily; TestSuite accepts any iterable.
    return TestSuite(module.test_suite() for module in modules)

View File

@ -1,109 +0,0 @@
# Copyright (c) 2008-2012 testtools developers. See LICENSE for details.
"""Helpers for tests."""
__all__ = [
'LoggingResult',
]
import sys
from testtools import TestResult
from testtools.helpers import (
safe_hasattr,
)
from testtools.content import TracebackContent
from testtools import runtest
# Importing to preserve compatibility.
safe_hasattr
# GZ 2010-08-12: Don't do this, pointlessly creates an exc_info cycle
# Raise-and-catch once at import time so tests have a ready-made
# sys.exc_info() triple to feed to result objects.
try:
    raise Exception
except Exception:
    an_exc_info = sys.exc_info()
# Deprecated: This classes attributes are somewhat non deterministic which
# leads to hard to predict tests (because Python upstream are changing things.
class LoggingResult(TestResult):
    """TestResult that logs its event to a list.

    Every result-protocol hook appends a record to the ``log`` list passed
    to the constructor -- a tuple of (event name, args...) or, for the
    argument-less events ('stop', 'startTestRun', 'stopTestRun', 'done'),
    a bare string -- and then delegates to the base class.
    """
    def __init__(self, log):
        # The caller's list is shared, not copied: appends are visible there.
        self._events = log
        super(LoggingResult, self).__init__()
    def startTest(self, test):
        self._events.append(('startTest', test))
        super(LoggingResult, self).startTest(test)
    def stop(self):
        self._events.append('stop')
        super(LoggingResult, self).stop()
    def stopTest(self, test):
        self._events.append(('stopTest', test))
        super(LoggingResult, self).stopTest(test)
    def addFailure(self, test, error):
        self._events.append(('addFailure', test, error))
        super(LoggingResult, self).addFailure(test, error)
    def addError(self, test, error):
        self._events.append(('addError', test, error))
        super(LoggingResult, self).addError(test, error)
    def addSkip(self, test, reason):
        self._events.append(('addSkip', test, reason))
        super(LoggingResult, self).addSkip(test, reason)
    def addSuccess(self, test):
        self._events.append(('addSuccess', test))
        super(LoggingResult, self).addSuccess(test)
    def startTestRun(self):
        self._events.append('startTestRun')
        super(LoggingResult, self).startTestRun()
    def stopTestRun(self):
        self._events.append('stopTestRun')
        super(LoggingResult, self).stopTestRun()
    def done(self):
        self._events.append('done')
        super(LoggingResult, self).done()
    def tags(self, new_tags, gone_tags):
        self._events.append(('tags', new_tags, gone_tags))
        super(LoggingResult, self).tags(new_tags, gone_tags)
    def time(self, a_datetime):
        self._events.append(('time', a_datetime))
        super(LoggingResult, self).time(a_datetime)
def is_stack_hidden():
    """Return whether testtools-internal stack frames are currently hidden."""
    return TracebackContent.HIDE_INTERNAL_STACK
def hide_testtools_stack(should_hide=True):
    """Set stack hiding to ``should_hide`` and return the previous setting."""
    result = TracebackContent.HIDE_INTERNAL_STACK
    TracebackContent.HIDE_INTERNAL_STACK = should_hide
    return result
def run_with_stack_hidden(should_hide, f, *args, **kwargs):
    """Call ``f(*args, **kwargs)`` with stack hiding temporarily set.

    The previous hiding state is restored even if ``f`` raises.
    """
    old_should_hide = hide_testtools_stack(should_hide)
    try:
        return f(*args, **kwargs)
    finally:
        hide_testtools_stack(old_should_hide)
class FullStackRunTest(runtest.RunTest):
    """RunTest variant that disables frame hiding while running user code."""
    def _run_user(self, fn, *args, **kwargs):
        return run_with_stack_hidden(
            False,
            super(FullStackRunTest, self)._run_user, fn, *args, **kwargs)

View File

@ -1,29 +0,0 @@
# Copyright (c) 2009-2012 testtools developers. See LICENSE for details.
from unittest import TestSuite
def test_suite():
    """Build a TestSuite covering every matcher test module."""
    from testtools.tests.matchers import (
        test_basic,
        test_datastructures,
        test_dict,
        test_doctest,
        test_exception,
        test_filesystem,
        test_higherorder,
        test_impl,
        )
    modules = [
        test_basic,
        test_datastructures,
        test_dict,
        test_doctest,
        test_exception,
        test_filesystem,
        test_higherorder,
        test_impl,
        ]
    # Collect each module's suite lazily; TestSuite accepts any iterable.
    return TestSuite(module.test_suite() for module in modules)

View File

@ -1,42 +0,0 @@
# Copyright (c) 2008-2012 testtools developers. See LICENSE for details.
from testtools.tests.helpers import FullStackRunTest
class TestMatchersInterface(object):
    """Mixin exercising the common Matcher contract.

    Concrete subclasses (also deriving from TestCase) supply:
      * ``matches_matcher`` -- the matcher instance under test
      * ``matches_matches`` / ``matches_mismatches`` -- candidate values
        that must match / mismatch
      * ``str_examples`` -- (expected str() output, matcher) pairs
      * ``describe_examples`` -- (description, matchee, matcher) triples
    """
    run_tests_with = FullStackRunTest
    def test_matches_match(self):
        matcher = self.matches_matcher
        matches = self.matches_matches
        mismatches = self.matches_mismatches
        for candidate in matches:
            self.assertEqual(None, matcher.match(candidate))
        for candidate in mismatches:
            mismatch = matcher.match(candidate)
            self.assertNotEqual(None, mismatch)
            self.assertNotEqual(None, getattr(mismatch, 'describe', None))
    def test__str__(self):
        # [(expected, object to __str__)].
        from testtools.matchers._doctest import DocTestMatches
        examples = self.str_examples
        for expected, matcher in examples:
            self.assertThat(matcher, DocTestMatches(expected))
    def test_describe_difference(self):
        # [(expected, matchee, matcher), ...]
        examples = self.describe_examples
        for difference, matchee, matcher in examples:
            mismatch = matcher.match(matchee)
            self.assertEqual(difference, mismatch.describe())
    def test_mismatch_details(self):
        # The mismatch object must provide get_details, which must return a
        # dictionary mapping names to Content objects.
        examples = self.describe_examples
        for difference, matchee, matcher in examples:
            mismatch = matcher.match(matchee)
            details = mismatch.get_details()
            self.assertEqual(dict(details), details)

View File

@ -1,374 +0,0 @@
# Copyright (c) 2008-2012 testtools developers. See LICENSE for details.
import re
from testtools import TestCase
from testtools.compat import (
text_repr,
_b,
_u,
)
from testtools.matchers._basic import (
_BinaryMismatch,
Contains,
DoesNotEndWith,
DoesNotStartWith,
EndsWith,
Equals,
Is,
IsInstance,
LessThan,
GreaterThan,
MatchesRegex,
NotEquals,
SameMembers,
StartsWith,
)
from testtools.tests.helpers import FullStackRunTest
from testtools.tests.matchers.helpers import TestMatchersInterface
class Test_BinaryMismatch(TestCase):
    """Mismatches from binary comparisons need useful describe output"""
    # Long multiline fixture forces the multi-line describe() format.
    _long_string = "This is a longish multiline non-ascii string\n\xa7"
    _long_b = _b(_long_string)
    _long_u = _u(_long_string)
    def test_short_objects(self):
        o1, o2 = object(), object()
        mismatch = _BinaryMismatch(o1, "!~", o2)
        self.assertEqual(mismatch.describe(), "%r !~ %r" % (o1, o2))
    def test_short_mixed_strings(self):
        b, u = _b("\xa7"), _u("\xa7")
        mismatch = _BinaryMismatch(b, "!~", u)
        self.assertEqual(mismatch.describe(), "%r !~ %r" % (b, u))
    def test_long_bytes(self):
        one_line_b = self._long_b.replace(_b("\n"), _b(" "))
        mismatch = _BinaryMismatch(one_line_b, "!~", self._long_b)
        self.assertEqual(mismatch.describe(),
            "%s:\nreference = %s\nactual = %s\n" % ("!~",
                text_repr(one_line_b),
                text_repr(self._long_b, multiline=True)))
    def test_long_unicode(self):
        one_line_u = self._long_u.replace("\n", " ")
        mismatch = _BinaryMismatch(one_line_u, "!~", self._long_u)
        self.assertEqual(mismatch.describe(),
            "%s:\nreference = %s\nactual = %s\n" % ("!~",
                text_repr(one_line_u),
                text_repr(self._long_u, multiline=True)))
    def test_long_mixed_strings(self):
        mismatch = _BinaryMismatch(self._long_b, "!~", self._long_u)
        self.assertEqual(mismatch.describe(),
            "%s:\nreference = %s\nactual = %s\n" % ("!~",
                text_repr(self._long_b, multiline=True),
                text_repr(self._long_u, multiline=True)))
    def test_long_bytes_and_object(self):
        obj = object()
        mismatch = _BinaryMismatch(self._long_b, "!~", obj)
        self.assertEqual(mismatch.describe(),
            "%s:\nreference = %s\nactual = %s\n" % ("!~",
                text_repr(self._long_b, multiline=True),
                repr(obj)))
    def test_long_unicode_and_object(self):
        obj = object()
        mismatch = _BinaryMismatch(self._long_u, "!~", obj)
        self.assertEqual(mismatch.describe(),
            "%s:\nreference = %s\nactual = %s\n" % ("!~",
                text_repr(self._long_u, multiline=True),
                repr(obj)))
class TestEqualsInterface(TestCase, TestMatchersInterface):
    """Equals: matches values comparing == to the reference."""
    matches_matcher = Equals(1)
    matches_matches = [1]
    matches_mismatches = [2]
    str_examples = [("Equals(1)", Equals(1)), ("Equals('1')", Equals('1'))]
    describe_examples = [("1 != 2", 2, Equals(1))]
class TestNotEqualsInterface(TestCase, TestMatchersInterface):
    """NotEquals: matches values comparing != to the reference."""
    matches_matcher = NotEquals(1)
    matches_matches = [2]
    matches_mismatches = [1]
    str_examples = [
        ("NotEquals(1)", NotEquals(1)), ("NotEquals('1')", NotEquals('1'))]
    describe_examples = [("1 == 1", 1, NotEquals(1))]
class TestIsInterface(TestCase, TestMatchersInterface):
    """Is: identity (``is``) comparison, not equality."""
    foo = object()
    bar = object()
    matches_matcher = Is(foo)
    matches_matches = [foo]
    matches_mismatches = [bar, 1]
    str_examples = [("Is(2)", Is(2))]
    describe_examples = [("1 is not 2", 2, Is(1))]
class TestIsInstanceInterface(TestCase, TestMatchersInterface):
    """IsInstance: isinstance() check; the class object itself mismatches."""
    class Foo:pass
    matches_matcher = IsInstance(Foo)
    matches_matches = [Foo()]
    matches_mismatches = [object(), 1, Foo]
    str_examples = [
        ("IsInstance(str)", IsInstance(str)),
        ("IsInstance(str, int)", IsInstance(str, int)),
        ]
    describe_examples = [
        ("'foo' is not an instance of int", 'foo', IsInstance(int)),
        ("'foo' is not an instance of any of (int, type)", 'foo',
         IsInstance(int, type)),
        ]
class TestLessThanInterface(TestCase, TestMatchersInterface):
    """LessThan: candidate must be strictly less than the reference."""
    matches_matcher = LessThan(4)
    matches_matches = [-5, 3]
    matches_mismatches = [4, 5, 5000]
    str_examples = [
        ("LessThan(12)", LessThan(12)),
        ]
    describe_examples = [
        ('4 is not > 5', 5, LessThan(4)),
        ('4 is not > 4', 4, LessThan(4)),
        ]
class TestGreaterThanInterface(TestCase, TestMatchersInterface):
    """GreaterThan: candidate must be strictly greater than the reference."""
    matches_matcher = GreaterThan(4)
    matches_matches = [5, 8]
    matches_mismatches = [-2, 0, 4]
    str_examples = [
        ("GreaterThan(12)", GreaterThan(12)),
        ]
    describe_examples = [
        ('5 is not < 4', 4, GreaterThan(5)),
        ('4 is not < 4', 4, GreaterThan(4)),
        ]
class TestContainsInterface(TestCase, TestMatchersInterface):
    """Contains: the reference must occur in the candidate (``in``)."""
    matches_matcher = Contains('foo')
    matches_matches = ['foo', 'afoo', 'fooa']
    matches_mismatches = ['f', 'fo', 'oo', 'faoo', 'foao']
    str_examples = [
        ("Contains(1)", Contains(1)),
        ("Contains('foo')", Contains('foo')),
        ]
    describe_examples = [("1 not in 2", 2, Contains(1))]
class DoesNotStartWithTests(TestCase):
    """Tests for the DoesNotStartWith mismatch's describe() output."""
    run_tests_with = FullStackRunTest
    def test_describe(self):
        mismatch = DoesNotStartWith("fo", "bo")
        self.assertEqual("'fo' does not start with 'bo'.", mismatch.describe())
    def test_describe_non_ascii_unicode(self):
        string = _u("A\xA7")
        suffix = _u("B\xA7")
        mismatch = DoesNotStartWith(string, suffix)
        self.assertEqual("%s does not start with %s." % (
            text_repr(string), text_repr(suffix)),
            mismatch.describe())
    def test_describe_non_ascii_bytes(self):
        string = _b("A\xA7")
        suffix = _b("B\xA7")
        mismatch = DoesNotStartWith(string, suffix)
        self.assertEqual("%r does not start with %r." % (string, suffix),
            mismatch.describe())
class StartsWithTests(TestCase):
    """Tests for the StartsWith matcher."""
    run_tests_with = FullStackRunTest
    def test_str(self):
        matcher = StartsWith("bar")
        self.assertEqual("StartsWith('bar')", str(matcher))
    def test_str_with_bytes(self):
        b = _b("\xA7")
        matcher = StartsWith(b)
        self.assertEqual("StartsWith(%r)" % (b,), str(matcher))
    def test_str_with_unicode(self):
        u = _u("\xA7")
        matcher = StartsWith(u)
        self.assertEqual("StartsWith(%r)" % (u,), str(matcher))
    def test_match(self):
        matcher = StartsWith("bar")
        self.assertIs(None, matcher.match("barf"))
    def test_mismatch_returns_does_not_start_with(self):
        matcher = StartsWith("bar")
        self.assertIsInstance(matcher.match("foo"), DoesNotStartWith)
    def test_mismatch_sets_matchee(self):
        matcher = StartsWith("bar")
        mismatch = matcher.match("foo")
        self.assertEqual("foo", mismatch.matchee)
    def test_mismatch_sets_expected(self):
        matcher = StartsWith("bar")
        mismatch = matcher.match("foo")
        self.assertEqual("bar", mismatch.expected)
class DoesNotEndWithTests(TestCase):
    """Tests for the DoesNotEndWith mismatch's describe() output."""
    run_tests_with = FullStackRunTest
    def test_describe(self):
        mismatch = DoesNotEndWith("fo", "bo")
        self.assertEqual("'fo' does not end with 'bo'.", mismatch.describe())
    def test_describe_non_ascii_unicode(self):
        string = _u("A\xA7")
        suffix = _u("B\xA7")
        mismatch = DoesNotEndWith(string, suffix)
        self.assertEqual("%s does not end with %s." % (
            text_repr(string), text_repr(suffix)),
            mismatch.describe())
    def test_describe_non_ascii_bytes(self):
        string = _b("A\xA7")
        suffix = _b("B\xA7")
        mismatch = DoesNotEndWith(string, suffix)
        self.assertEqual("%r does not end with %r." % (string, suffix),
            mismatch.describe())
class EndsWithTests(TestCase):
    """Tests for the EndsWith matcher."""
    run_tests_with = FullStackRunTest
    def test_str(self):
        matcher = EndsWith("bar")
        self.assertEqual("EndsWith('bar')", str(matcher))
    def test_str_with_bytes(self):
        b = _b("\xA7")
        matcher = EndsWith(b)
        self.assertEqual("EndsWith(%r)" % (b,), str(matcher))
    def test_str_with_unicode(self):
        u = _u("\xA7")
        matcher = EndsWith(u)
        self.assertEqual("EndsWith(%r)" % (u,), str(matcher))
    def test_match(self):
        matcher = EndsWith("arf")
        self.assertIs(None, matcher.match("barf"))
    def test_mismatch_returns_does_not_end_with(self):
        matcher = EndsWith("bar")
        self.assertIsInstance(matcher.match("foo"), DoesNotEndWith)
    def test_mismatch_sets_matchee(self):
        matcher = EndsWith("bar")
        mismatch = matcher.match("foo")
        self.assertEqual("foo", mismatch.matchee)
    def test_mismatch_sets_expected(self):
        matcher = EndsWith("bar")
        mismatch = matcher.match("foo")
        self.assertEqual("bar", mismatch.expected)
class TestSameMembers(TestCase, TestMatchersInterface):
    """SameMembers: same elements with multiplicity, ignoring order."""
    matches_matcher = SameMembers([1, 1, 2, 3, {'foo': 'bar'}])
    matches_matches = [
        [1, 1, 2, 3, {'foo': 'bar'}],
        [3, {'foo': 'bar'}, 1, 2, 1],
        [3, 2, 1, {'foo': 'bar'}, 1],
        (2, {'foo': 'bar'}, 3, 1, 1),
        ]
    matches_mismatches = [
        set([1, 2, 3]),
        [1, 1, 2, 3, 5],
        [1, 2, 3, {'foo': 'bar'}],
        'foo',
        ]
    describe_examples = [
        (("elements differ:\n"
        "reference = ['apple', 'orange', 'canteloupe', 'watermelon', 'lemon', 'banana']\n"
        "actual = ['orange', 'apple', 'banana', 'sparrow', 'lemon', 'canteloupe']\n"
        ": \n"
        "missing: ['watermelon']\n"
        "extra: ['sparrow']"
        ),
        ['orange', 'apple', 'banana', 'sparrow', 'lemon', 'canteloupe',],
        SameMembers(
            ['apple', 'orange', 'canteloupe', 'watermelon',
            'lemon', 'banana',])),
        ]
    str_examples = [
        ('SameMembers([1, 2, 3])', SameMembers([1, 2, 3])),
        ]
class TestMatchesRegex(TestCase, TestMatchersInterface):
    """MatchesRegex: regex match on text or bytes, with optional flags."""
    matches_matcher = MatchesRegex('a|b')
    matches_matches = ['a', 'b']
    matches_mismatches = ['c']
    str_examples = [
        ("MatchesRegex('a|b')", MatchesRegex('a|b')),
        ("MatchesRegex('a|b', re.M)", MatchesRegex('a|b', re.M)),
        ("MatchesRegex('a|b', re.I|re.M)", MatchesRegex('a|b', re.I|re.M)),
        ("MatchesRegex(%r)" % (_b("\xA7"),), MatchesRegex(_b("\xA7"))),
        ("MatchesRegex(%r)" % (_u("\xA7"),), MatchesRegex(_u("\xA7"))),
        ]
    describe_examples = [
        ("'c' does not match /a|b/", 'c', MatchesRegex('a|b')),
        ("'c' does not match /a\d/", 'c', MatchesRegex(r'a\d')),
        ("%r does not match /\\s+\\xa7/" % (_b('c'),),
            _b('c'), MatchesRegex(_b("\\s+\xA7"))),
        ("%r does not match /\\s+\\xa7/" % (_u('c'),),
            _u('c'), MatchesRegex(_u("\\s+\xA7"))),
        ]
def test_suite():
    """Load every test case defined in this module."""
    from unittest import TestLoader
    loader = TestLoader()
    return loader.loadTestsFromName(__name__)

View File

@ -1,209 +0,0 @@
# Copyright (c) 2008-2012 testtools developers. See LICENSE for details.
import doctest
import re
import sys
from testtools import TestCase
from testtools.compat import StringIO
from testtools.matchers import (
Annotate,
Equals,
LessThan,
MatchesRegex,
NotEquals,
)
from testtools.matchers._datastructures import (
ContainsAll,
MatchesListwise,
MatchesStructure,
MatchesSetwise,
)
from testtools.tests.helpers import FullStackRunTest
from testtools.tests.matchers.helpers import TestMatchersInterface
def run_doctest(obj, name):
    """Run ``obj``'s docstring as a doctest.

    :param obj: An object whose ``__doc__`` contains doctest examples.
    :param name: The name to report for the doctest.
    :return: A tuple of (failure count, captured runner output).
    """
    parser = doctest.DocTestParser()
    the_test = parser.get_doctest(
        obj.__doc__, sys.modules[obj.__module__].__dict__, name, '', 0)
    runner = doctest.DocTestRunner()
    output = StringIO()
    runner.run(the_test, out=output.write)
    return runner.failures, output.getvalue()
class TestMatchesListwise(TestCase):
    """MatchesListwise is documented by example; run its docstring."""
    run_tests_with = FullStackRunTest
    def test_docstring(self):
        failure_count, output = run_doctest(
            MatchesListwise, "MatchesListwise")
        if failure_count:
            self.fail("Doctest failed with %s" % output)
class TestMatchesStructure(TestCase, TestMatchersInterface):
    """MatchesStructure: per-attribute matchers against an object."""
    class SimpleClass:
        # Minimal attribute holder used as the matchee in these tests.
        def __init__(self, x, y):
            self.x = x
            self.y = y
    matches_matcher = MatchesStructure(x=Equals(1), y=Equals(2))
    matches_matches = [SimpleClass(1, 2)]
    matches_mismatches = [
        SimpleClass(2, 2),
        SimpleClass(1, 1),
        SimpleClass(3, 3),
        ]
    str_examples = [
        ("MatchesStructure(x=Equals(1))", MatchesStructure(x=Equals(1))),
        ("MatchesStructure(y=Equals(2))", MatchesStructure(y=Equals(2))),
        ("MatchesStructure(x=Equals(1), y=Equals(2))",
         MatchesStructure(x=Equals(1), y=Equals(2))),
        ]
    describe_examples = [
        ("""\
Differences: [
3 != 1: x
]""", SimpleClass(1, 2), MatchesStructure(x=Equals(3), y=Equals(2))),
        ("""\
Differences: [
3 != 2: y
]""", SimpleClass(1, 2), MatchesStructure(x=Equals(1), y=Equals(3))),
        ("""\
Differences: [
0 != 1: x
0 != 2: y
]""", SimpleClass(1, 2), MatchesStructure(x=Equals(0), y=Equals(0))),
        ]
    def test_fromExample(self):
        self.assertThat(
            self.SimpleClass(1, 2),
            MatchesStructure.fromExample(self.SimpleClass(1, 3), 'x'))
    def test_byEquality(self):
        self.assertThat(
            self.SimpleClass(1, 2),
            MatchesStructure.byEquality(x=1))
    def test_withStructure(self):
        self.assertThat(
            self.SimpleClass(1, 2),
            MatchesStructure.byMatcher(LessThan, x=2))
    def test_update(self):
        self.assertThat(
            self.SimpleClass(1, 2),
            MatchesStructure(x=NotEquals(1)).update(x=Equals(1)))
    def test_update_none(self):
        # Passing None for an attribute removes its matcher entirely.
        self.assertThat(
            self.SimpleClass(1, 2),
            MatchesStructure(x=Equals(1), z=NotEquals(42)).update(
                z=None))
class TestMatchesSetwise(TestCase):
    """MatchesSetwise: each matcher pairs with exactly one value, any order."""
    run_tests_with = FullStackRunTest
    def assertMismatchWithDescriptionMatching(self, value, matcher,
                                              description_matcher):
        # Assert that ``matcher`` mismatches ``value`` and that the mismatch
        # description satisfies ``description_matcher``.
        mismatch = matcher.match(value)
        if mismatch is None:
            self.fail("%s matched %s" % (matcher, value))
        actual_description = mismatch.describe()
        self.assertThat(
            actual_description,
            Annotate(
                "%s matching %s" % (matcher, value),
                description_matcher))
    def test_matches(self):
        self.assertIs(
            None, MatchesSetwise(Equals(1), Equals(2)).match([2, 1]))
    def test_mismatches(self):
        self.assertMismatchWithDescriptionMatching(
            [2, 3], MatchesSetwise(Equals(1), Equals(2)),
            MatchesRegex('.*There was 1 mismatch$', re.S))
    def test_too_many_matchers(self):
        self.assertMismatchWithDescriptionMatching(
            [2, 3], MatchesSetwise(Equals(1), Equals(2), Equals(3)),
            Equals('There was 1 matcher left over: Equals(1)'))
    def test_too_many_values(self):
        self.assertMismatchWithDescriptionMatching(
            [1, 2, 3], MatchesSetwise(Equals(1), Equals(2)),
            Equals('There was 1 value left over: [3]'))
    def test_two_too_many_matchers(self):
        self.assertMismatchWithDescriptionMatching(
            [3], MatchesSetwise(Equals(1), Equals(2), Equals(3)),
            MatchesRegex(
                'There were 2 matchers left over: Equals\([12]\), '
                'Equals\([12]\)'))
    def test_two_too_many_values(self):
        self.assertMismatchWithDescriptionMatching(
            [1, 2, 3, 4], MatchesSetwise(Equals(1), Equals(2)),
            MatchesRegex(
                'There were 2 values left over: \[[34], [34]\]'))
    def test_mismatch_and_too_many_matchers(self):
        self.assertMismatchWithDescriptionMatching(
            [2, 3], MatchesSetwise(Equals(0), Equals(1), Equals(2)),
            MatchesRegex(
                '.*There was 1 mismatch and 1 extra matcher: Equals\([01]\)',
                re.S))
    def test_mismatch_and_too_many_values(self):
        self.assertMismatchWithDescriptionMatching(
            [2, 3, 4], MatchesSetwise(Equals(1), Equals(2)),
            MatchesRegex(
                '.*There was 1 mismatch and 1 extra value: \[[34]\]',
                re.S))
    def test_mismatch_and_two_too_many_matchers(self):
        self.assertMismatchWithDescriptionMatching(
            [3, 4], MatchesSetwise(
                Equals(0), Equals(1), Equals(2), Equals(3)),
            MatchesRegex(
                '.*There was 1 mismatch and 2 extra matchers: '
                'Equals\([012]\), Equals\([012]\)', re.S))
    def test_mismatch_and_two_too_many_values(self):
        self.assertMismatchWithDescriptionMatching(
            [2, 3, 4, 5], MatchesSetwise(Equals(1), Equals(2)),
            MatchesRegex(
                '.*There was 1 mismatch and 2 extra values: \[[145], [145]\]',
                re.S))
class TestContainsAllInterface(TestCase, TestMatchersInterface):
    """ContainsAll: candidate must contain every listed element."""
    matches_matcher = ContainsAll(['foo', 'bar'])
    matches_matches = [['foo', 'bar'], ['foo', 'z', 'bar'], ['bar', 'foo']]
    matches_mismatches = [['f', 'g'], ['foo', 'baz'], []]
    str_examples = [(
        "MatchesAll(Contains('foo'), Contains('bar'))",
        ContainsAll(['foo', 'bar'])),
        ]
    describe_examples = [("""Differences: [
'baz' not in 'foo'
]""",
'foo', ContainsAll(['foo', 'baz']))]
def test_suite():
    """Load every test case defined in this module."""
    from unittest import TestLoader
    loader = TestLoader()
    return loader.loadTestsFromName(__name__)

View File

@ -1,222 +0,0 @@
from testtools import TestCase
from testtools.matchers import (
Equals,
NotEquals,
Not,
)
from testtools.matchers._dict import (
ContainedByDict,
ContainsDict,
KeysEqual,
MatchesAllDict,
MatchesDict,
_SubDictOf,
)
from testtools.tests.matchers.helpers import TestMatchersInterface
class TestMatchesAllDictInterface(TestCase, TestMatchersInterface):
    """MatchesAllDict: the value must satisfy every labelled matcher."""
    matches_matcher = MatchesAllDict({'a': NotEquals(1), 'b': NotEquals(2)})
    matches_matches = [3, 4]
    matches_mismatches = [1, 2]
    str_examples = [
        ("MatchesAllDict({'a': NotEquals(1), 'b': NotEquals(2)})",
         matches_matcher)]
    describe_examples = [
        ("""a: 1 == 1""", 1, matches_matcher),
        ]
class TestKeysEqual(TestCase, TestMatchersInterface):
    """KeysEqual: the dict's key set must equal the given keys exactly."""
    matches_matcher = KeysEqual('foo', 'bar')
    matches_matches = [
        {'foo': 0, 'bar': 1},
        ]
    matches_mismatches = [
        {},
        {'foo': 0},
        {'bar': 1},
        {'foo': 0, 'bar': 1, 'baz': 2},
        {'a': None, 'b': None, 'c': None},
        ]
    str_examples = [
        ("KeysEqual('foo', 'bar')", KeysEqual('foo', 'bar')),
        ]
    describe_examples = []
    def test_description(self):
        matchee = {'foo': 0, 'bar': 1, 'baz': 2}
        mismatch = KeysEqual('foo', 'bar').match(matchee)
        description = mismatch.describe()
        self.assertThat(
            description, Equals(
                "['bar', 'foo'] does not match %r: Keys not equal"
                % (matchee,)))
class TestSubDictOf(TestCase, TestMatchersInterface):
    """_SubDictOf: every item of the candidate must appear in the reference."""
    matches_matcher = _SubDictOf({'foo': 'bar', 'baz': 'qux'})
    matches_matches = [
        {'foo': 'bar', 'baz': 'qux'},
        {'foo': 'bar'},
        ]
    matches_mismatches = [
        {'foo': 'bar', 'baz': 'qux', 'cat': 'dog'},
        {'foo': 'bar', 'cat': 'dog'},
        ]
    str_examples = []
    describe_examples = []
class TestMatchesDict(TestCase, TestMatchersInterface):
    """MatchesDict: exact key set, with a matcher for every key."""
    matches_matcher = MatchesDict(
        {'foo': Equals('bar'), 'baz': Not(Equals('qux'))})
    matches_matches = [
        {'foo': 'bar', 'baz': None},
        {'foo': 'bar', 'baz': 'quux'},
        ]
    matches_mismatches = [
        {},
        {'foo': 'bar', 'baz': 'qux'},
        {'foo': 'bop', 'baz': 'qux'},
        {'foo': 'bar', 'baz': 'quux', 'cat': 'dog'},
        {'foo': 'bar', 'cat': 'dog'},
        ]
    str_examples = [
        ("MatchesDict({'baz': %s, 'foo': %s})" % (
                Not(Equals('qux')), Equals('bar')),
         matches_matcher),
        ]
    describe_examples = [
        ("Missing: {\n"
         "  'baz': Not(Equals('qux')),\n"
         "  'foo': Equals('bar'),\n"
         "}",
         {}, matches_matcher),
        ("Differences: {\n"
         "  'baz': 'qux' matches Equals('qux'),\n"
         "}",
         {'foo': 'bar', 'baz': 'qux'}, matches_matcher),
        ("Differences: {\n"
         "  'baz': 'qux' matches Equals('qux'),\n"
         "  'foo': 'bar' != 'bop',\n"
         "}",
         {'foo': 'bop', 'baz': 'qux'}, matches_matcher),
        ("Extra: {\n"
         "  'cat': 'dog',\n"
         "}",
         {'foo': 'bar', 'baz': 'quux', 'cat': 'dog'}, matches_matcher),
        ("Extra: {\n"
         "  'cat': 'dog',\n"
         "}\n"
         "Missing: {\n"
         "  'baz': Not(Equals('qux')),\n"
         "}",
         {'foo': 'bar', 'cat': 'dog'}, matches_matcher),
        ]
class TestContainsDict(TestCase, TestMatchersInterface):
    """ContainsDict: candidate must supply (at least) all specified keys."""
    matches_matcher = ContainsDict(
        {'foo': Equals('bar'), 'baz': Not(Equals('qux'))})
    matches_matches = [
        {'foo': 'bar', 'baz': None},
        {'foo': 'bar', 'baz': 'quux'},
        {'foo': 'bar', 'baz': 'quux', 'cat': 'dog'},
        ]
    matches_mismatches = [
        {},
        {'foo': 'bar', 'baz': 'qux'},
        {'foo': 'bop', 'baz': 'qux'},
        {'foo': 'bar', 'cat': 'dog'},
        {'foo': 'bar'},
        ]
    str_examples = [
        ("ContainsDict({'baz': %s, 'foo': %s})" % (
                Not(Equals('qux')), Equals('bar')),
         matches_matcher),
        ]
    describe_examples = [
        ("Missing: {\n"
         "  'baz': Not(Equals('qux')),\n"
         "  'foo': Equals('bar'),\n"
         "}",
         {}, matches_matcher),
        ("Differences: {\n"
         "  'baz': 'qux' matches Equals('qux'),\n"
         "}",
         {'foo': 'bar', 'baz': 'qux'}, matches_matcher),
        ("Differences: {\n"
         "  'baz': 'qux' matches Equals('qux'),\n"
         "  'foo': 'bar' != 'bop',\n"
         "}",
         {'foo': 'bop', 'baz': 'qux'}, matches_matcher),
        ("Missing: {\n"
         "  'baz': Not(Equals('qux')),\n"
         "}",
         {'foo': 'bar', 'cat': 'dog'}, matches_matcher),
        ]
class TestContainedByDict(TestCase, TestMatchersInterface):
    """ContainedByDict: candidate's keys must be a subset of those given."""
    matches_matcher = ContainedByDict(
        {'foo': Equals('bar'), 'baz': Not(Equals('qux'))})
    matches_matches = [
        {},
        {'foo': 'bar'},
        {'foo': 'bar', 'baz': 'quux'},
        {'baz': 'quux'},
        ]
    matches_mismatches = [
        {'foo': 'bar', 'baz': 'quux', 'cat': 'dog'},
        {'foo': 'bar', 'baz': 'qux'},
        {'foo': 'bop', 'baz': 'qux'},
        {'foo': 'bar', 'cat': 'dog'},
        ]
    str_examples = [
        ("ContainedByDict({'baz': %s, 'foo': %s})" % (
                Not(Equals('qux')), Equals('bar')),
         matches_matcher),
        ]
    describe_examples = [
        ("Differences: {\n"
         "  'baz': 'qux' matches Equals('qux'),\n"
         "}",
         {'foo': 'bar', 'baz': 'qux'}, matches_matcher),
        ("Differences: {\n"
         "  'baz': 'qux' matches Equals('qux'),\n"
         "  'foo': 'bar' != 'bop',\n"
         "}",
         {'foo': 'bop', 'baz': 'qux'}, matches_matcher),
        ("Extra: {\n"
         "  'cat': 'dog',\n"
         "}",
         {'foo': 'bar', 'cat': 'dog'}, matches_matcher),
        ]
def test_suite():
    """Load every test case defined in this module."""
    from unittest import TestLoader
    loader = TestLoader()
    return loader.loadTestsFromName(__name__)

View File

@ -1,82 +0,0 @@
# Copyright (c) 2008-2012 testtools developers. See LICENSE for details.
import doctest
from testtools import TestCase
from testtools.compat import (
str_is_unicode,
_b,
_u,
)
from testtools.matchers._doctest import DocTestMatches
from testtools.tests.helpers import FullStackRunTest
from testtools.tests.matchers.helpers import TestMatchersInterface
class TestDocTestMatchesInterface(TestCase, TestMatchersInterface):
    """DocTestMatches: doctest-style comparison, here with ELLIPSIS."""
    matches_matcher = DocTestMatches("Ran 1 test in ...s", doctest.ELLIPSIS)
    matches_matches = ["Ran 1 test in 0.000s", "Ran 1 test in 1.234s"]
    matches_mismatches = ["Ran 1 tests in 0.000s", "Ran 2 test in 0.000s"]
    str_examples = [("DocTestMatches('Ran 1 test in ...s\\n')",
        DocTestMatches("Ran 1 test in ...s")),
        ("DocTestMatches('foo\\n', flags=8)", DocTestMatches("foo", flags=8)),
        ]
    describe_examples = [('Expected:\n    Ran 1 tests in ...s\nGot:\n'
        '    Ran 1 test in 0.123s\n', "Ran 1 test in 0.123s",
        DocTestMatches("Ran 1 tests in ...s", doctest.ELLIPSIS))]
class TestDocTestMatchesInterfaceUnicode(TestCase, TestMatchersInterface):
    """DocTestMatches with non-ascii unicode wanted text."""
    matches_matcher = DocTestMatches(_u("\xa7..."), doctest.ELLIPSIS)
    matches_matches = [_u("\xa7"), _u("\xa7 more\n")]
    matches_mismatches = ["\\xa7", _u("more \xa7"), _u("\n\xa7")]
    str_examples = [("DocTestMatches(%r)" % (_u("\xa7\n"),),
        DocTestMatches(_u("\xa7"))),
        ]
    describe_examples = [(
        _u("Expected:\n    \xa7\nGot:\n    a\n"),
        "a",
        DocTestMatches(_u("\xa7"), doctest.ELLIPSIS))]
class TestDocTestMatchesSpecific(TestCase):
    """Behaviour of DocTestMatches beyond the generic matcher contract."""
    run_tests_with = FullStackRunTest
    def test___init__simple(self):
        # A trailing newline is appended to the wanted text if missing.
        matcher = DocTestMatches("foo")
        self.assertEqual("foo\n", matcher.want)
    def test___init__flags(self):
        matcher = DocTestMatches("bar\n", doctest.ELLIPSIS)
        self.assertEqual("bar\n", matcher.want)
        self.assertEqual(doctest.ELLIPSIS, matcher.flags)
    def test_describe_non_ascii_bytes(self):
        """Even with bytestrings, the mismatch should be coercible to unicode

        DocTestMatches is intended for text, but the Python 2 str type also
        permits arbitrary binary inputs. This is a slightly bogus thing to do,
        and under Python 3 using bytes objects will reasonably raise an error.
        """
        header = _b("\x89PNG\r\n\x1a\n...")
        if str_is_unicode:
            self.assertRaises(TypeError,
                DocTestMatches, header, doctest.ELLIPSIS)
            return
        matcher = DocTestMatches(header, doctest.ELLIPSIS)
        mismatch = matcher.match(_b("GIF89a\1\0\1\0\0\0\0;"))
        # Must be treatable as unicode text, the exact output matters less
        self.assertTrue(unicode(mismatch.describe()))
def test_suite():
    """Load every test case defined in this module."""
    from unittest import TestLoader
    loader = TestLoader()
    return loader.loadTestsFromName(__name__)

View File

@ -1,192 +0,0 @@
# Copyright (c) 2008-2012 testtools developers. See LICENSE for details.
import sys
from testtools import TestCase
from testtools.matchers import (
AfterPreprocessing,
Equals,
)
from testtools.matchers._exception import (
MatchesException,
Raises,
raises,
)
from testtools.tests.helpers import FullStackRunTest
from testtools.tests.matchers.helpers import TestMatchersInterface
def make_error(type, *args, **kwargs):
    """Raise and immediately catch ``type(*args, **kwargs)``.

    :return: the resulting ``sys.exc_info()`` triple, giving the tests a
        genuine (exc_type, exc_value, traceback) tuple to feed to matchers.
    """
    try:
        raise type(*args, **kwargs)
    except type:
        error = sys.exc_info()
    return error
class TestMatchesExceptionInstanceInterface(TestCase, TestMatchersInterface):
    """MatchesException given an instance: type and args must both match."""
    matches_matcher = MatchesException(ValueError("foo"))
    error_foo = make_error(ValueError, 'foo')
    error_bar = make_error(ValueError, 'bar')
    error_base_foo = make_error(Exception, 'foo')
    matches_matches = [error_foo]
    matches_mismatches = [error_bar, error_base_foo]
    str_examples = [
        ("MatchesException(Exception('foo',))",
         MatchesException(Exception('foo')))
        ]
    describe_examples = [
        ("%r is not a %r" % (Exception, ValueError),
         error_base_foo,
         MatchesException(ValueError("foo"))),
        ("ValueError('bar',) has different arguments to ValueError('foo',).",
         error_bar,
         MatchesException(ValueError("foo"))),
        ]
class TestMatchesExceptionTypeInterface(TestCase, TestMatchersInterface):
    """MatchesException given a type: subclasses match, base classes don't."""
    matches_matcher = MatchesException(ValueError)
    error_foo = make_error(ValueError, 'foo')
    error_sub = make_error(UnicodeError, 'bar')
    error_base_foo = make_error(Exception, 'foo')
    matches_matches = [error_foo, error_sub]
    matches_mismatches = [error_base_foo]
    str_examples = [
        ("MatchesException(%r)" % Exception,
         MatchesException(Exception))
        ]
    describe_examples = [
        ("%r is not a %r" % (Exception, ValueError),
         error_base_foo,
         MatchesException(ValueError)),
        ]
class TestMatchesExceptionTypeReInterface(TestCase, TestMatchersInterface):
    """MatchesException with a regex: the str() of the error must match."""
    matches_matcher = MatchesException(ValueError, 'fo.')
    error_foo = make_error(ValueError, 'foo')
    error_sub = make_error(UnicodeError, 'foo')
    error_bar = make_error(ValueError, 'bar')
    matches_matches = [error_foo, error_sub]
    matches_mismatches = [error_bar]
    str_examples = [
        ("MatchesException(%r)" % Exception,
         MatchesException(Exception, 'fo.'))
        ]
    describe_examples = [
        ("'bar' does not match /fo./",
         error_bar, MatchesException(ValueError, "fo.")),
        ]
class TestMatchesExceptionTypeMatcherInterface(TestCase, TestMatchersInterface):
    """MatchesException with a type plus a nested value matcher."""

    # The nested matcher receives str(exception) via AfterPreprocessing.
    matches_matcher = MatchesException(
        ValueError, AfterPreprocessing(str, Equals('foo')))
    error_foo = make_error(ValueError, 'foo')
    error_sub = make_error(UnicodeError, 'foo')
    error_bar = make_error(ValueError, 'bar')
    matches_matches = [error_foo, error_sub]
    matches_mismatches = [error_bar]

    str_examples = [
        ("MatchesException(%r)" % Exception,
         MatchesException(Exception, Equals('foo')))
        ]
    describe_examples = [
        ("5 != %r" % (error_bar[1],),
         error_bar, MatchesException(ValueError, Equals(5))),
        ]
class TestRaisesInterface(TestCase, TestMatchersInterface):
    """Raises() with no argument matches any callable that raises."""

    matches_matcher = Raises()

    # Plain function (no self): used as a value in the fixture lists below.
    def boom():
        raise Exception('foo')
    matches_matches = [boom]
    matches_mismatches = [lambda:None]

    # Tricky to get function objects to render constantly, and the interfaces
    # helper uses assertEqual rather than (for instance) DocTestMatches.
    str_examples = []
    describe_examples = []
class TestRaisesExceptionMatcherInterface(TestCase, TestMatchersInterface):
    """Raises(exception_matcher=...): only a matching raise satisfies it."""

    matches_matcher = Raises(
        exception_matcher=MatchesException(Exception('foo')))

    # Plain functions (no self): used as values in the fixture lists below.
    def boom_bar():
        raise Exception('bar')
    def boom_foo():
        raise Exception('foo')

    # Only the callable raising the matching exception counts as a match.
    matches_matches = [boom_foo]
    matches_mismatches = [lambda:None, boom_bar]

    # Tricky to get function objects to render constantly, and the interfaces
    # helper uses assertEqual rather than (for instance) DocTestMatches.
    str_examples = []
    describe_examples = []
class TestRaisesBaseTypes(TestCase):
    """How Raises treats KeyboardInterrupt (not an Exception subclass)."""

    run_tests_with = FullStackRunTest

    def raiser(self):
        # Fixture callable that always raises KeyboardInterrupt.
        raise KeyboardInterrupt('foo')

    def test_KeyboardInterrupt_matched(self):
        # When KeyboardInterrupt is matched, it is swallowed.
        matcher = Raises(MatchesException(KeyboardInterrupt))
        self.assertThat(self.raiser, matcher)

    def test_KeyboardInterrupt_propogates(self):
        # The default 'it raised' propogates KeyboardInterrupt.
        match_keyb = Raises(MatchesException(KeyboardInterrupt))
        def raise_keyb_from_match():
            matcher = Raises()
            matcher.match(self.raiser)
        self.assertThat(raise_keyb_from_match, match_keyb)

    def test_KeyboardInterrupt_match_Exception_propogates(self):
        # If the raised exception isn't matched, and it is not a subclass of
        # Exception, it is propogated.
        match_keyb = Raises(MatchesException(KeyboardInterrupt))
        def raise_keyb_from_match():
            if sys.version_info > (2, 5):
                matcher = Raises(MatchesException(Exception))
            else:
                # On Python 2.4 KeyboardInterrupt is a StandardError subclass
                # but should propogate from less generic exception matchers
                matcher = Raises(MatchesException(EnvironmentError))
            matcher.match(self.raiser)
        self.assertThat(raise_keyb_from_match, match_keyb)
class TestRaisesConvenience(TestCase):
    """Exercise the raises() convenience helper."""

    run_tests_with = FullStackRunTest

    def test_exc_type(self):
        # raises() accepts a bare exception type.
        self.assertThat(lambda: 1 / 0, raises(ZeroDivisionError))

    def test_exc_value(self):
        # raises() also accepts a specific exception instance.
        error = RuntimeError("You lose!")
        def raise_it():
            raise error
        self.assertThat(raise_it, raises(error))
def test_suite():
    """Collect every test in this module for the standard unittest loader."""
    import unittest
    return unittest.TestLoader().loadTestsFromName(__name__)

View File

@ -1,243 +0,0 @@
# Copyright (c) 2008-2012 testtools developers. See LICENSE for details.
import os
import shutil
import tarfile
import tempfile
from testtools import TestCase
from testtools.matchers import (
Contains,
DocTestMatches,
Equals,
)
from testtools.matchers._filesystem import (
DirContains,
DirExists,
FileContains,
FileExists,
HasPermissions,
PathExists,
SamePath,
TarballContains,
)
class PathHelpers(object):
    """Mixin with small filesystem helpers for the path-matcher tests."""

    def mkdtemp(self):
        # A fresh temporary directory, removed automatically at cleanup.
        directory = tempfile.mkdtemp()
        self.addCleanup(shutil.rmtree, directory)
        return directory

    def create_file(self, filename, contents=''):
        # Write (or overwrite) filename with the given text.
        with open(filename, 'w') as fp:
            fp.write(contents)

    def touch(self, filename):
        # An empty file is just a file with no contents.
        return self.create_file(filename)
class TestPathExists(TestCase, PathHelpers):
    """Tests for the PathExists matcher."""

    def test_exists(self):
        # Any existing path satisfies PathExists.
        self.assertThat(self.mkdtemp(), PathExists())

    def test_not_exists(self):
        missing = os.path.join(self.mkdtemp(), 'doesntexist')
        mismatch = PathExists().match(missing)
        self.assertThat(
            "%s does not exist." % missing, Equals(mismatch.describe()))
class TestDirExists(TestCase, PathHelpers):
    """Tests for the DirExists matcher."""

    def test_exists(self):
        tempdir = self.mkdtemp()
        self.assertThat(tempdir, DirExists())

    def test_not_exists(self):
        doesntexist = os.path.join(self.mkdtemp(), 'doesntexist')
        mismatch = DirExists().match(doesntexist)
        # A missing path yields the same description PathExists would give.
        self.assertThat(
            PathExists().match(doesntexist).describe(),
            Equals(mismatch.describe()))

    def test_not_a_directory(self):
        # An existing regular file is not a directory.
        filename = os.path.join(self.mkdtemp(), 'foo')
        self.touch(filename)
        mismatch = DirExists().match(filename)
        self.assertThat(
            "%s is not a directory." % filename, Equals(mismatch.describe()))
class TestFileExists(TestCase, PathHelpers):
    """Tests for the FileExists matcher."""

    def test_exists(self):
        tempdir = self.mkdtemp()
        filename = os.path.join(tempdir, 'filename')
        self.touch(filename)
        self.assertThat(filename, FileExists())

    def test_not_exists(self):
        doesntexist = os.path.join(self.mkdtemp(), 'doesntexist')
        mismatch = FileExists().match(doesntexist)
        # A missing path yields the same description PathExists would give.
        self.assertThat(
            PathExists().match(doesntexist).describe(),
            Equals(mismatch.describe()))

    def test_not_a_file(self):
        # A directory exists but is not a regular file.
        tempdir = self.mkdtemp()
        mismatch = FileExists().match(tempdir)
        self.assertThat(
            "%s is not a file." % tempdir, Equals(mismatch.describe()))
class TestDirContains(TestCase, PathHelpers):
    """Tests for the DirContains matcher."""

    def test_empty(self):
        tempdir = self.mkdtemp()
        self.assertThat(tempdir, DirContains([]))

    def test_not_exists(self):
        doesntexist = os.path.join(self.mkdtemp(), 'doesntexist')
        mismatch = DirContains([]).match(doesntexist)
        # A missing path yields the same description PathExists would give.
        self.assertThat(
            PathExists().match(doesntexist).describe(),
            Equals(mismatch.describe()))

    def test_contains_files(self):
        tempdir = self.mkdtemp()
        self.touch(os.path.join(tempdir, 'foo'))
        self.touch(os.path.join(tempdir, 'bar'))
        # Expected names are given sorted; the matcher appears to compare
        # against a sorted listing — confirm in the DirContains matcher.
        self.assertThat(tempdir, DirContains(['bar', 'foo']))

    def test_matcher(self):
        # Alternatively a matcher may be applied to the directory listing.
        tempdir = self.mkdtemp()
        self.touch(os.path.join(tempdir, 'foo'))
        self.touch(os.path.join(tempdir, 'bar'))
        self.assertThat(tempdir, DirContains(matcher=Contains('bar')))

    def test_neither_specified(self):
        # Exactly one of filenames/matcher must be supplied.
        self.assertRaises(AssertionError, DirContains)

    def test_both_specified(self):
        self.assertRaises(
            AssertionError, DirContains, filenames=[], matcher=Contains('a'))

    def test_does_not_contain_files(self):
        tempdir = self.mkdtemp()
        self.touch(os.path.join(tempdir, 'foo'))
        mismatch = DirContains(['bar', 'foo']).match(tempdir)
        # The mismatch reads the same as a plain Equals on the name lists.
        self.assertThat(
            Equals(['bar', 'foo']).match(['foo']).describe(),
            Equals(mismatch.describe()))
class TestFileContains(TestCase, PathHelpers):
    """Tests for the FileContains matcher."""

    def test_not_exists(self):
        doesntexist = os.path.join(self.mkdtemp(), 'doesntexist')
        mismatch = FileContains('').match(doesntexist)
        # A missing file yields the same description PathExists would give.
        self.assertThat(
            PathExists().match(doesntexist).describe(),
            Equals(mismatch.describe()))

    def test_contains(self):
        tempdir = self.mkdtemp()
        filename = os.path.join(tempdir, 'foo')
        self.create_file(filename, 'Hello World!')
        self.assertThat(filename, FileContains('Hello World!'))

    def test_matcher(self):
        # Alternatively a matcher may be applied to the file's contents.
        tempdir = self.mkdtemp()
        filename = os.path.join(tempdir, 'foo')
        self.create_file(filename, 'Hello World!')
        self.assertThat(
            filename, FileContains(matcher=DocTestMatches('Hello World!')))

    def test_neither_specified(self):
        # Exactly one of contents/matcher must be supplied.
        self.assertRaises(AssertionError, FileContains)

    def test_both_specified(self):
        self.assertRaises(
            AssertionError, FileContains, contents=[], matcher=Contains('a'))

    def test_does_not_contain(self):
        tempdir = self.mkdtemp()
        filename = os.path.join(tempdir, 'foo')
        self.create_file(filename, 'Goodbye Cruel World!')
        mismatch = FileContains('Hello World!').match(filename)
        # The mismatch reads the same as a plain Equals on the contents.
        self.assertThat(
            Equals('Hello World!').match('Goodbye Cruel World!').describe(),
            Equals(mismatch.describe()))
class TestTarballContains(TestCase, PathHelpers):
    """Tests for the TarballContains matcher."""

    def _make_tarball(self, filenames):
        """Build <tempdir>/foo.tar.gz holding the given (empty) files.

        Each file is archived under its bare name.  Returns the tarball's
        path.  NOTE: mode 'w' writes an uncompressed archive despite the
        .gz name, matching the original fixtures.
        """
        tempdir = self.mkdtemp()
        tarball_path = os.path.join(tempdir, 'foo.tar.gz')
        tarball = tarfile.open(tarball_path, 'w')
        try:
            for name in filenames:
                path = os.path.join(tempdir, name)
                self.touch(path)
                tarball.add(path, name)
        finally:
            # Close even if add() fails, so the scratch dir can be removed.
            tarball.close()
        return tarball_path

    def test_match(self):
        # Order of the expected names does not matter.
        tarball_path = self._make_tarball(['a', 'b'])
        self.assertThat(tarball_path, TarballContains(['b', 'a']))

    def test_mismatch(self):
        tarball_path = self._make_tarball(['a', 'b'])
        mismatch = TarballContains(['d', 'c']).match(tarball_path)
        # The mismatch reads the same as a plain Equals on the name lists.
        self.assertEqual(
            mismatch.describe(),
            Equals(['c', 'd']).match(['a', 'b']).describe())
class TestSamePath(TestCase, PathHelpers):
    """Tests for the SamePath matcher."""

    def test_same_string(self):
        self.assertThat('foo', SamePath('foo'))

    def test_relative_and_absolute(self):
        # Relative and absolute spellings of one path match both ways round.
        path = 'foo'
        abspath = os.path.abspath(path)
        self.assertThat(path, SamePath(abspath))
        self.assertThat(abspath, SamePath(path))

    def test_real_path(self):
        # A symlink and its target count as the same path.
        tempdir = self.mkdtemp()
        source = os.path.join(tempdir, 'source')
        self.touch(source)
        target = os.path.join(tempdir, 'target')
        try:
            os.symlink(source, target)
        except (AttributeError, NotImplementedError):
            # Platforms without os.symlink support.
            self.skip("No symlink support")
        self.assertThat(source, SamePath(target))
        self.assertThat(target, SamePath(source))
class TestHasPermissions(TestCase, PathHelpers):
    """Tests for the HasPermissions matcher."""

    def test_match(self):
        tempdir = self.mkdtemp()
        filename = os.path.join(tempdir, 'filename')
        self.touch(filename)
        # Last four characters of the octal st_mode, e.g. '0644'.
        permissions = oct(os.stat(filename).st_mode)[-4:]
        self.assertThat(filename, HasPermissions(permissions))
def test_suite():
    """Collect every test in this module for the standard unittest loader."""
    import unittest
    return unittest.TestLoader().loadTestsFromName(__name__)

View File

@ -1,227 +0,0 @@
# Copyright (c) 2008-2011 testtools developers. See LICENSE for details.
from testtools import TestCase
from testtools.matchers import (
DocTestMatches,
Equals,
LessThan,
MatchesStructure,
Mismatch,
NotEquals,
)
from testtools.matchers._higherorder import (
AfterPreprocessing,
AllMatch,
Annotate,
AnnotatedMismatch,
AnyMatch,
MatchesAny,
MatchesAll,
MatchesPredicate,
Not,
)
from testtools.tests.helpers import FullStackRunTest
from testtools.tests.matchers.helpers import TestMatchersInterface
class TestAllMatch(TestCase, TestMatchersInterface):
    """AllMatch: every element of an iterable must satisfy the matcher."""

    matches_matcher = AllMatch(LessThan(10))
    # Lists, tuples and plain iterators are all acceptable matchees.
    matches_matches = [
        [9, 9, 9],
        (9, 9),
        iter([9, 9, 9, 9, 9]),
        ]
    matches_mismatches = [
        [11, 9, 9],
        iter([9, 12, 9, 11]),
        ]

    str_examples = [
        ("AllMatch(LessThan(12))", AllMatch(LessThan(12))),
        ]
    describe_examples = [
        # One description per failing element, wrapped in 'Differences:'.
        ('Differences: [\n'
         '10 is not > 11\n'
         '10 is not > 10\n'
         ']',
         [11, 9, 10],
         AllMatch(LessThan(10))),
        ]
class TestAnyMatch(TestCase, TestMatchersInterface):
    """AnyMatch: at least one element must satisfy the matcher."""

    matches_matcher = AnyMatch(Equals('elephant'))
    # Lists, tuples and sets are all acceptable matchees.
    matches_matches = [
        ['grass', 'cow', 'steak', 'milk', 'elephant'],
        (13, 'elephant'),
        ['elephant', 'elephant', 'elephant'],
        set(['hippo', 'rhino', 'elephant']),
        ]
    matches_mismatches = [
        [],
        ['grass', 'cow', 'steak', 'milk'],
        (13, 12, 10),
        ['element', 'hephalump', 'pachyderm'],
        set(['hippo', 'rhino', 'diplodocus']),
        ]

    str_examples = [
        ("AnyMatch(Equals('elephant'))", AnyMatch(Equals('elephant'))),
        ]
    describe_examples = [
        # When nothing matches, one difference per element is reported.
        ('Differences: [\n'
         '7 != 11\n'
         '7 != 9\n'
         '7 != 10\n'
         ']',
         [11, 9, 10],
         AnyMatch(Equals(7))),
        ]
class TestAfterPreprocessing(TestCase, TestMatchersInterface):
    """AfterPreprocessing: apply a function, then match the result."""

    # Deliberately a plain function (no self): it is used as a value in the
    # fixtures below, not as a bound method.
    def parity(x):
        return x % 2

    matches_matcher = AfterPreprocessing(parity, Equals(1))
    matches_matches = [3, 5]
    matches_mismatches = [2]

    str_examples = [
        ("AfterPreprocessing(<function parity>, Equals(1))",
         AfterPreprocessing(parity, Equals(1))),
        ]
    describe_examples = [
        # annotate=True (the default) appends the preprocessing note.
        ("1 != 0: after <function parity> on 2", 2,
         AfterPreprocessing(parity, Equals(1))),
        ("1 != 0", 2,
         AfterPreprocessing(parity, Equals(1), annotate=False)),
        ]
class TestMatchersAnyInterface(TestCase, TestMatchersInterface):
    """MatchesAny: succeeds when at least one sub-matcher succeeds."""

    matches_matcher = MatchesAny(DocTestMatches("1"), DocTestMatches("2"))
    matches_matches = ["1", "2"]
    matches_mismatches = ["3"]

    str_examples = [(
        "MatchesAny(DocTestMatches('1\\n'), DocTestMatches('2\\n'))",
        MatchesAny(DocTestMatches("1"), DocTestMatches("2"))),
        ]
    # On mismatch, every sub-matcher's description is reported in turn.
    describe_examples = [("""Differences: [
Expected:
1
Got:
3
Expected:
2
Got:
3
]""",
        "3", MatchesAny(DocTestMatches("1"), DocTestMatches("2")))]
class TestMatchesAllInterface(TestCase, TestMatchersInterface):
    """MatchesAll: every sub-matcher must succeed."""

    matches_matcher = MatchesAll(NotEquals(1), NotEquals(2))
    matches_matches = [3, 4]
    matches_mismatches = [1, 2]

    str_examples = [
        ("MatchesAll(NotEquals(1), NotEquals(2))",
         MatchesAll(NotEquals(1), NotEquals(2)))]

    describe_examples = [
        ("""Differences: [
1 == 1
]""",
         1, MatchesAll(NotEquals(1), NotEquals(2))),
        # first_only=True reports just the first failing sub-matcher.
        ("1 == 1", 1,
         MatchesAll(NotEquals(2), NotEquals(1), Equals(3), first_only=True)),
        ]
class TestAnnotate(TestCase, TestMatchersInterface):
    """Annotate: wrap a matcher, appending a message to its mismatch."""

    matches_matcher = Annotate("foo", Equals(1))
    matches_matches = [1]
    matches_mismatches = [2]

    str_examples = [
        ("Annotate('foo', Equals(1))", Annotate("foo", Equals(1)))]
    describe_examples = [("1 != 2: foo", 2, Annotate('foo', Equals(1)))]

    def test_if_message_no_message(self):
        # Annotate.if_message returns the given matcher if there is no
        # message.
        matcher = Equals(1)
        not_annotated = Annotate.if_message('', matcher)
        self.assertIs(matcher, not_annotated)

    def test_if_message_given_message(self):
        # Annotate.if_message returns an annotated version of the matcher if a
        # message is provided.
        matcher = Equals(1)
        expected = Annotate('foo', matcher)
        annotated = Annotate.if_message('foo', matcher)
        self.assertThat(
            annotated,
            MatchesStructure.fromExample(expected, 'annotation', 'matcher'))
class TestAnnotatedMismatch(TestCase):
    """AnnotatedMismatch delegates detail reporting to the wrapped mismatch."""

    run_tests_with = FullStackRunTest

    def test_forwards_details(self):
        wrapped = Mismatch('description', {'foo': 'bar'})
        annotated = AnnotatedMismatch("annotation", wrapped)
        self.assertEqual(wrapped.get_details(), annotated.get_details())
class TestNotInterface(TestCase, TestMatchersInterface):
    """Not: inverts the decorated matcher."""

    matches_matcher = Not(Equals(1))
    matches_matches = [2]
    matches_mismatches = [1]

    str_examples = [
        ("Not(Equals(1))", Not(Equals(1))),
        ("Not(Equals('1'))", Not(Equals('1')))]

    # The mismatch explains that the value matched the inverted matcher.
    describe_examples = [('1 matches Equals(1)', 1, Not(Equals(1)))]
def is_even(x):
    """Return True when x divides evenly by two, else False."""
    return divmod(x, 2)[1] == 0
class TestMatchesPredicate(TestCase, TestMatchersInterface):
    """MatchesPredicate: a boolean function plus a %-style message template."""

    matches_matcher = MatchesPredicate(is_even, "%s is not even")
    matches_matches = [2, 4, 6, 8]
    matches_mismatches = [3, 5, 7, 9]

    str_examples = [
        ("MatchesPredicate(%r, %r)" % (is_even, "%s is not even"),
         MatchesPredicate(is_even, "%s is not even")),
        ]
    describe_examples = [
        # The template is %-formatted with the mismatching value.
        ('7 is not even', 7, MatchesPredicate(is_even, "%s is not even")),
        ]
def test_suite():
    """Collect every test in this module for the standard unittest loader."""
    import unittest
    return unittest.TestLoader().loadTestsFromName(__name__)

View File

@ -1,132 +0,0 @@
# Copyright (c) 2008-2012 testtools developers. See LICENSE for details.
"""Tests for matchers."""
from testtools import (
Matcher, # check that Matcher is exposed at the top level for docs.
TestCase,
)
from testtools.compat import (
str_is_unicode,
text_repr,
_u,
)
from testtools.matchers import (
Equals,
MatchesException,
Raises,
)
from testtools.matchers._impl import (
Mismatch,
MismatchDecorator,
MismatchError,
)
from testtools.tests.helpers import FullStackRunTest
# Silence pyflakes.
Matcher
class TestMismatch(TestCase):
    """Behaviour of the basic Mismatch object."""

    run_tests_with = FullStackRunTest

    def test_constructor_arguments(self):
        # Description and details given at construction are returned as-is.
        mismatch = Mismatch("some description", {'detail': "things"})
        self.assertEqual("some description", mismatch.describe())
        self.assertEqual({'detail': "things"}, mismatch.get_details())

    def test_constructor_no_arguments(self):
        # Without a description, describe() raises NotImplementedError;
        # details default to an empty dict.
        mismatch = Mismatch()
        self.assertThat(mismatch.describe,
            Raises(MatchesException(NotImplementedError)))
        self.assertEqual({}, mismatch.get_details())
class TestMismatchError(TestCase):
    """Behaviour of MismatchError, the exception assertThat raises."""

    def test_is_assertion_error(self):
        # MismatchError is an AssertionError, so that most of the time, it
        # looks like a test failure, rather than an error.
        def raise_mismatch_error():
            raise MismatchError(2, Equals(3), Equals(3).match(2))
        self.assertRaises(AssertionError, raise_mismatch_error)

    def test_default_description_is_mismatch(self):
        # By default str() of the error is just the mismatch description.
        mismatch = Equals(3).match(2)
        e = MismatchError(2, Equals(3), mismatch)
        self.assertEqual(mismatch.describe(), str(e))

    def test_default_description_unicode(self):
        # Same as above but with a non-ASCII matchee.
        matchee = _u('\xa7')
        matcher = Equals(_u('a'))
        mismatch = matcher.match(matchee)
        e = MismatchError(matchee, matcher, mismatch)
        self.assertEqual(mismatch.describe(), str(e))

    def test_verbose_description(self):
        # verbose=True includes matchee, matcher and difference.
        matchee = 2
        matcher = Equals(3)
        mismatch = matcher.match(2)
        e = MismatchError(matchee, matcher, mismatch, True)
        expected = (
            'Match failed. Matchee: %r\n'
            'Matcher: %s\n'
            'Difference: %s\n' % (
                matchee,
                matcher,
                matcher.match(matchee).describe(),
                ))
        self.assertEqual(expected, str(e))

    def test_verbose_unicode(self):
        # When assertThat is given matchees or matchers that contain non-ASCII
        # unicode strings, we can still provide a meaningful error.
        matchee = _u('\xa7')
        matcher = Equals(_u('a'))
        mismatch = matcher.match(matchee)
        expected = (
            'Match failed. Matchee: %s\n'
            'Matcher: %s\n'
            'Difference: %s\n' % (
                text_repr(matchee),
                matcher,
                mismatch.describe(),
                ))
        e = MismatchError(matchee, matcher, mismatch, True)
        if str_is_unicode:
            actual = str(e)
        else:
            actual = unicode(e)
            # Using str() should still work, and return ascii only
            self.assertEqual(
                expected.replace(matchee, matchee.encode("unicode-escape")),
                str(e).decode("ascii"))
        self.assertEqual(expected, actual)
class TestMismatchDecorator(TestCase):
    """MismatchDecorator forwards everything to the wrapped mismatch."""

    run_tests_with = FullStackRunTest

    def test_forwards_description(self):
        inner = Mismatch("description", {'foo': 'bar'})
        wrapper = MismatchDecorator(inner)
        self.assertEqual(inner.describe(), wrapper.describe())

    def test_forwards_details(self):
        inner = Mismatch("description", {'foo': 'bar'})
        wrapper = MismatchDecorator(inner)
        self.assertEqual(inner.get_details(), wrapper.get_details())

    def test_repr(self):
        inner = Mismatch("description", {'foo': 'bar'})
        wrapper = MismatchDecorator(inner)
        self.assertEqual(
            '<testtools.matchers.MismatchDecorator(%r)>' % (inner,),
            repr(wrapper))
def test_suite():
    """Collect every test in this module for the standard unittest loader."""
    import unittest
    return unittest.TestLoader().loadTestsFromName(__name__)

View File

@ -1,432 +0,0 @@
# Copyright (c) 2010 testtools developers. See LICENSE for details.
"""Tests for miscellaneous compatibility functions"""
import linecache
import os
import sys
import tempfile
import traceback
import testtools
from testtools.compat import (
_b,
_detect_encoding,
_get_source_encoding,
_u,
reraise,
str_is_unicode,
text_repr,
unicode_output_stream,
)
from testtools.matchers import (
MatchesException,
Not,
Raises,
)
class TestDetectEncoding(testtools.TestCase):
    """Test detection of Python source encodings"""

    def _check_encoding(self, expected, lines, possibly_invalid=False):
        """Check lines are valid Python and encoding is as expected"""
        if not possibly_invalid:
            # Sanity check: the sample really is compilable Python.
            compile(_b("".join(lines)), "<str>", "exec")
        encoding = _detect_encoding(lines)
        self.assertEqual(expected, encoding,
            "Encoding %r expected but got %r from lines %r" %
            (expected, encoding, lines))

    def test_examples_from_pep(self):
        """Check the examples given in PEP 263 all work as specified

        See 'Examples' section of <http://www.python.org/dev/peps/pep-0263/>
        """
        # With interpreter binary and using Emacs style file encoding comment:
        self._check_encoding("latin-1", (
            "#!/usr/bin/python\n",
            "# -*- coding: latin-1 -*-\n",
            "import os, sys\n"))
        self._check_encoding("iso-8859-15", (
            "#!/usr/bin/python\n",
            "# -*- coding: iso-8859-15 -*-\n",
            "import os, sys\n"))
        self._check_encoding("ascii", (
            "#!/usr/bin/python\n",
            "# -*- coding: ascii -*-\n",
            "import os, sys\n"))
        # Without interpreter line, using plain text:
        self._check_encoding("utf-8", (
            "# This Python file uses the following encoding: utf-8\n",
            "import os, sys\n"))
        # Text editors might have different ways of defining the file's
        # encoding, e.g.
        self._check_encoding("latin-1", (
            "#!/usr/local/bin/python\n",
            "# coding: latin-1\n",
            "import os, sys\n"))
        # Without encoding comment, Python's parser will assume ASCII text:
        self._check_encoding("ascii", (
            "#!/usr/local/bin/python\n",
            "import os, sys\n"))
        # Encoding comments which don't work:
        # Missing "coding:" prefix:
        self._check_encoding("ascii", (
            "#!/usr/local/bin/python\n",
            "# latin-1\n",
            "import os, sys\n"))
        # Encoding comment not on line 1 or 2:
        self._check_encoding("ascii", (
            "#!/usr/local/bin/python\n",
            "#\n",
            "# -*- coding: latin-1 -*-\n",
            "import os, sys\n"))
        # Unsupported encoding:
        self._check_encoding("ascii", (
            "#!/usr/local/bin/python\n",
            "# -*- coding: utf-42 -*-\n",
            "import os, sys\n"),
            possibly_invalid=True)

    def test_bom(self):
        """Test the UTF-8 BOM counts as an encoding declaration"""
        self._check_encoding("utf-8", (
            "\xef\xbb\xbfimport sys\n",
            ))
        self._check_encoding("utf-8", (
            "\xef\xbb\xbf# File encoding: utf-8\n",
            ))
        self._check_encoding("utf-8", (
            '\xef\xbb\xbf"""Module docstring\n',
            '\xef\xbb\xbfThat should just be a ZWNB"""\n'))
        self._check_encoding("latin-1", (
            '"""Is this coding: latin-1 or coding: utf-8 instead?\n',
            '\xef\xbb\xbfThose should be latin-1 bytes"""\n'))
        self._check_encoding("utf-8", (
            "\xef\xbb\xbf# Is the coding: utf-8 or coding: euc-jp instead?\n",
            '"""Module docstring say \xe2\x98\x86"""\n'))

    def test_multiple_coding_comments(self):
        """Test only the first of multiple coding declarations counts"""
        self._check_encoding("iso-8859-1", (
            "# Is the coding: iso-8859-1\n",
            "# Or is it coding: iso-8859-2\n"),
            possibly_invalid=True)
        self._check_encoding("iso-8859-1", (
            "#!/usr/bin/python\n",
            "# Is the coding: iso-8859-1\n",
            "# Or is it coding: iso-8859-2\n"))
        self._check_encoding("iso-8859-1", (
            "# Is the coding: iso-8859-1 or coding: iso-8859-2\n",
            "# Or coding: iso-8859-3 or coding: iso-8859-4\n"),
            possibly_invalid=True)
        self._check_encoding("iso-8859-2", (
            "# Is the coding iso-8859-1 or coding: iso-8859-2\n",
            "# Spot the missing colon above\n"))
class TestGetSourceEncoding(testtools.TestCase):
    """Test reading and caching the encodings of source files"""

    def setUp(self):
        testtools.TestCase.setUp(self)
        dir = tempfile.mkdtemp()
        self.addCleanup(os.rmdir, dir)
        # One scratch source file per test, named after the test method.
        self.filename = os.path.join(dir, self.id().rsplit(".", 1)[1] + ".py")
        self._written = False

    def put_source(self, text):
        """(Re)write the scratch source file, registering cleanups once."""
        # 'with' guarantees the handle is closed even if write() fails.
        with open(self.filename, "w") as f:
            f.write(text)
        if not self._written:
            self._written = True
            self.addCleanup(os.remove, self.filename)
            # linecache caches file contents keyed by path; drop our entry.
            self.addCleanup(linecache.cache.pop, self.filename, None)

    def test_nonexistant_file_as_ascii(self):
        """When file can't be found, the encoding should default to ascii"""
        # assertEqual, not the deprecated assertEquals alias.
        self.assertEqual("ascii", _get_source_encoding(self.filename))

    def test_encoding_is_cached(self):
        """The encoding should stay the same if the cache isn't invalidated"""
        self.put_source(
            "# coding: iso-8859-13\n"
            "import os\n")
        self.assertEqual("iso-8859-13", _get_source_encoding(self.filename))
        self.put_source(
            "# coding: rot-13\n"
            "vzcbeg bf\n")
        # The stale cached encoding is still returned.
        self.assertEqual("iso-8859-13", _get_source_encoding(self.filename))

    def test_traceback_rechecks_encoding(self):
        """A traceback function checks the cache and resets the encoding"""
        self.put_source(
            "# coding: iso-8859-8\n"
            "import os\n")
        self.assertEqual("iso-8859-8", _get_source_encoding(self.filename))
        self.put_source(
            "# coding: utf-8\n"
            "import os\n")
        try:
            exec (compile("raise RuntimeError\n", self.filename, "exec"))
        except RuntimeError:
            # Extracting the traceback makes linecache re-read the file.
            traceback.extract_tb(sys.exc_info()[2])
        else:
            self.fail("RuntimeError not raised")
        self.assertEqual("utf-8", _get_source_encoding(self.filename))
class _FakeOutputStream(object):
"""A simple file-like object for testing"""
def __init__(self):
self.writelog = []
def write(self, obj):
self.writelog.append(obj)
class TestUnicodeOutputStream(testtools.TestCase):
    """Test wrapping output streams so they work with arbitrary unicode"""

    # Sample text containing non-Latin code points ('\u026a', '\u03b8',
    # '\u0259'), so only 'pa' and 'n' survive an ascii encode.
    uni = _u("pa\u026a\u03b8\u0259n")

    def setUp(self):
        super(TestUnicodeOutputStream, self).setUp()
        if sys.platform == "cli":
            self.skip("IronPython shouldn't wrap streams to do encoding")

    def test_no_encoding_becomes_ascii(self):
        """A stream with no encoding attribute gets ascii/replace strings"""
        sout = _FakeOutputStream()
        unicode_output_stream(sout).write(self.uni)
        self.assertEqual([_b("pa???n")], sout.writelog)

    def test_encoding_as_none_becomes_ascii(self):
        """A stream with encoding value of None gets ascii/replace strings"""
        sout = _FakeOutputStream()
        sout.encoding = None
        unicode_output_stream(sout).write(self.uni)
        self.assertEqual([_b("pa???n")], sout.writelog)

    def test_bogus_encoding_becomes_ascii(self):
        """A stream with a bogus encoding gets ascii/replace strings"""
        sout = _FakeOutputStream()
        sout.encoding = "bogus"
        unicode_output_stream(sout).write(self.uni)
        self.assertEqual([_b("pa???n")], sout.writelog)

    def test_partial_encoding_replace(self):
        """A string which can be partly encoded correctly should be"""
        sout = _FakeOutputStream()
        sout.encoding = "iso-8859-7"
        unicode_output_stream(sout).write(self.uni)
        # theta encodes in iso-8859-7; the other two become '?'.
        self.assertEqual([_b("pa?\xe8?n")], sout.writelog)

    @testtools.skipIf(str_is_unicode, "Tests behaviour when str is not unicode")
    def test_unicode_encodings_wrapped_when_str_is_not_unicode(self):
        """A unicode encoding is wrapped but needs no error handler"""
        sout = _FakeOutputStream()
        sout.encoding = "utf-8"
        uout = unicode_output_stream(sout)
        self.assertEqual(uout.errors, "strict")
        uout.write(self.uni)
        self.assertEqual([_b("pa\xc9\xaa\xce\xb8\xc9\x99n")], sout.writelog)

    @testtools.skipIf(not str_is_unicode, "Tests behaviour when str is unicode")
    def test_unicode_encodings_not_wrapped_when_str_is_unicode(self):
        # No wrapping needed if native str type is unicode
        sout = _FakeOutputStream()
        sout.encoding = "utf-8"
        uout = unicode_output_stream(sout)
        self.assertIs(uout, sout)

    def test_stringio(self):
        """A StringIO object should maybe get an ascii native str type"""
        try:
            from cStringIO import StringIO
            newio = False
        except ImportError:
            from io import StringIO
            newio = True
        sout = StringIO()
        soutwrapper = unicode_output_stream(sout)
        if newio:
            # Python 3 io.StringIO rejects bytes, so the wrapped write is
            # expected to fail there.
            self.expectFailure("Python 3 StringIO expects text not bytes",
                self.assertThat, lambda: soutwrapper.write(self.uni),
                Not(Raises(MatchesException(TypeError))))
        soutwrapper.write(self.uni)
        self.assertEqual("pa???n", sout.getvalue())
class TestTextRepr(testtools.TestCase):
    """Ensure in extending repr, basic behaviours are not being broken"""

    # Each fixture row is (input, expected one-line repr, expected
    # multi-line repr).  The expected strings must stay byte-exact.
    ascii_examples = (
        # Single character examples
        # C0 control codes should be escaped except multiline \n
        ("\x00", "'\\x00'", "'''\\\n\\x00'''"),
        ("\b", "'\\x08'", "'''\\\n\\x08'''"),
        ("\t", "'\\t'", "'''\\\n\\t'''"),
        ("\n", "'\\n'", "'''\\\n\n'''"),
        ("\r", "'\\r'", "'''\\\n\\r'''"),
        # Quotes and backslash should match normal repr behaviour
        ('"', "'\"'", "'''\\\n\"'''"),
        ("'", "\"'\"", "'''\\\n\\''''"),
        ("\\", "'\\\\'", "'''\\\n\\\\'''"),
        # DEL is also unprintable and should be escaped
        ("\x7F", "'\\x7f'", "'''\\\n\\x7f'''"),
        # Character combinations that need double checking
        ("\r\n", "'\\r\\n'", "'''\\\n\\r\n'''"),
        ("\"'", "'\"\\''", "'''\\\n\"\\''''"),
        ("'\"", "'\\'\"'", "'''\\\n'\"'''"),
        ("\\n", "'\\\\n'", "'''\\\n\\\\n'''"),
        ("\\\n", "'\\\\\\n'", "'''\\\n\\\\\n'''"),
        ("\\' ", "\"\\\\' \"", "'''\\\n\\\\' '''"),
        ("\\'\n", "\"\\\\'\\n\"", "'''\\\n\\\\'\n'''"),
        ("\\'\"", "'\\\\\\'\"'", "'''\\\n\\\\'\"'''"),
        ("\\'''", "\"\\\\'''\"", "'''\\\n\\\\\\'\\'\\''''"),
        )

    # Bytes with the high bit set should always be escaped
    bytes_examples = (
        (_b("\x80"), "'\\x80'", "'''\\\n\\x80'''"),
        (_b("\xA0"), "'\\xa0'", "'''\\\n\\xa0'''"),
        (_b("\xC0"), "'\\xc0'", "'''\\\n\\xc0'''"),
        (_b("\xFF"), "'\\xff'", "'''\\\n\\xff'''"),
        (_b("\xC2\xA7"), "'\\xc2\\xa7'", "'''\\\n\\xc2\\xa7'''"),
        )

    # Unicode doesn't escape printable characters as per the Python 3 model
    unicode_examples = (
        # C1 codes are unprintable
        (_u("\x80"), "'\\x80'", "'''\\\n\\x80'''"),
        (_u("\x9F"), "'\\x9f'", "'''\\\n\\x9f'''"),
        # No-break space is unprintable
        (_u("\xA0"), "'\\xa0'", "'''\\\n\\xa0'''"),
        # Letters latin alphabets are printable
        (_u("\xA1"), _u("'\xa1'"), _u("'''\\\n\xa1'''")),
        (_u("\xFF"), _u("'\xff'"), _u("'''\\\n\xff'''")),
        (_u("\u0100"), _u("'\u0100'"), _u("'''\\\n\u0100'''")),
        # Line and paragraph seperators are unprintable
        (_u("\u2028"), "'\\u2028'", "'''\\\n\\u2028'''"),
        (_u("\u2029"), "'\\u2029'", "'''\\\n\\u2029'''"),
        # Unpaired surrogates are unprintable
        (_u("\uD800"), "'\\ud800'", "'''\\\n\\ud800'''"),
        (_u("\uDFFF"), "'\\udfff'", "'''\\\n\\udfff'''"),
        # Unprintable general categories not fully tested: Cc, Cf, Co, Cn, Zs
        )

    # The native repr prefixes for bytes and unicode literals (differ
    # between Python 2 and 3).
    b_prefix = repr(_b(""))[:-2]
    u_prefix = repr(_u(""))[:-2]

    def test_ascii_examples_oneline_bytes(self):
        # One-line repr of ascii byte strings round-trips through eval().
        for s, expected, _ in self.ascii_examples:
            b = _b(s)
            actual = text_repr(b, multiline=False)
            # Add self.assertIsInstance check?
            self.assertEqual(actual, self.b_prefix + expected)
            self.assertEqual(eval(actual), b)

    def test_ascii_examples_oneline_unicode(self):
        for s, expected, _ in self.ascii_examples:
            u = _u(s)
            actual = text_repr(u, multiline=False)
            self.assertEqual(actual, self.u_prefix + expected)
            self.assertEqual(eval(actual), u)

    def test_ascii_examples_multiline_bytes(self):
        for s, _, expected in self.ascii_examples:
            b = _b(s)
            actual = text_repr(b, multiline=True)
            self.assertEqual(actual, self.b_prefix + expected)
            self.assertEqual(eval(actual), b)

    def test_ascii_examples_multiline_unicode(self):
        for s, _, expected in self.ascii_examples:
            u = _u(s)
            actual = text_repr(u, multiline=True)
            self.assertEqual(actual, self.u_prefix + expected)
            self.assertEqual(eval(actual), u)

    def test_ascii_examples_defaultline_bytes(self):
        # With no multiline argument, the form depends on embedded newlines.
        for s, one, multi in self.ascii_examples:
            expected = "\n" in s and multi or one
            self.assertEqual(text_repr(_b(s)), self.b_prefix + expected)

    def test_ascii_examples_defaultline_unicode(self):
        for s, one, multi in self.ascii_examples:
            expected = "\n" in s and multi or one
            self.assertEqual(text_repr(_u(s)), self.u_prefix + expected)

    def test_bytes_examples_oneline(self):
        for b, expected, _ in self.bytes_examples:
            actual = text_repr(b, multiline=False)
            self.assertEqual(actual, self.b_prefix + expected)
            self.assertEqual(eval(actual), b)

    def test_bytes_examples_multiline(self):
        for b, _, expected in self.bytes_examples:
            actual = text_repr(b, multiline=True)
            self.assertEqual(actual, self.b_prefix + expected)
            self.assertEqual(eval(actual), b)

    def test_unicode_examples_oneline(self):
        for u, expected, _ in self.unicode_examples:
            actual = text_repr(u, multiline=False)
            self.assertEqual(actual, self.u_prefix + expected)
            self.assertEqual(eval(actual), u)

    def test_unicode_examples_multiline(self):
        for u, _, expected in self.unicode_examples:
            actual = text_repr(u, multiline=True)
            self.assertEqual(actual, self.u_prefix + expected)
            self.assertEqual(eval(actual), u)
class TestReraise(testtools.TestCase):
    """Tests for trivial reraise wrapper needed for Python 2/3 changes"""

    def test_exc_info(self):
        """After reraise exc_info matches plus some extra traceback"""
        try:
            raise ValueError("Bad value")
        except ValueError:
            _exc_info = sys.exc_info()
        try:
            reraise(*_exc_info)
        except ValueError:
            _new_exc_info = sys.exc_info()
        # The type and value objects are preserved identically ...
        self.assertIs(_exc_info[0], _new_exc_info[0])
        self.assertIs(_exc_info[1], _new_exc_info[1])
        # ... and the original traceback is a suffix of the new one.
        expected_tb = traceback.extract_tb(_exc_info[2])
        self.assertEqual(expected_tb,
            traceback.extract_tb(_new_exc_info[2])[-len(expected_tb):])

    def test_custom_exception_no_args(self):
        """Reraising does not require args attribute to contain params"""
        class CustomException(Exception):
            """Exception that expects and sets attrs but not args"""
            def __init__(self, value):
                Exception.__init__(self)
                self.value = value
        try:
            raise CustomException("Some value")
        except CustomException:
            _exc_info = sys.exc_info()
        self.assertRaises(CustomException, reraise, *_exc_info)
def test_suite():
    """Collect every test in this module for the standard unittest loader."""
    import unittest
    return unittest.TestLoader().loadTestsFromName(__name__)

View File

@ -1,277 +0,0 @@
# Copyright (c) 2008-2012 testtools developers. See LICENSE for details.
import json
import os
import tempfile
import unittest
from testtools import TestCase
from testtools.compat import (
_b,
_u,
BytesIO,
StringIO,
)
from testtools.content import (
attach_file,
Content,
content_from_file,
content_from_stream,
JSON,
json_content,
TracebackContent,
text_content,
)
from testtools.content_type import (
ContentType,
UTF8_TEXT,
)
from testtools.matchers import (
Equals,
MatchesException,
Raises,
raises,
)
from testtools.tests.helpers import an_exc_info
raises_value_error = Raises(MatchesException(ValueError))
class TestContent(TestCase):
    """Tests for ``Content`` and the content factory helpers."""

    def test___init___None_errors(self):
        # Both the content type and the byte-chunk callable are mandatory.
        self.assertThat(lambda: Content(None, None), raises_value_error)
        self.assertThat(
            lambda: Content(None, lambda: ["traceback"]), raises_value_error)
        self.assertThat(
            lambda: Content(ContentType("text", "traceback"), None),
            raises_value_error)

    def test___init___sets_ivars(self):
        content_type = ContentType("foo", "bar")
        content = Content(content_type, lambda: ["bytes"])
        self.assertEqual(content_type, content.content_type)
        self.assertEqual(["bytes"], list(content.iter_bytes()))

    def test___eq__(self):
        # Equality is based on the content type and the *joined* bytes, so
        # the same byte stream chunked differently still compares equal
        # (content1 == content3), while different bytes or a different
        # content type do not.
        content_type = ContentType("foo", "bar")
        one_chunk = lambda: [_b("bytes")]
        two_chunk = lambda: [_b("by"), _b("tes")]
        content1 = Content(content_type, one_chunk)
        content2 = Content(content_type, one_chunk)
        content3 = Content(content_type, two_chunk)
        content4 = Content(content_type, lambda: [_b("by"), _b("te")])
        content5 = Content(ContentType("f", "b"), two_chunk)
        self.assertEqual(content1, content2)
        self.assertEqual(content1, content3)
        self.assertNotEqual(content1, content4)
        self.assertNotEqual(content1, content5)

    def test___repr__(self):
        # Non-printable bytes appear escaped in the repr, not raw.
        content = Content(ContentType("application", "octet-stream"),
            lambda: [_b("\x00bin"), _b("ary\xff")])
        self.assertIn("\\x00binary\\xff", repr(content))

    def test_iter_text_not_text_errors(self):
        # iter_text is only valid for text/* content types.
        content_type = ContentType("foo", "bar")
        content = Content(content_type, lambda: ["bytes"])
        self.assertThat(content.iter_text, raises_value_error)

    def test_iter_text_decodes(self):
        # The charset parameter of the content type drives decoding.
        content_type = ContentType("text", "strange", {"charset": "utf8"})
        content = Content(
            content_type, lambda: [_u("bytes\xea").encode("utf8")])
        self.assertEqual([_u("bytes\xea")], list(content.iter_text()))

    def test_iter_text_default_charset_iso_8859_1(self):
        # When no charset parameter is given, ISO-8859-1 is assumed.
        content_type = ContentType("text", "strange")
        text = _u("bytes\xea")
        iso_version = text.encode("ISO-8859-1")
        content = Content(content_type, lambda: [iso_version])
        self.assertEqual([text], list(content.iter_text()))

    def test_as_text(self):
        content_type = ContentType("text", "strange", {"charset": "utf8"})
        content = Content(
            content_type, lambda: [_u("bytes\xea").encode("utf8")])
        self.assertEqual(_u("bytes\xea"), content.as_text())

    def test_from_file(self):
        # chunk_size controls how the file's bytes are split up.
        fd, path = tempfile.mkstemp()
        self.addCleanup(os.remove, path)
        os.write(fd, _b('some data'))
        os.close(fd)
        content = content_from_file(path, UTF8_TEXT, chunk_size=2)
        self.assertThat(
            list(content.iter_bytes()),
            Equals([_b('so'), _b('me'), _b(' d'), _b('at'), _b('a')]))

    def test_from_nonexistent_file(self):
        # The IOError is raised lazily, only when the bytes are iterated.
        directory = tempfile.mkdtemp()
        nonexistent = os.path.join(directory, 'nonexistent-file')
        content = content_from_file(nonexistent)
        self.assertThat(content.iter_bytes, raises(IOError))

    def test_from_file_default_type(self):
        # UTF8_TEXT is the default content type.
        content = content_from_file('/nonexistent/path')
        self.assertThat(content.content_type, Equals(UTF8_TEXT))

    def test_from_file_eager_loading(self):
        # buffer_now=True reads the file immediately, so removing the file
        # afterwards does not lose the content.
        fd, path = tempfile.mkstemp()
        os.write(fd, _b('some data'))
        os.close(fd)
        content = content_from_file(path, UTF8_TEXT, buffer_now=True)
        os.remove(path)
        self.assertThat(
            ''.join(content.iter_text()), Equals('some data'))

    def test_from_file_with_simple_seek(self):
        # seek_offset skips past the start of the file before reading.
        f = tempfile.NamedTemporaryFile()
        f.write(_b('some data'))
        f.flush()
        self.addCleanup(f.close)
        content = content_from_file(
            f.name, UTF8_TEXT, chunk_size=50, seek_offset=5)
        self.assertThat(
            list(content.iter_bytes()), Equals([_b('data')]))

    def test_from_file_with_whence_seek(self):
        # seek_whence=2 seeks relative to the end of the file.
        f = tempfile.NamedTemporaryFile()
        f.write(_b('some data'))
        f.flush()
        self.addCleanup(f.close)
        content = content_from_file(
            f.name, UTF8_TEXT, chunk_size=50, seek_offset=-4, seek_whence=2)
        self.assertThat(
            list(content.iter_bytes()), Equals([_b('data')]))

    def test_from_stream(self):
        data = StringIO('some data')
        content = content_from_stream(data, UTF8_TEXT, chunk_size=2)
        self.assertThat(
            list(content.iter_bytes()), Equals(['so', 'me', ' d', 'at', 'a']))

    def test_from_stream_default_type(self):
        data = StringIO('some data')
        content = content_from_stream(data)
        self.assertThat(content.content_type, Equals(UTF8_TEXT))

    def test_from_stream_eager_loading(self):
        # buffer_now=True snapshots the stream at once, so bytes appended
        # to the underlying file afterwards are not included.
        fd, path = tempfile.mkstemp()
        self.addCleanup(os.remove, path)
        self.addCleanup(os.close, fd)
        os.write(fd, _b('some data'))
        stream = open(path, 'rb')
        self.addCleanup(stream.close)
        content = content_from_stream(stream, UTF8_TEXT, buffer_now=True)
        os.write(fd, _b('more data'))
        self.assertThat(
            ''.join(content.iter_text()), Equals('some data'))

    def test_from_stream_with_simple_seek(self):
        data = BytesIO(_b('some data'))
        content = content_from_stream(
            data, UTF8_TEXT, chunk_size=50, seek_offset=5)
        self.assertThat(
            list(content.iter_bytes()), Equals([_b('data')]))

    def test_from_stream_with_whence_seek(self):
        data = BytesIO(_b('some data'))
        content = content_from_stream(
            data, UTF8_TEXT, chunk_size=50, seek_offset=-4, seek_whence=2)
        self.assertThat(
            list(content.iter_bytes()), Equals([_b('data')]))

    def test_from_text(self):
        # text_content wraps a unicode string as UTF-8 encoded UTF8_TEXT.
        data = _u("some data")
        expected = Content(UTF8_TEXT, lambda: [data.encode('utf8')])
        self.assertEqual(expected, text_content(data))

    def test_json_content(self):
        # json_content serialises the object and tags it as JSON.
        data = {'foo': 'bar'}
        expected = Content(JSON, lambda: [_b('{"foo": "bar"}')])
        self.assertEqual(expected, json_content(data))
class TestTracebackContent(TestCase):
    """Tests for ``TracebackContent``."""

    def test___init___None_errors(self):
        # Both the exc_info tuple and the test are required.
        self.assertThat(
            lambda: TracebackContent(None, None), raises_value_error)

    def test___init___sets_ivars(self):
        content = TracebackContent(an_exc_info, self)
        expected_type = ContentType(
            "text", "x-traceback",
            {"language": "python", "charset": "utf8"})
        self.assertEqual(expected_type, content.content_type)
        # The rendered text matches what unittest itself would produce for
        # the same exc_info.
        result = unittest.TestResult()
        expected_text = result._exc_info_to_string(an_exc_info, self)
        self.assertEqual(expected_text, ''.join(list(content.iter_text())))
class TestAttachFile(TestCase):
    """Tests for the ``attach_file`` helper."""

    def make_file(self, data):
        # GZ 2011-04-21: This helper could be useful for methods above trying
        #                to use mkstemp, but should handle write failures and
        #                always close the fd. There must be a better way.
        fd, path = tempfile.mkstemp()
        self.addCleanup(os.remove, path)
        os.write(fd, _b(data))
        os.close(fd)
        return path

    def test_simple(self):
        class SomeTest(TestCase):
            def test_foo(self):
                pass

        case = SomeTest('test_foo')
        payload = 'some data'
        tmp_path = self.make_file(payload)
        attach_file(case, tmp_path, name='foo')
        # The attached detail holds the file's contents as text content.
        self.assertEqual({'foo': text_content(payload)}, case.getDetails())

    def test_optional_name(self):
        # If no name is provided, attach_file just uses the base name of the
        # file.
        class SomeTest(TestCase):
            def test_foo(self):
                pass

        case = SomeTest('test_foo')
        tmp_path = self.make_file('some data')
        attach_file(case, tmp_path)
        self.assertEqual(
            [os.path.basename(tmp_path)], list(case.getDetails()))

    def test_lazy_read(self):
        # buffer_now=False defers reading, so the detail reflects writes
        # made to the file after attach_file was called.
        class SomeTest(TestCase):
            def test_foo(self):
                pass

        case = SomeTest('test_foo')
        tmp_path = self.make_file('some data')
        attach_file(case, tmp_path, name='foo', buffer_now=False)
        detail = case.getDetails()['foo']
        rewrite = open(tmp_path, 'w')
        rewrite.write('new data')
        rewrite.close()
        self.assertEqual(''.join(detail.iter_text()), 'new data')

    def test_eager_read_by_default(self):
        # Without buffer_now, attach_file buffers immediately, so later
        # writes to the file do not show up in the detail.
        class SomeTest(TestCase):
            def test_foo(self):
                pass

        case = SomeTest('test_foo')
        tmp_path = self.make_file('some data')
        attach_file(case, tmp_path, name='foo')
        detail = case.getDetails()['foo']
        rewrite = open(tmp_path, 'w')
        rewrite.write('new data')
        rewrite.close()
        self.assertEqual(''.join(detail.iter_text()), 'some data')
def test_suite():
    # Standard testtools hook: collect all tests defined in this module.
    from unittest import TestLoader
    return TestLoader().loadTestsFromName(__name__)

View File

@ -1,66 +0,0 @@
# Copyright (c) 2008, 2012 testtools developers. See LICENSE for details.
from testtools import TestCase
from testtools.matchers import Equals, MatchesException, Raises
from testtools.content_type import (
ContentType,
JSON,
UTF8_TEXT,
)
class TestContentType(TestCase):
    """Tests for ``ContentType`` construction, equality and repr."""

    def test___init___None_errors(self):
        # Type and subtype are both mandatory.
        raises_value_error = Raises(MatchesException(ValueError))
        self.assertThat(lambda: ContentType(None, None), raises_value_error)
        self.assertThat(
            lambda: ContentType(None, "traceback"), raises_value_error)
        self.assertThat(lambda: ContentType("text", None), raises_value_error)

    def test___init___sets_ivars(self):
        content_type = ContentType("foo", "bar")
        self.assertEqual("foo", content_type.type)
        self.assertEqual("bar", content_type.subtype)
        # Parameters default to an empty dict.
        self.assertEqual({}, content_type.parameters)

    def test___init___with_parameters(self):
        content_type = ContentType("foo", "bar", {"quux": "thing"})
        self.assertEqual({"quux": "thing"}, content_type.parameters)

    def test___eq__(self):
        # Equality takes the parameters into account, not just type/subtype.
        first = ContentType("foo", "bar", {"quux": "thing"})
        second = ContentType("foo", "bar", {"quux": "thing"})
        third = ContentType("foo", "bar", {"quux": "thing2"})
        self.assertTrue(first.__eq__(second))
        self.assertFalse(first.__eq__(third))

    def test_basic_repr(self):
        content_type = ContentType('text', 'plain')
        self.assertThat(repr(content_type), Equals('text/plain'))

    def test_extended_repr(self):
        # Parameters are rendered in sorted order ('baz' before 'foo').
        content_type = ContentType(
            'text', 'plain', {'foo': 'bar', 'baz': 'qux'})
        self.assertThat(
            repr(content_type), Equals('text/plain; baz="qux", foo="bar"'))
class TestBuiltinContentTypes(TestCase):
    """Tests for the pre-defined ``UTF8_TEXT`` and ``JSON`` content types."""

    def test_plain_text(self):
        # The UTF8_TEXT content type represents UTF-8 encoded text/plain.
        ct = UTF8_TEXT
        self.assertThat(ct.type, Equals('text'))
        self.assertThat(ct.subtype, Equals('plain'))
        self.assertThat(ct.parameters, Equals({'charset': 'utf8'}))

    def test_json_content(self):
        # The JSON content type represents implictly UTF-8 application/json.
        ct = JSON
        self.assertThat(ct.type, Equals('application'))
        self.assertThat(ct.subtype, Equals('json'))
        self.assertThat(ct.parameters, Equals({}))
def test_suite():
    # Standard testtools hook: load this module's tests.
    from unittest import TestLoader
    loader = TestLoader()
    return loader.loadTestsFromName(__name__)

View File

@ -1,766 +0,0 @@
# Copyright (c) 2010-2011 testtools developers. See LICENSE for details.
"""Tests for the DeferredRunTest single test execution logic."""
import os
import signal
from testtools import (
skipIf,
TestCase,
TestResult,
)
from testtools.content import (
text_content,
)
from testtools.helpers import try_import
from testtools.matchers import (
Equals,
KeysEqual,
MatchesException,
Raises,
)
from testtools.runtest import RunTest
from testtools.testresult.doubles import ExtendedTestResult
from testtools.tests.test_spinner import NeedsTwistedTestCase
# Twisted (and the deferredruntest module built on it) is an optional
# dependency: try_import leaves each name as None when the import fails,
# so this module can still be loaded without Twisted installed (tests
# guard on these names, e.g. `if SynchronousDeferredRunTest is not None`).
assert_fails_with = try_import('testtools.deferredruntest.assert_fails_with')
AsynchronousDeferredRunTest = try_import(
    'testtools.deferredruntest.AsynchronousDeferredRunTest')
flush_logged_errors = try_import(
    'testtools.deferredruntest.flush_logged_errors')
SynchronousDeferredRunTest = try_import(
    'testtools.deferredruntest.SynchronousDeferredRunTest')
defer = try_import('twisted.internet.defer')
failure = try_import('twisted.python.failure')
log = try_import('twisted.python.log')
DelayedCall = try_import('twisted.internet.base.DelayedCall')
class X(object):
    """Tests that we run as part of our tests, nested to avoid discovery."""

    class Base(TestCase):
        def setUp(self):
            super(X.Base, self).setUp()
            # Record the lifecycle phases as they happen; subclasses assert
            # against this via their expected_calls attribute.
            self.calls = ['setUp']
            self.addCleanup(self.calls.append, 'clean-up')

        def test_something(self):
            self.calls.append('test')

        def tearDown(self):
            self.calls.append('tearDown')
            super(X.Base, self).tearDown()

    class ErrorInSetup(Base):
        # setUp fails, so neither the test nor tearDown runs; cleanups
        # still do.
        expected_calls = ['setUp', 'clean-up']
        expected_results = [('addError', RuntimeError)]

        def setUp(self):
            super(X.ErrorInSetup, self).setUp()
            raise RuntimeError("Error in setUp")

    class ErrorInTest(Base):
        expected_calls = ['setUp', 'tearDown', 'clean-up']
        expected_results = [('addError', RuntimeError)]

        def test_something(self):
            raise RuntimeError("Error in test")

    class FailureInTest(Base):
        # An assertion failure is reported via addFailure, not addError.
        expected_calls = ['setUp', 'tearDown', 'clean-up']
        expected_results = [('addFailure', AssertionError)]

        def test_something(self):
            self.fail("test failed")

    class ErrorInTearDown(Base):
        expected_calls = ['setUp', 'test', 'clean-up']
        expected_results = [('addError', RuntimeError)]

        def tearDown(self):
            raise RuntimeError("Error in tearDown")

    class ErrorInCleanup(Base):
        # The full lifecycle runs; the failing cleanup produces the error.
        expected_calls = ['setUp', 'test', 'tearDown', 'clean-up']
        expected_results = [('addError', ZeroDivisionError)]

        def test_something(self):
            self.calls.append('test')
            self.addCleanup(lambda: 1/0)

    class TestIntegration(NeedsTwistedTestCase):
        def assertResultsMatch(self, test, result):
            # Walk the result's event log: startTest first, then each
            # expected add* event (error details only loosely checked by
            # exception-type name), then stopTest and nothing else.
            events = list(result._events)
            self.assertEqual(('startTest', test), events.pop(0))
            for expected_result in test.expected_results:
                result = events.pop(0)
                if len(expected_result) == 1:
                    self.assertEqual((expected_result[0], test), result)
                else:
                    self.assertEqual((expected_result[0], test), result[:2])
                    error_type = expected_result[1]
                    self.assertIn(error_type.__name__, str(result[2]))
            self.assertEqual([('stopTest', test)], events)

        def test_runner(self):
            # test_factory and runner are injected per-instance by
            # make_integration_tests().
            result = ExtendedTestResult()
            test = self.test_factory('test_something', runTest=self.runner)
            test.run(result)
            self.assertEqual(test.calls, self.test_factory.expected_calls)
            self.assertResultsMatch(test, result)
def make_integration_tests():
    """Build a suite running each X.* scenario under each RunTest flavour."""
    from unittest import TestSuite
    from testtools import clone_test_with_new_id

    runners = [
        ('RunTest', RunTest),
        ('SynchronousDeferredRunTest', SynchronousDeferredRunTest),
        ('AsynchronousDeferredRunTest', AsynchronousDeferredRunTest),
    ]
    scenarios = [
        X.ErrorInSetup,
        X.ErrorInTest,
        X.ErrorInTearDown,
        X.FailureInTest,
        X.ErrorInCleanup,
    ]
    template = X.TestIntegration('test_runner')
    members = []
    for runner_name, runner in runners:
        for scenario in scenarios:
            # Give each combination a distinctive id, then attach the
            # scenario class and runner for test_runner to pick up.
            new_id = '%s(%s, %s)' % (
                template.id(), runner_name, scenario.__name__)
            cloned = clone_test_with_new_id(template, new_id)
            cloned.test_factory = scenario
            cloned.runner = runner
            members.append(cloned)
    return TestSuite(members)
class TestSynchronousDeferredRunTest(NeedsTwistedTestCase):
    """Tests for ``SynchronousDeferredRunTest``."""

    def make_result(self):
        return ExtendedTestResult()

    def make_runner(self, test):
        return SynchronousDeferredRunTest(test, test.exception_handlers)

    def test_success(self):
        # A test method returning a fired Deferred counts as a success.
        class SomeCase(TestCase):
            def test_success(self):
                return defer.succeed(None)

        case = SomeCase('test_success')
        result = self.make_result()
        self.make_runner(case).run(result)
        self.assertThat(
            result._events,
            Equals([
                ('startTest', case),
                ('addSuccess', case),
                ('stopTest', case)]))

    def test_failure(self):
        # A Deferred failing with an assertion is reported via addFailure.
        class SomeCase(TestCase):
            def test_failure(self):
                return defer.maybeDeferred(self.fail, "Egads!")

        case = SomeCase('test_failure')
        result = self.make_result()
        self.make_runner(case).run(result)
        self.assertThat(
            [event[:2] for event in result._events],
            Equals([
                ('startTest', case),
                ('addFailure', case),
                ('stopTest', case)]))

    def test_setUp_followed_by_test(self):
        # setUp may also return a Deferred; the test method still runs
        # afterwards and its outcome is reported normally.
        class SomeCase(TestCase):
            def setUp(self):
                super(SomeCase, self).setUp()
                return defer.succeed(None)

            def test_failure(self):
                return defer.maybeDeferred(self.fail, "Egads!")

        case = SomeCase('test_failure')
        result = self.make_result()
        self.make_runner(case).run(result)
        self.assertThat(
            [event[:2] for event in result._events],
            Equals([
                ('startTest', case),
                ('addFailure', case),
                ('stopTest', case)]))
class TestAsynchronousDeferredRunTest(NeedsTwistedTestCase):
    """Tests for ``AsynchronousDeferredRunTest``.

    Covers Deferred-returning fixtures and tests, reactor cleanliness
    checks, unhandled-Deferred-error reporting, timeouts, SIGINT handling,
    the ``make_factory`` convenience constructor, Twisted log capture and
    debug-flag management.
    """

    def make_reactor(self):
        from twisted.internet import reactor
        return reactor

    def make_result(self):
        return ExtendedTestResult()

    def make_runner(self, test, timeout=None):
        if timeout is None:
            timeout = self.make_timeout()
        return AsynchronousDeferredRunTest(
            test, test.exception_handlers, timeout=timeout)

    def make_timeout(self):
        # Deliberately tiny so timeout tests stay fast.
        return 0.005

    def test_setUp_returns_deferred_that_fires_later(self):
        # setUp can return a Deferred that might fire at any time.
        # AsynchronousDeferredRunTest will not go on to running the test until
        # the Deferred returned by setUp actually fires.
        call_log = []
        marker = object()
        d = defer.Deferred().addCallback(call_log.append)

        class SomeCase(TestCase):
            def setUp(self):
                super(SomeCase, self).setUp()
                call_log.append('setUp')
                return d

            def test_something(self):
                call_log.append('test')

        def fire_deferred():
            self.assertThat(call_log, Equals(['setUp']))
            d.callback(marker)

        test = SomeCase('test_something')
        timeout = self.make_timeout()
        runner = self.make_runner(test, timeout=timeout)
        result = self.make_result()
        reactor = self.make_reactor()
        reactor.callLater(timeout, fire_deferred)
        runner.run(result)
        self.assertThat(call_log, Equals(['setUp', marker, 'test']))

    def test_calls_setUp_test_tearDown_in_sequence(self):
        # setUp, the test method and tearDown can all return
        # Deferreds. AsynchronousDeferredRunTest will make sure that each of
        # these are run in turn, only going on to the next stage once the
        # Deferred from the previous stage has fired.
        call_log = []
        a = defer.Deferred()
        a.addCallback(lambda x: call_log.append('a'))
        b = defer.Deferred()
        b.addCallback(lambda x: call_log.append('b'))
        c = defer.Deferred()
        c.addCallback(lambda x: call_log.append('c'))

        class SomeCase(TestCase):
            def setUp(self):
                super(SomeCase, self).setUp()
                call_log.append('setUp')
                return a

            def test_success(self):
                call_log.append('test')
                return b

            def tearDown(self):
                super(SomeCase, self).tearDown()
                call_log.append('tearDown')
                return c

        test = SomeCase('test_success')
        timeout = self.make_timeout()
        runner = self.make_runner(test, timeout)
        result = self.make_result()
        reactor = self.make_reactor()

        def fire_a():
            self.assertThat(call_log, Equals(['setUp']))
            a.callback(None)

        def fire_b():
            self.assertThat(call_log, Equals(['setUp', 'a', 'test']))
            b.callback(None)

        def fire_c():
            self.assertThat(
                call_log, Equals(['setUp', 'a', 'test', 'b', 'tearDown']))
            c.callback(None)

        # Stagger the callbacks within the timeout window.
        reactor.callLater(timeout * 0.25, fire_a)
        reactor.callLater(timeout * 0.5, fire_b)
        reactor.callLater(timeout * 0.75, fire_c)
        runner.run(result)
        self.assertThat(
            call_log, Equals(['setUp', 'a', 'test', 'b', 'tearDown', 'c']))

    def test_async_cleanups(self):
        # Cleanups added with addCleanup can return
        # Deferreds. AsynchronousDeferredRunTest will run each of them in
        # turn.
        class SomeCase(TestCase):
            def test_whatever(self):
                pass

        test = SomeCase('test_whatever')
        call_log = []
        a = defer.Deferred().addCallback(lambda x: call_log.append('a'))
        b = defer.Deferred().addCallback(lambda x: call_log.append('b'))
        c = defer.Deferred().addCallback(lambda x: call_log.append('c'))
        test.addCleanup(lambda: a)
        test.addCleanup(lambda: b)
        test.addCleanup(lambda: c)

        def fire_a():
            self.assertThat(call_log, Equals([]))
            a.callback(None)

        def fire_b():
            self.assertThat(call_log, Equals(['a']))
            b.callback(None)

        def fire_c():
            self.assertThat(call_log, Equals(['a', 'b']))
            c.callback(None)

        timeout = self.make_timeout()
        reactor = self.make_reactor()
        reactor.callLater(timeout * 0.25, fire_a)
        reactor.callLater(timeout * 0.5, fire_b)
        reactor.callLater(timeout * 0.75, fire_c)
        runner = self.make_runner(test, timeout)
        result = self.make_result()
        runner.run(result)
        self.assertThat(call_log, Equals(['a', 'b', 'c']))

    def test_clean_reactor(self):
        # If there's cruft left over in the reactor, the test fails.
        reactor = self.make_reactor()
        timeout = self.make_timeout()

        class SomeCase(TestCase):
            def test_cruft(self):
                # Schedule a call beyond the test's lifetime and leak it.
                reactor.callLater(timeout * 10.0, lambda: None)

        test = SomeCase('test_cruft')
        runner = self.make_runner(test, timeout)
        result = self.make_result()
        runner.run(result)
        self.assertThat(
            [event[:2] for event in result._events],
            Equals(
                [('startTest', test),
                 ('addError', test),
                 ('stopTest', test)]))
        error = result._events[1][2]
        self.assertThat(error, KeysEqual('traceback', 'twisted-log'))

    def test_exports_reactor(self):
        # The reactor is set as an attribute on the test case.
        reactor = self.make_reactor()
        timeout = self.make_timeout()

        class SomeCase(TestCase):
            def test_cruft(self):
                self.assertIs(reactor, self.reactor)

        test = SomeCase('test_cruft')
        runner = self.make_runner(test, timeout)
        result = TestResult()
        runner.run(result)
        self.assertEqual([], result.errors)
        self.assertEqual([], result.failures)

    def test_unhandled_error_from_deferred(self):
        # If there's a Deferred with an unhandled error, the test fails. Each
        # unhandled error is reported with a separate traceback.
        class SomeCase(TestCase):
            def test_cruft(self):
                # Note we aren't returning the Deferred so that the error will
                # be unhandled.
                defer.maybeDeferred(lambda: 1/0)
                defer.maybeDeferred(lambda: 2/0)

        test = SomeCase('test_cruft')
        runner = self.make_runner(test)
        result = self.make_result()
        runner.run(result)
        error = result._events[1][2]
        # Blank out the unpredictable details dict before comparing events.
        result._events[1] = ('addError', test, None)
        self.assertThat(result._events, Equals(
            [('startTest', test),
             ('addError', test, None),
             ('stopTest', test)]))
        self.assertThat(
            error, KeysEqual(
                'twisted-log',
                'unhandled-error-in-deferred',
                'unhandled-error-in-deferred-1',
                ))

    def test_unhandled_error_from_deferred_combined_with_error(self):
        # If there's a Deferred with an unhandled error, the test fails. Each
        # unhandled error is reported with a separate traceback, and the error
        # is still reported.
        class SomeCase(TestCase):
            def test_cruft(self):
                # Note we aren't returning the Deferred so that the error will
                # be unhandled.
                defer.maybeDeferred(lambda: 1/0)
                2 / 0

        test = SomeCase('test_cruft')
        runner = self.make_runner(test)
        result = self.make_result()
        runner.run(result)
        error = result._events[1][2]
        result._events[1] = ('addError', test, None)
        self.assertThat(result._events, Equals(
            [('startTest', test),
             ('addError', test, None),
             ('stopTest', test)]))
        self.assertThat(
            error, KeysEqual(
                'traceback',
                'twisted-log',
                'unhandled-error-in-deferred',
                ))

    @skipIf(os.name != "posix", "Sending SIGINT with os.kill is posix only")
    def test_keyboard_interrupt_stops_test_run(self):
        # If we get a SIGINT during a test run, the test stops and no more
        # tests run.
        SIGINT = getattr(signal, 'SIGINT', None)
        if not SIGINT:
            raise self.skipTest("SIGINT unavailable")

        class SomeCase(TestCase):
            def test_pause(self):
                return defer.Deferred()

        test = SomeCase('test_pause')
        reactor = self.make_reactor()
        timeout = self.make_timeout()
        runner = self.make_runner(test, timeout * 5)
        result = self.make_result()
        reactor.callLater(timeout, os.kill, os.getpid(), SIGINT)
        self.assertThat(lambda: runner.run(result),
            Raises(MatchesException(KeyboardInterrupt)))

    @skipIf(os.name != "posix", "Sending SIGINT with os.kill is posix only")
    def test_fast_keyboard_interrupt_stops_test_run(self):
        # If we get a SIGINT during a test run, the test stops and no more
        # tests run.
        SIGINT = getattr(signal, 'SIGINT', None)
        if not SIGINT:
            raise self.skipTest("SIGINT unavailable")

        class SomeCase(TestCase):
            def test_pause(self):
                return defer.Deferred()

        test = SomeCase('test_pause')
        reactor = self.make_reactor()
        timeout = self.make_timeout()
        runner = self.make_runner(test, timeout * 5)
        result = self.make_result()
        # Unlike the test above, deliver the signal as soon as the reactor
        # starts rather than after a delay.
        reactor.callWhenRunning(os.kill, os.getpid(), SIGINT)
        self.assertThat(lambda: runner.run(result),
            Raises(MatchesException(KeyboardInterrupt)))

    def test_timeout_causes_test_error(self):
        # If a test times out, it reports itself as having failed with a
        # TimeoutError.
        class SomeCase(TestCase):
            def test_pause(self):
                # Never fires: guaranteed to exceed the runner's timeout.
                return defer.Deferred()

        test = SomeCase('test_pause')
        runner = self.make_runner(test)
        result = self.make_result()
        runner.run(result)
        error = result._events[1][2]
        self.assertThat(
            [event[:2] for event in result._events], Equals(
                [('startTest', test),
                 ('addError', test),
                 ('stopTest', test)]))
        self.assertIn('TimeoutError', str(error['traceback']))

    def test_convenient_construction(self):
        # As a convenience method, AsynchronousDeferredRunTest has a
        # classmethod that returns an AsynchronousDeferredRunTest
        # factory. This factory has the same API as the RunTest constructor.
        reactor = object()
        timeout = object()
        handler = object()
        factory = AsynchronousDeferredRunTest.make_factory(reactor, timeout)
        runner = factory(self, [handler])
        self.assertIs(reactor, runner._reactor)
        self.assertIs(timeout, runner._timeout)
        self.assertIs(self, runner.case)
        self.assertEqual([handler], runner.handlers)

    def test_use_convenient_factory(self):
        # Make sure that the factory can actually be used.
        factory = AsynchronousDeferredRunTest.make_factory()

        class SomeCase(TestCase):
            run_tests_with = factory

            def test_something(self):
                pass

        case = SomeCase('test_something')
        case.run()

    def test_convenient_construction_default_reactor(self):
        # make_factory accepts the reactor alone as a keyword argument.
        reactor = object()
        handler = object()
        factory = AsynchronousDeferredRunTest.make_factory(reactor=reactor)
        runner = factory(self, [handler])
        self.assertIs(reactor, runner._reactor)
        self.assertIs(self, runner.case)
        self.assertEqual([handler], runner.handlers)

    def test_convenient_construction_default_timeout(self):
        # make_factory accepts the timeout alone as a keyword argument.
        timeout = object()
        handler = object()
        factory = AsynchronousDeferredRunTest.make_factory(timeout=timeout)
        runner = factory(self, [handler])
        self.assertIs(timeout, runner._timeout)
        self.assertIs(self, runner.case)
        self.assertEqual([handler], runner.handlers)

    def test_convenient_construction_default_debugging(self):
        # make_factory accepts the debug flag alone as a keyword argument.
        handler = object()
        factory = AsynchronousDeferredRunTest.make_factory(debug=True)
        runner = factory(self, [handler])
        self.assertIs(self, runner.case)
        self.assertEqual([handler], runner.handlers)
        self.assertEqual(True, runner._debug)

    def test_deferred_error(self):
        # An error in a Deferred returned by the test is reported with a
        # traceback and the captured Twisted log.
        class SomeTest(TestCase):
            def test_something(self):
                return defer.maybeDeferred(lambda: 1/0)

        test = SomeTest('test_something')
        runner = self.make_runner(test)
        result = self.make_result()
        runner.run(result)
        self.assertThat(
            [event[:2] for event in result._events],
            Equals([
                ('startTest', test),
                ('addError', test),
                ('stopTest', test)]))
        error = result._events[1][2]
        self.assertThat(error, KeysEqual('traceback', 'twisted-log'))

    def test_only_addError_once(self):
        # Even if the reactor is unclean and the test raises an error and the
        # cleanups raise errors, we only called addError once per test.
        reactor = self.make_reactor()

        class WhenItRains(TestCase):
            def it_pours(self):
                # Add a dirty cleanup.
                self.addCleanup(lambda: 3 / 0)
                # Dirty the reactor.
                from twisted.internet.protocol import ServerFactory
                reactor.listenTCP(0, ServerFactory())
                # Unhandled error.
                defer.maybeDeferred(lambda: 2 / 0)
                # Actual error.
                raise RuntimeError("Excess precipitation")

        test = WhenItRains('it_pours')
        runner = self.make_runner(test)
        result = self.make_result()
        runner.run(result)
        self.assertThat(
            [event[:2] for event in result._events],
            Equals([
                ('startTest', test),
                ('addError', test),
                ('stopTest', test)]))
        # All four problems end up as details of the single addError call.
        error = result._events[1][2]
        self.assertThat(
            error, KeysEqual(
                'traceback',
                'traceback-1',
                'traceback-2',
                'twisted-log',
                'unhandled-error-in-deferred',
                ))

    def test_log_err_is_error(self):
        # An error logged during the test run is recorded as an error in the
        # tests.
        class LogAnError(TestCase):
            def test_something(self):
                try:
                    1/0
                except ZeroDivisionError:
                    f = failure.Failure()
                log.err(f)

        test = LogAnError('test_something')
        runner = self.make_runner(test)
        result = self.make_result()
        runner.run(result)
        self.assertThat(
            [event[:2] for event in result._events],
            Equals([
                ('startTest', test),
                ('addError', test),
                ('stopTest', test)]))
        error = result._events[1][2]
        self.assertThat(error, KeysEqual('logged-error', 'twisted-log'))

    def test_log_err_flushed_is_success(self):
        # A logged error that the test flushes with flush_logged_errors does
        # not fail the test.
        class LogAnError(TestCase):
            def test_something(self):
                try:
                    1/0
                except ZeroDivisionError:
                    f = failure.Failure()
                log.err(f)
                flush_logged_errors(ZeroDivisionError)

        test = LogAnError('test_something')
        runner = self.make_runner(test)
        result = self.make_result()
        runner.run(result)
        self.assertThat(
            result._events,
            Equals([
                ('startTest', test),
                ('addSuccess', test, {'twisted-log': text_content('')}),
                ('stopTest', test)]))

    def test_log_in_details(self):
        # Twisted log output produced during the test shows up as a detail.
        class LogAnError(TestCase):
            def test_something(self):
                log.msg("foo")
                1/0

        test = LogAnError('test_something')
        runner = self.make_runner(test)
        result = self.make_result()
        runner.run(result)
        self.assertThat(
            [event[:2] for event in result._events],
            Equals([
                ('startTest', test),
                ('addError', test),
                ('stopTest', test)]))
        error = result._events[1][2]
        self.assertThat(error, KeysEqual('traceback', 'twisted-log'))

    def test_debugging_unchanged_during_test_by_default(self):
        # Without debug=True, the Deferred/DelayedCall debug flags seen
        # inside the test match those outside it.
        debugging = [(defer.Deferred.debug, DelayedCall.debug)]

        class SomeCase(TestCase):
            def test_debugging_enabled(self):
                debugging.append((defer.Deferred.debug, DelayedCall.debug))

        test = SomeCase('test_debugging_enabled')
        runner = AsynchronousDeferredRunTest(
            test, handlers=test.exception_handlers,
            reactor=self.make_reactor(), timeout=self.make_timeout())
        runner.run(self.make_result())
        self.assertEqual(debugging[0], debugging[1])

    def test_debugging_enabled_during_test_with_debug_flag(self):
        # With debug=True, both flags are switched on while the test runs
        # and restored afterwards.
        self.patch(defer.Deferred, 'debug', False)
        self.patch(DelayedCall, 'debug', False)
        debugging = []

        class SomeCase(TestCase):
            def test_debugging_enabled(self):
                debugging.append((defer.Deferred.debug, DelayedCall.debug))

        test = SomeCase('test_debugging_enabled')
        runner = AsynchronousDeferredRunTest(
            test, handlers=test.exception_handlers,
            reactor=self.make_reactor(), timeout=self.make_timeout(),
            debug=True)
        runner.run(self.make_result())
        self.assertEqual([(True, True)], debugging)
        self.assertEqual(False, defer.Deferred.debug)
        # BUG FIX: the original asserted defer.Deferred.debug twice and
        # never verified that DelayedCall.debug was restored.
        self.assertEqual(False, DelayedCall.debug)
class TestAssertFailsWith(NeedsTwistedTestCase):
    """Tests for `assert_fails_with`."""

    # Use the synchronous deferred runner when Twisted is available
    # (try_import left SynchronousDeferredRunTest as None otherwise).
    if SynchronousDeferredRunTest is not None:
        run_tests_with = SynchronousDeferredRunTest

    def test_assert_fails_with_success(self):
        # assert_fails_with fails the test if it's given a Deferred that
        # succeeds.
        marker = object()
        d = assert_fails_with(defer.succeed(marker), RuntimeError)

        def check_result(failure):
            failure.trap(self.failureException)
            self.assertThat(
                str(failure.value),
                Equals("RuntimeError not raised (%r returned)" % (marker,)))

        d.addCallbacks(
            lambda x: self.fail("Should not have succeeded"), check_result)
        return d

    def test_assert_fails_with_success_multiple_types(self):
        # As above, but the failure message names every expected type.
        marker = object()
        d = assert_fails_with(
            defer.succeed(marker), RuntimeError, ZeroDivisionError)

        def check_result(failure):
            failure.trap(self.failureException)
            self.assertThat(
                str(failure.value),
                Equals("RuntimeError, ZeroDivisionError not raised "
                       "(%r returned)" % (marker,)))

        d.addCallbacks(
            lambda x: self.fail("Should not have succeeded"), check_result)
        return d

    def test_assert_fails_with_wrong_exception(self):
        # assert_fails_with fails the test if the Deferred fails with an
        # exception that is not one of the expected types; the message
        # includes the actual traceback.
        d = assert_fails_with(
            defer.maybeDeferred(lambda: 1/0), RuntimeError, KeyboardInterrupt)

        def check_result(failure):
            failure.trap(self.failureException)
            lines = str(failure.value).splitlines()
            self.assertThat(
                lines[:2],
                Equals([
                    ("ZeroDivisionError raised instead of RuntimeError, "
                     "KeyboardInterrupt:"),
                    " Traceback (most recent call last):",
                ]))

        d.addCallbacks(
            lambda x: self.fail("Should not have succeeded"), check_result)
        return d

    def test_assert_fails_with_expected_exception(self):
        # assert_fails_with calls back with the value of the failure if it's
        # one of the expected types of failures.
        try:
            1/0
        except ZeroDivisionError:
            f = failure.Failure()
        d = assert_fails_with(defer.fail(f), ZeroDivisionError)
        return d.addCallback(self.assertThat, Equals(f.value))

    def test_custom_failure_exception(self):
        # If assert_fails_with is passed a 'failureException' keyword
        # argument, then it will raise that instead of `AssertionError`.
        class CustomException(Exception):
            pass

        marker = object()
        d = assert_fails_with(
            defer.succeed(marker), RuntimeError,
            failureException=CustomException)

        def check_result(failure):
            failure.trap(CustomException)
            self.assertThat(
                str(failure.value),
                Equals("RuntimeError not raised (%r returned)" % (marker,)))

        return d.addCallbacks(
            lambda x: self.fail("Should not have succeeded"), check_result)
class TestRunWithLogObservers(NeedsTwistedTestCase):
    """Tests for run_with_log_observers."""

    def test_restores_observers(self):
        # Regression test for bug #926189: after run_with_log_observers
        # returns, the set of registered Twisted log observers must be
        # exactly what it was beforehand.
        from testtools.deferredruntest import run_with_log_observers
        from twisted.python import log
        # Register an observer first so the observer list is non-empty.
        log.addObserver(lambda *args: None)
        before = list(log.theLogPublisher.observers)
        run_with_log_observers([], lambda: None)
        self.assertEqual(before, log.theLogPublisher.observers)
def test_suite():
    """Return this module's tests together with the integration tests."""
    from unittest import TestLoader, TestSuite
    unit_tests = TestLoader().loadTestsFromName(__name__)
    return TestSuite([unit_tests, make_integration_tests()])

View File

@ -1,99 +0,0 @@
# Copyright (c) 2010-2011 Testtools authors. See LICENSE for details.
"""Tests for the distutils test command logic."""
from distutils.dist import Distribution
from testtools.compat import (
_b,
_u,
BytesIO,
)
from testtools.helpers import try_import
fixtures = try_import('fixtures')
import testtools
from testtools import TestCase
from testtools.distutilscmd import TestCommand
from testtools.matchers import MatchesRegex
if fixtures:
# Defined only when the optional 'fixtures' dependency is importable.
class SampleTestFixture(fixtures.Fixture):
"""Creates testtools.runexample temporarily."""
def __init__(self):
# The string below is written verbatim as the generated package's
# __init__.py: two trivially-passing tests plus a test_suite() hook.
self.package = fixtures.PythonPackage(
'runexample', [('__init__.py', _b("""
from testtools import TestCase
class TestFoo(TestCase):
def test_bar(self):
pass
def test_quux(self):
pass
def test_suite():
from unittest import TestLoader
return TestLoader().loadTestsFromName(__name__)
"""))])
def setUp(self):
super(SampleTestFixture, self).setUp()
self.useFixture(self.package)
# Graft the generated package onto the installed testtools package so
# it is importable as 'testtools.runexample'; undone during cleanup.
testtools.__path__.append(self.package.base)
self.addCleanup(testtools.__path__.remove, self.package.base)
class TestCommandTest(TestCase):
"""Tests for the distutils 'test' command provided by TestCommand."""
def setUp(self):
super(TestCommandTest, self).setUp()
if fixtures is None:
self.skipTest("Need fixtures")
def test_test_module(self):
# 'setup.py test' with a test_module option runs that module's tests
# and prints the testtools summary ("Tests running... / Ran N tests")
# to stdout.
self.useFixture(SampleTestFixture())
stdout = self.useFixture(fixtures.StringStream('stdout'))
dist = Distribution()
dist.script_name = 'setup.py'
dist.script_args = ['test']
dist.cmdclass = {'test': TestCommand}
dist.command_options = {
'test': {'test_module': ('command line', 'testtools.runexample')}}
cmd = dist.reinitialize_command('test')
with fixtures.MonkeyPatch('sys.stdout', stdout.stream):
dist.run_command('test')
self.assertThat(
stdout.getDetails()['stdout'].as_text(),
MatchesRegex(_u("""Tests running...
Ran 2 tests in \\d.\\d\\d\\ds
OK
""")))
def test_test_suite(self):
# As above, but the tests are selected via the test_suite option,
# naming a callable that returns a suite.
self.useFixture(SampleTestFixture())
stdout = self.useFixture(fixtures.StringStream('stdout'))
dist = Distribution()
dist.script_name = 'setup.py'
dist.script_args = ['test']
dist.cmdclass = {'test': TestCommand}
dist.command_options = {
'test': {
'test_suite': (
'command line', 'testtools.runexample.test_suite')}}
cmd = dist.reinitialize_command('test')
with fixtures.MonkeyPatch('sys.stdout', stdout.stream):
dist.run_command('test')
self.assertThat(
stdout.getDetails()['stdout'].as_text(),
MatchesRegex(_u("""Tests running...
Ran 2 tests in \\d.\\d\\d\\ds
OK
""")))
def test_suite():
    """Return every test defined in this module."""
    from unittest import TestLoader
    loader = TestLoader()
    return loader.loadTestsFromName(__name__)

View File

@ -1,117 +0,0 @@
# Copyright (c) 2010-2011 testtools developers. See LICENSE for details.
import unittest
from testtools import (
TestCase,
content,
content_type,
)
from testtools.compat import _b, _u
from testtools.helpers import try_import
from testtools.testresult.doubles import (
ExtendedTestResult,
)
fixtures = try_import('fixtures')
LoggingFixture = try_import('fixtures.tests.helpers.LoggingFixture')
class TestFixtureSupport(TestCase):
"""Tests for TestCase.useFixture integration with the fixtures library."""
def setUp(self):
super(TestFixtureSupport, self).setUp()
if fixtures is None or LoggingFixture is None:
self.skipTest("Need fixtures")
def test_useFixture(self):
# useFixture sets up the fixture and schedules its cleanUp.
fixture = LoggingFixture()
class SimpleTest(TestCase):
def test_foo(self):
self.useFixture(fixture)
result = unittest.TestResult()
SimpleTest('test_foo').run(result)
self.assertTrue(result.wasSuccessful())
self.assertEqual(['setUp', 'cleanUp'], fixture.calls)
def test_useFixture_cleanups_raise_caught(self):
# An exception from the fixture's cleanup is reported as a test
# failure rather than propagating out of run().
calls = []
def raiser(ignored):
calls.append('called')
raise Exception('foo')
fixture = fixtures.FunctionFixture(lambda:None, raiser)
class SimpleTest(TestCase):
def test_foo(self):
self.useFixture(fixture)
result = unittest.TestResult()
SimpleTest('test_foo').run(result)
self.assertFalse(result.wasSuccessful())
self.assertEqual(['called'], calls)
def test_useFixture_details_captured(self):
# Details attached to the fixture are gathered into the test's
# details; a name collision with the test's own detail is resolved
# by suffixing ('content' and 'content-1' both appear).
class DetailsFixture(fixtures.Fixture):
def setUp(self):
fixtures.Fixture.setUp(self)
self.addCleanup(delattr, self, 'content')
self.content = [_b('content available until cleanUp')]
self.addDetail('content',
content.Content(content_type.UTF8_TEXT, self.get_content))
def get_content(self):
return self.content
fixture = DetailsFixture()
class SimpleTest(TestCase):
def test_foo(self):
self.useFixture(fixture)
# Add a colliding detail (both should show up)
self.addDetail('content',
content.Content(content_type.UTF8_TEXT, lambda:[_b('foo')]))
result = ExtendedTestResult()
SimpleTest('test_foo').run(result)
self.assertEqual('addSuccess', result._events[-2][0])
details = result._events[-2][2]
self.assertEqual(['content', 'content-1'], sorted(details.keys()))
self.assertEqual('foo', details['content'].as_text())
self.assertEqual('content available until cleanUp',
details['content-1'].as_text())
def test_useFixture_multiple_details_captured(self):
# All of a fixture's details are copied across, not just the first.
class DetailsFixture(fixtures.Fixture):
def setUp(self):
fixtures.Fixture.setUp(self)
self.addDetail('aaa', content.text_content("foo"))
self.addDetail('bbb', content.text_content("bar"))
fixture = DetailsFixture()
class SimpleTest(TestCase):
def test_foo(self):
self.useFixture(fixture)
result = ExtendedTestResult()
SimpleTest('test_foo').run(result)
self.assertEqual('addSuccess', result._events[-2][0])
details = result._events[-2][2]
self.assertEqual(['aaa', 'bbb'], sorted(details))
self.assertEqual(_u('foo'), details['aaa'].as_text())
self.assertEqual(_u('bar'), details['bbb'].as_text())
def test_useFixture_details_captured_from_setUp(self):
# Details added during fixture set-up are gathered even if setUp()
# fails with an exception.
class BrokenFixture(fixtures.Fixture):
def setUp(self):
fixtures.Fixture.setUp(self)
self.addDetail('content', content.text_content("foobar"))
raise Exception()
fixture = BrokenFixture()
class SimpleTest(TestCase):
def test_foo(self):
self.useFixture(fixture)
result = ExtendedTestResult()
SimpleTest('test_foo').run(result)
self.assertEqual('addError', result._events[-2][0])
details = result._events[-2][2]
self.assertEqual(['content', 'traceback'], sorted(details))
self.assertEqual('foobar', ''.join(details['content'].iter_text()))
def test_suite():
    """Collect all tests in this module into a single suite."""
    from unittest import TestLoader
    return TestLoader().loadTestsFromName(__name__)

View File

@ -1,213 +0,0 @@
# Copyright (c) 2010-2012 testtools developers. See LICENSE for details.
from testtools import TestCase
from testtools.helpers import (
try_import,
try_imports,
)
from testtools.matchers import (
Equals,
Is,
Not,
)
from testtools.tests.helpers import (
FullStackRunTest,
hide_testtools_stack,
is_stack_hidden,
safe_hasattr,
)
def check_error_callback(test, function, arg, expected_error_count,
                         expect_result):
    """General test template for the error_callback argument.

    :param test: Test case instance.
    :param function: Either try_import or try_imports.
    :param arg: Name or names to import.
    :param expected_error_count: Expected number of calls to the callback.
    :param expect_result: Boolean for whether a module should
        ultimately be returned or not.
    """
    errors_seen = []
    def record_error(exc):
        # Every callback invocation must receive an ImportError.
        test.assertIsInstance(exc, ImportError)
        errors_seen.append(exc)
    try:
        outcome = function(arg, error_callback=record_error)
    except ImportError:
        # Raising is only acceptable when no result was expected.
        test.assertFalse(expect_result)
    else:
        if expect_result:
            test.assertThat(outcome, Not(Is(None)))
        else:
            test.assertThat(outcome, Is(None))
    test.assertEquals(len(errors_seen), expected_error_count)
class TestSafeHasattr(TestCase):
    """Tests for the safe_hasattr helper."""

    def test_attribute_not_there(self):
        class Thing(object):
            pass
        self.assertEqual(False, safe_hasattr(Thing(), 'anything'))

    def test_attribute_there(self):
        class Thing(object):
            pass
        thing = Thing()
        thing.attribute = None
        self.assertEqual(True, safe_hasattr(thing, 'attribute'))

    def test_property_there(self):
        class Thing(object):
            @property
            def attribute(self):
                return None
        self.assertEqual(True, safe_hasattr(Thing(), 'attribute'))

    def test_property_raises(self):
        # An exception raised by a property getter propagates out of
        # safe_hasattr rather than being swallowed.
        class Thing(object):
            @property
            def attribute(self):
                1/0
        thing = Thing()
        self.assertRaises(ZeroDivisionError, safe_hasattr, thing, 'attribute')
class TestTryImport(TestCase):
"""Tests for try_import."""
def test_doesnt_exist(self):
# try_import('thing', foo) returns foo if 'thing' doesn't exist.
marker = object()
result = try_import('doesntexist', marker)
self.assertThat(result, Is(marker))
def test_None_is_default_alternative(self):
# try_import('thing') returns None if 'thing' doesn't exist.
result = try_import('doesntexist')
self.assertThat(result, Is(None))
def test_existing_module(self):
# try_import('thing', foo) imports 'thing' and returns it if it's a
# module that exists.
result = try_import('os', object())
import os
self.assertThat(result, Is(os))
def test_existing_submodule(self):
# try_import('thing.another', foo) imports 'thing' and returns it if
# it's a module that exists.
result = try_import('os.path', object())
import os
self.assertThat(result, Is(os.path))
def test_nonexistent_submodule(self):
# try_import('thing.another', foo) imports 'thing' and returns foo if
# 'another' doesn't exist.
marker = object()
result = try_import('os.doesntexist', marker)
self.assertThat(result, Is(marker))
def test_object_from_module(self):
# try_import('thing.object') imports 'thing' and returns
# 'thing.object' if 'thing' is a module and 'object' is not.
result = try_import('os.path.join')
import os
self.assertThat(result, Is(os.path.join))
def test_error_callback(self):
# the error callback is called on failures.
check_error_callback(self, try_import, 'doesntexist', 1, False)
def test_error_callback_missing_module_member(self):
# the error callback is called on failures to find an object
# inside an existing module.
check_error_callback(self, try_import, 'os.nonexistent', 1, False)
def test_error_callback_not_on_success(self):
# the error callback is not called on success.
check_error_callback(self, try_import, 'os.path', 0, True)
class TestTryImports(TestCase):
"""Tests for try_imports, the multi-candidate variant of try_import."""
def test_doesnt_exist(self):
# try_imports('thing', foo) returns foo if 'thing' doesn't exist.
marker = object()
result = try_imports(['doesntexist'], marker)
self.assertThat(result, Is(marker))
def test_fallback(self):
# The first importable candidate wins.
result = try_imports(['doesntexist', 'os'])
import os
self.assertThat(result, Is(os))
def test_None_is_default_alternative(self):
# With no alternative given, exhausting all candidates raises
# ImportError naming every candidate tried.
e = self.assertRaises(
ImportError, try_imports, ['doesntexist', 'noreally'])
self.assertThat(
str(e),
Equals("Could not import any of: doesntexist, noreally"))
def test_existing_module(self):
# try_imports('thing', foo) imports 'thing' and returns it if it's a
# module that exists.
result = try_imports(['os'], object())
import os
self.assertThat(result, Is(os))
def test_existing_submodule(self):
# try_imports('thing.another', foo) imports 'thing' and returns it if
# it's a module that exists.
result = try_imports(['os.path'], object())
import os
self.assertThat(result, Is(os.path))
def test_nonexistent_submodule(self):
# try_imports('thing.another', foo) imports 'thing' and returns foo if
# 'another' doesn't exist.
marker = object()
result = try_imports(['os.doesntexist'], marker)
self.assertThat(result, Is(marker))
def test_fallback_submodule(self):
result = try_imports(['os.doesntexist', 'os.path'])
import os
self.assertThat(result, Is(os.path))
def test_error_callback(self):
# One error for every class that doesn't exist.
check_error_callback(self, try_imports,
['os.doesntexist', 'os.notthiseither'],
2, False)
check_error_callback(self, try_imports,
['os.doesntexist', 'os.notthiseither', 'os'],
2, True)
check_error_callback(self, try_imports,
['os.path'],
0, True)
class TestStackHiding(TestCase):
    """Tests for hide_testtools_stack / is_stack_hidden round-tripping."""

    run_tests_with = FullStackRunTest

    def setUp(self):
        super(TestStackHiding, self).setUp()
        # Restore whatever hiding state was in effect before this test.
        self.addCleanup(hide_testtools_stack, is_stack_hidden())

    def test_is_stack_hidden_consistent_true(self):
        hide_testtools_stack(True)
        self.assertEqual(True, is_stack_hidden())

    def test_is_stack_hidden_consistent_false(self):
        hide_testtools_stack(False)
        self.assertEqual(False, is_stack_hidden())
def test_suite():
    """Load and return all tests from this module."""
    from unittest import TestLoader
    loader = TestLoader()
    return loader.loadTestsFromName(__name__)

View File

@ -1,167 +0,0 @@
# Copyright (c) 2010 Twisted Matrix Laboratories.
# See LICENSE for details.
"""Tests for testtools.monkey."""
from testtools import TestCase
from testtools.matchers import MatchesException, Raises
from testtools.monkey import MonkeyPatcher, patch
class TestObj:
    """Simple attribute holder used as a monkey-patching target."""

    def __init__(self):
        # Three plain string attributes for the patch tests to mutate
        # and restore.
        self.foo = 'foo value'
        self.bar = 'bar value'
        self.baz = 'baz value'
class MonkeyPatcherTest(TestCase):
"""
Tests for 'MonkeyPatcher' monkey-patching class.
"""
def setUp(self):
super(MonkeyPatcherTest, self).setUp()
# Two identical objects: one to mutate, one kept pristine so the
# tests can compare against the original attribute values.
self.test_object = TestObj()
self.original_object = TestObj()
self.monkey_patcher = MonkeyPatcher()
def test_empty(self):
# A monkey patcher without patches doesn't change a thing.
self.monkey_patcher.patch()
# We can't assert that all state is unchanged, but at least we can
# check our test object.
self.assertEquals(self.original_object.foo, self.test_object.foo)
self.assertEquals(self.original_object.bar, self.test_object.bar)
self.assertEquals(self.original_object.baz, self.test_object.baz)
def test_construct_with_patches(self):
# Constructing a 'MonkeyPatcher' with patches adds all of the given
# patches to the patch list.
patcher = MonkeyPatcher((self.test_object, 'foo', 'haha'),
(self.test_object, 'bar', 'hehe'))
patcher.patch()
self.assertEquals('haha', self.test_object.foo)
self.assertEquals('hehe', self.test_object.bar)
self.assertEquals(self.original_object.baz, self.test_object.baz)
def test_patch_existing(self):
# Patching an attribute that exists sets it to the value defined in the
# patch.
self.monkey_patcher.add_patch(self.test_object, 'foo', 'haha')
self.monkey_patcher.patch()
self.assertEquals(self.test_object.foo, 'haha')
def test_patch_non_existing(self):
# Patching a non-existing attribute sets it to the value defined in
# the patch.
self.monkey_patcher.add_patch(self.test_object, 'doesntexist', 'value')
self.monkey_patcher.patch()
self.assertEquals(self.test_object.doesntexist, 'value')
def test_restore_non_existing(self):
# Restoring a value that didn't exist before the patch deletes the
# value.
self.monkey_patcher.add_patch(self.test_object, 'doesntexist', 'value')
self.monkey_patcher.patch()
self.monkey_patcher.restore()
marker = object()
self.assertIs(marker, getattr(self.test_object, 'doesntexist', marker))
def test_patch_already_patched(self):
# Adding a patch for an object and attribute that already have a patch
# overrides the existing patch.
self.monkey_patcher.add_patch(self.test_object, 'foo', 'blah')
self.monkey_patcher.add_patch(self.test_object, 'foo', 'BLAH')
self.monkey_patcher.patch()
self.assertEquals(self.test_object.foo, 'BLAH')
self.monkey_patcher.restore()
self.assertEquals(self.test_object.foo, self.original_object.foo)
def test_restore_twice_is_a_no_op(self):
# Restoring an already-restored monkey patch is a no-op.
self.monkey_patcher.add_patch(self.test_object, 'foo', 'blah')
self.monkey_patcher.patch()
self.monkey_patcher.restore()
self.assertEquals(self.test_object.foo, self.original_object.foo)
self.monkey_patcher.restore()
self.assertEquals(self.test_object.foo, self.original_object.foo)
def test_run_with_patches_decoration(self):
# run_with_patches runs the given callable, passing in all arguments
# and keyword arguments, and returns the return value of the callable.
log = []
def f(a, b, c=None):
log.append((a, b, c))
return 'foo'
result = self.monkey_patcher.run_with_patches(f, 1, 2, c=10)
self.assertEquals('foo', result)
self.assertEquals([(1, 2, 10)], log)
def test_repeated_run_with_patches(self):
# We can call the same function with run_with_patches more than
# once. All patches apply for each call.
def f():
return (self.test_object.foo, self.test_object.bar,
self.test_object.baz)
self.monkey_patcher.add_patch(self.test_object, 'foo', 'haha')
result = self.monkey_patcher.run_with_patches(f)
self.assertEquals(
('haha', self.original_object.bar, self.original_object.baz),
result)
result = self.monkey_patcher.run_with_patches(f)
self.assertEquals(
('haha', self.original_object.bar, self.original_object.baz),
result)
def test_run_with_patches_restores(self):
# run_with_patches restores the original values after the function has
# executed.
self.monkey_patcher.add_patch(self.test_object, 'foo', 'haha')
self.assertEquals(self.original_object.foo, self.test_object.foo)
self.monkey_patcher.run_with_patches(lambda: None)
self.assertEquals(self.original_object.foo, self.test_object.foo)
def test_run_with_patches_restores_on_exception(self):
# run_with_patches restores the original values even when the function
# raises an exception.
def _():
self.assertEquals(self.test_object.foo, 'haha')
self.assertEquals(self.test_object.bar, 'blahblah')
raise RuntimeError("Something went wrong!")
self.monkey_patcher.add_patch(self.test_object, 'foo', 'haha')
self.monkey_patcher.add_patch(self.test_object, 'bar', 'blahblah')
self.assertThat(lambda:self.monkey_patcher.run_with_patches(_),
Raises(MatchesException(RuntimeError("Something went wrong!"))))
self.assertEquals(self.test_object.foo, self.original_object.foo)
self.assertEquals(self.test_object.bar, self.original_object.bar)
class TestPatchHelper(TestCase):
    """Tests for the module-level patch() convenience function."""

    def test_patch_patches(self):
        # patch(obj, name, value) sets obj.name to value.
        target = TestObj()
        patch(target, 'foo', 42)
        self.assertEqual(42, target.foo)

    def test_patch_returns_cleanup(self):
        # patch() also hands back a nullary callable that restores the
        # object to its original state when run.
        target = TestObj()
        saved = target.foo
        undo = patch(target, 'foo', 42)
        undo()
        self.assertEqual(saved, target.foo)
def test_suite():
    """Return a suite containing every test in this module."""
    from unittest import TestLoader
    return TestLoader().loadTestsFromName(__name__)

View File

@ -1,120 +0,0 @@
# Copyright (c) 2010 testtools developers. See LICENSE for details.
"""Tests for the test runner logic."""
from unittest import TestSuite
from testtools.compat import (
_b,
StringIO,
)
from testtools.helpers import try_import
fixtures = try_import('fixtures')
import testtools
from testtools import TestCase, run
from testtools.matchers import Contains
if fixtures:
# Defined only when the optional 'fixtures' dependency is importable.
class SampleTestFixture(fixtures.Fixture):
"""Creates testtools.runexample temporarily."""
def __init__(self):
# The string below is written verbatim as the generated package's
# __init__.py: two trivially-passing tests plus a test_suite() hook.
self.package = fixtures.PythonPackage(
'runexample', [('__init__.py', _b("""
from testtools import TestCase
class TestFoo(TestCase):
def test_bar(self):
pass
def test_quux(self):
pass
def test_suite():
from unittest import TestLoader
return TestLoader().loadTestsFromName(__name__)
"""))])
def setUp(self):
super(SampleTestFixture, self).setUp()
self.useFixture(self.package)
# Graft the generated package onto the installed testtools package so
# it is importable as 'testtools.runexample'; undone during cleanup.
testtools.__path__.append(self.package.base)
self.addCleanup(testtools.__path__.remove, self.package.base)
class TestRun(TestCase):
"""Tests for the testtools.run command-line entry point."""
def setUp(self):
super(TestRun, self).setUp()
if fixtures is None:
self.skipTest("Need fixtures")
def test_run_list(self):
# '-l' lists the test ids without running them.
self.useFixture(SampleTestFixture())
out = StringIO()
run.main(['prog', '-l', 'testtools.runexample.test_suite'], out)
self.assertEqual("""testtools.runexample.TestFoo.test_bar
testtools.runexample.TestFoo.test_quux
""", out.getvalue())
def test_run_orders_tests(self):
# NOTE(review): this body is identical to test_run_load_list below;
# presumably it was meant to exercise ordering of a load-list —
# confirm against upstream testtools history.
self.useFixture(SampleTestFixture())
out = StringIO()
# We load two tests - one that exists and one that doesn't, and we
# should get the one that exists and neither the one that doesn't nor
# the unmentioned one that does.
tempdir = self.useFixture(fixtures.TempDir())
tempname = tempdir.path + '/tests.list'
f = open(tempname, 'wb')
try:
f.write(_b("""
testtools.runexample.TestFoo.test_bar
testtools.runexample.missingtest
"""))
finally:
f.close()
run.main(['prog', '-l', '--load-list', tempname,
'testtools.runexample.test_suite'], out)
self.assertEqual("""testtools.runexample.TestFoo.test_bar
""", out.getvalue())
def test_run_load_list(self):
# '--load-list' filters the suite down to the ids in the file.
self.useFixture(SampleTestFixture())
out = StringIO()
# We load two tests - one that exists and one that doesn't, and we
# should get the one that exists and neither the one that doesn't nor
# the unmentioned one that does.
tempdir = self.useFixture(fixtures.TempDir())
tempname = tempdir.path + '/tests.list'
f = open(tempname, 'wb')
try:
f.write(_b("""
testtools.runexample.TestFoo.test_bar
testtools.runexample.missingtest
"""))
finally:
f.close()
run.main(['prog', '-l', '--load-list', tempname,
'testtools.runexample.test_suite'], out)
self.assertEqual("""testtools.runexample.TestFoo.test_bar
""", out.getvalue())
def test_run_failfast(self):
# With failfast=True the runner stops after the first failure, so
# only one of the two failing tests is run.
stdout = self.useFixture(fixtures.StringStream('stdout'))
class Failing(TestCase):
def test_a(self):
self.fail('a')
def test_b(self):
self.fail('b')
runner = run.TestToolsTestRunner(failfast=True)
with fixtures.MonkeyPatch('sys.stdout', stdout.stream):
runner.run(TestSuite([Failing('test_a'), Failing('test_b')]))
self.assertThat(
stdout.getDetails()['stdout'].as_text(), Contains('Ran 1 test'))
def test_suite():
    """Build the suite for this module's tests."""
    from unittest import TestLoader
    loader = TestLoader()
    return loader.loadTestsFromName(__name__)

View File

@ -1,303 +0,0 @@
# Copyright (c) 2009-2011 testtools developers. See LICENSE for details.
"""Tests for the RunTest single test execution logic."""
from testtools import (
ExtendedToOriginalDecorator,
run_test_with,
RunTest,
TestCase,
TestResult,
)
from testtools.matchers import MatchesException, Is, Raises
from testtools.testresult.doubles import ExtendedTestResult
from testtools.tests.helpers import FullStackRunTest
class TestRunTest(TestCase):
"""Tests for the RunTest single-test execution protocol."""
run_tests_with = FullStackRunTest
def make_case(self):
# Helper: a minimal TestCase instance with one empty test method.
class Case(TestCase):
def test(self):
pass
return Case('test')
def test___init___short(self):
run = RunTest("bar")
self.assertEqual("bar", run.case)
self.assertEqual([], run.handlers)
def test__init____handlers(self):
handlers = [("quux", "baz")]
run = RunTest("bar", handlers)
self.assertEqual(handlers, run.handlers)
def test_run_with_result(self):
# test.run passes result down to _run_test_method.
log = []
class Case(TestCase):
def _run_test_method(self, result):
log.append(result)
case = Case('_run_test_method')
run = RunTest(case, lambda x: log.append(x))
result = TestResult()
run.run(result)
self.assertEqual(1, len(log))
self.assertEqual(result, log[0].decorated)
def test_run_no_result_manages_new_result(self):
# Calling run() without a result creates and decorates a TestResult.
log = []
run = RunTest(self.make_case(), lambda x: log.append(x) or x)
result = run.run()
self.assertIsInstance(result.decorated, TestResult)
def test__run_core_called(self):
# run() delegates the actual test execution to _run_core.
case = self.make_case()
log = []
run = RunTest(case, lambda x: x)
run._run_core = lambda: log.append('foo')
run.run()
self.assertEqual(['foo'], log)
def test__run_user_does_not_catch_keyboard(self):
# KeyboardInterrupt always propagates; nothing is recorded.
case = self.make_case()
def raises():
raise KeyboardInterrupt("yo")
run = RunTest(case, None)
run.result = ExtendedTestResult()
self.assertThat(lambda: run._run_user(raises),
Raises(MatchesException(KeyboardInterrupt)))
self.assertEqual([], run.result._events)
def test__run_user_calls_onException(self):
# Registered onException handlers see the full exc_info triple.
case = self.make_case()
log = []
def handler(exc_info):
log.append("got it")
self.assertEqual(3, len(exc_info))
self.assertIsInstance(exc_info[1], KeyError)
self.assertIs(KeyError, exc_info[0])
case.addOnException(handler)
e = KeyError('Yo')
def raises():
raise e
run = RunTest(case, [(KeyError, None)])
run.result = ExtendedTestResult()
status = run._run_user(raises)
self.assertEqual(run.exception_caught, status)
self.assertEqual([], run.result._events)
self.assertEqual(["got it"], log)
def test__run_user_can_catch_Exception(self):
case = self.make_case()
e = Exception('Yo')
def raises():
raise e
log = []
run = RunTest(case, [(Exception, None)])
run.result = ExtendedTestResult()
status = run._run_user(raises)
self.assertEqual(run.exception_caught, status)
self.assertEqual([], run.result._events)
self.assertEqual([], log)
def test__run_user_uncaught_Exception_raised(self):
# An exception with no matching handler propagates unchanged.
case = self.make_case()
e = KeyError('Yo')
def raises():
raise e
log = []
def log_exc(self, result, err):
log.append((result, err))
run = RunTest(case, [(ValueError, log_exc)])
run.result = ExtendedTestResult()
self.assertThat(lambda: run._run_user(raises),
Raises(MatchesException(KeyError)))
self.assertEqual([], run.result._events)
self.assertEqual([], log)
def test__run_user_uncaught_Exception_from_exception_handler_raised(self):
# An exception raised *by* an onException handler must not itself be
# caught, even if its type has a registered handler.
case = self.make_case()
def broken_handler(exc_info):
# ValueError because thats what we know how to catch - and must
# not.
raise ValueError('boo')
case.addOnException(broken_handler)
e = KeyError('Yo')
def raises():
raise e
log = []
def log_exc(self, result, err):
log.append((result, err))
run = RunTest(case, [(ValueError, log_exc)])
run.result = ExtendedTestResult()
self.assertThat(lambda: run._run_user(raises),
Raises(MatchesException(ValueError)))
self.assertEqual([], run.result._events)
self.assertEqual([], log)
def test__run_user_returns_result(self):
# With no exception, _run_user passes through the callable's result.
case = self.make_case()
def returns():
return 1
run = RunTest(case)
run.result = ExtendedTestResult()
self.assertEqual(1, run._run_user(returns))
self.assertEqual([], run.result._events)
def test__run_one_decorates_result(self):
# _run_one wraps the given result in ExtendedToOriginalDecorator
# before handing it to _run_prepared_result.
log = []
class Run(RunTest):
def _run_prepared_result(self, result):
log.append(result)
return result
run = Run(self.make_case(), lambda x: x)
result = run._run_one('foo')
self.assertEqual([result], log)
self.assertIsInstance(log[0], ExtendedToOriginalDecorator)
self.assertEqual('foo', result.decorated)
def test__run_prepared_result_calls_start_and_stop_test(self):
result = ExtendedTestResult()
case = self.make_case()
run = RunTest(case, lambda x: x)
run.run(result)
self.assertEqual([
('startTest', case),
('addSuccess', case),
('stopTest', case),
], result._events)
def test__run_prepared_result_calls_stop_test_always(self):
# stopTest is emitted even when the core raises.
result = ExtendedTestResult()
case = self.make_case()
def inner():
raise Exception("foo")
run = RunTest(case, lambda x: x)
run._run_core = inner
self.assertThat(lambda: run.run(result),
Raises(MatchesException(Exception("foo"))))
self.assertEqual([
('startTest', case),
('stopTest', case),
], result._events)
class CustomRunTest(RunTest):
    """A RunTest whose run() returns a sentinel instead of running tests."""

    # Shared sentinel so tests can assert identity on the return value.
    marker = object()

    def run(self, result=None):
        return self.marker
class TestTestCaseSupportForRunTest(TestCase):
"""Tests for how TestCase selects which RunTest class to use.

Precedence exercised below: constructor argument beats the
run_test_with decorator, which beats the run_tests_with class variable.
"""
def test_pass_custom_run_test(self):
class SomeCase(TestCase):
def test_foo(self):
pass
result = TestResult()
case = SomeCase('test_foo', runTest=CustomRunTest)
from_run_test = case.run(result)
self.assertThat(from_run_test, Is(CustomRunTest.marker))
def test_default_is_runTest_class_variable(self):
class SomeCase(TestCase):
run_tests_with = CustomRunTest
def test_foo(self):
pass
result = TestResult()
case = SomeCase('test_foo')
from_run_test = case.run(result)
self.assertThat(from_run_test, Is(CustomRunTest.marker))
def test_constructor_argument_overrides_class_variable(self):
# If a 'runTest' argument is passed to the test's constructor, that
# overrides the class variable.
marker = object()
class DifferentRunTest(RunTest):
def run(self, result=None):
return marker
class SomeCase(TestCase):
run_tests_with = CustomRunTest
def test_foo(self):
pass
result = TestResult()
case = SomeCase('test_foo', runTest=DifferentRunTest)
from_run_test = case.run(result)
self.assertThat(from_run_test, Is(marker))
def test_decorator_for_run_test(self):
# Individual test methods can be marked as needing a special runner.
class SomeCase(TestCase):
@run_test_with(CustomRunTest)
def test_foo(self):
pass
result = TestResult()
case = SomeCase('test_foo')
from_run_test = case.run(result)
self.assertThat(from_run_test, Is(CustomRunTest.marker))
def test_extended_decorator_for_run_test(self):
# Individual test methods can be marked as needing a special runner.
# Extra arguments can be passed to the decorator which will then be
# passed on to the RunTest object.
marker = object()
class FooRunTest(RunTest):
def __init__(self, case, handlers=None, bar=None):
super(FooRunTest, self).__init__(case, handlers)
self.bar = bar
def run(self, result=None):
return self.bar
class SomeCase(TestCase):
@run_test_with(FooRunTest, bar=marker)
def test_foo(self):
pass
result = TestResult()
case = SomeCase('test_foo')
from_run_test = case.run(result)
self.assertThat(from_run_test, Is(marker))
def test_works_as_inner_decorator(self):
# Even if run_test_with is the innermost decorator, it will be
# respected.
def wrapped(function):
"""Silly, trivial decorator."""
def decorated(*args, **kwargs):
return function(*args, **kwargs)
decorated.__name__ = function.__name__
decorated.__dict__.update(function.__dict__)
return decorated
class SomeCase(TestCase):
@wrapped
@run_test_with(CustomRunTest)
def test_foo(self):
pass
result = TestResult()
case = SomeCase('test_foo')
from_run_test = case.run(result)
self.assertThat(from_run_test, Is(CustomRunTest.marker))
def test_constructor_overrides_decorator(self):
# If a 'runTest' argument is passed to the test's constructor, that
# overrides the decorator.
marker = object()
class DifferentRunTest(RunTest):
def run(self, result=None):
return marker
class SomeCase(TestCase):
@run_test_with(CustomRunTest)
def test_foo(self):
pass
result = TestResult()
case = SomeCase('test_foo', runTest=DifferentRunTest)
from_run_test = case.run(result)
self.assertThat(from_run_test, Is(marker))
def test_suite():
    """Gather this module's tests for the suite runner."""
    from unittest import TestLoader
    loader = TestLoader()
    return loader.loadTestsFromName(__name__)

View File

@ -1,332 +0,0 @@
# Copyright (c) 2010 testtools developers. See LICENSE for details.
"""Tests for the evil Twisted reactor-spinning we do."""
import os
import signal
from testtools import (
skipIf,
TestCase,
)
from testtools.helpers import try_import
from testtools.matchers import (
Equals,
Is,
MatchesException,
Raises,
)
_spinner = try_import('testtools._spinner')
defer = try_import('twisted.internet.defer')
Failure = try_import('twisted.python.failure.Failure')
class NeedsTwistedTestCase(TestCase):
    """Base class that skips its tests when Twisted is not importable."""

    def setUp(self):
        super(NeedsTwistedTestCase, self).setUp()
        # Both names are populated via try_import at module level; either
        # being None means Twisted is absent.
        if defer is None or Failure is None:
            self.skipTest("Need Twisted to run")
class TestNotReentrant(NeedsTwistedTestCase):
"""Tests for the _spinner.not_reentrant decorator."""
def test_not_reentrant(self):
# A function decorated as not being re-entrant will raise a
# _spinner.ReentryError if it is called while it is running.
calls = []
@_spinner.not_reentrant
def log_something():
calls.append(None)
if len(calls) < 5:
log_something()
self.assertThat(
log_something, Raises(MatchesException(_spinner.ReentryError)))
self.assertEqual(1, len(calls))
def test_deeper_stack(self):
# Re-entry is also detected through mutual recursion (f -> g -> f),
# so each function runs at most once before the error fires.
calls = []
@_spinner.not_reentrant
def g():
calls.append(None)
if len(calls) < 5:
f()
@_spinner.not_reentrant
def f():
calls.append(None)
if len(calls) < 5:
g()
self.assertThat(f, Raises(MatchesException(_spinner.ReentryError)))
self.assertEqual(2, len(calls))
class TestExtractResult(NeedsTwistedTestCase):
"""Tests for _spinner.extract_result."""
def test_not_fired(self):
# _spinner.extract_result raises _spinner.DeferredNotFired if it's
# given a Deferred that has not fired.
self.assertThat(lambda:_spinner.extract_result(defer.Deferred()),
Raises(MatchesException(_spinner.DeferredNotFired)))
def test_success(self):
# _spinner.extract_result returns the value of the Deferred if it has
# fired successfully.
marker = object()
d = defer.succeed(marker)
self.assertThat(_spinner.extract_result(d), Equals(marker))
def test_failure(self):
# _spinner.extract_result raises the failure's exception if it's given
# a Deferred that is failing.
try:
1/0
except ZeroDivisionError:
f = Failure()
d = defer.fail(f)
self.assertThat(lambda:_spinner.extract_result(d),
Raises(MatchesException(ZeroDivisionError)))
class TestTrapUnhandledErrors(NeedsTwistedTestCase):
"""Tests for _spinner.trap_unhandled_errors."""
def test_no_deferreds(self):
# With no Deferreds involved, the callable's result is passed
# through and no errors are reported.
marker = object()
result, errors = _spinner.trap_unhandled_errors(lambda: marker)
self.assertEqual([], errors)
self.assertIs(marker, result)
def test_unhandled_error(self):
# A failing Deferred whose errback is never attached shows up in the
# errors list returned by trap_unhandled_errors.
failures = []
def make_deferred_but_dont_handle():
try:
1/0
except ZeroDivisionError:
f = Failure()
failures.append(f)
# Deliberately discard the Deferred: leaving the failure
# unhandled is the whole point of this test.
defer.fail(f)
result, errors = _spinner.trap_unhandled_errors(
make_deferred_but_dont_handle)
self.assertIs(None, result)
self.assertEqual(failures, [error.failResult for error in errors])
class TestRunInReactor(NeedsTwistedTestCase):
    """Tests for ``_spinner.Spinner.run`` and its reactor clean-up logic."""

    def make_reactor(self):
        """Return the global Twisted reactor."""
        from twisted.internet import reactor
        return reactor

    def make_spinner(self, reactor=None):
        """Make a Spinner around 'reactor' (the global reactor by default)."""
        if reactor is None:
            reactor = self.make_reactor()
        return _spinner.Spinner(reactor)

    def make_timeout(self):
        """A short timeout, in seconds, used for reactor runs in these tests."""
        return 0.01

    def test_function_called(self):
        # run_in_reactor actually calls the function given to it.
        calls = []
        marker = object()
        self.make_spinner().run(self.make_timeout(), calls.append, marker)
        self.assertThat(calls, Equals([marker]))

    def test_return_value_returned(self):
        # run_in_reactor returns the value returned by the function given to
        # it.
        marker = object()
        result = self.make_spinner().run(self.make_timeout(), lambda: marker)
        self.assertThat(result, Is(marker))

    def test_exception_reraised(self):
        # If the given function raises an error, run_in_reactor re-raises that
        # error.
        self.assertThat(
            lambda: self.make_spinner().run(self.make_timeout(), lambda: 1 / 0),
            Raises(MatchesException(ZeroDivisionError)))

    def test_keyword_arguments(self):
        # run_in_reactor passes keyword arguments on.
        calls = []
        function = lambda *a, **kw: calls.extend([a, kw])
        self.make_spinner().run(self.make_timeout(), function, foo=42)
        self.assertThat(calls, Equals([(), {'foo': 42}]))

    def test_not_reentrant(self):
        # run_in_reactor raises an error if it is called inside another call
        # to run_in_reactor.
        spinner = self.make_spinner()
        self.assertThat(lambda: spinner.run(
            self.make_timeout(), spinner.run, self.make_timeout(),
            lambda: None), Raises(MatchesException(_spinner.ReentryError)))

    def test_deferred_value_returned(self):
        # If the given function returns a Deferred, run_in_reactor returns the
        # value in the Deferred at the end of the callback chain.
        marker = object()
        result = self.make_spinner().run(
            self.make_timeout(), lambda: defer.succeed(marker))
        self.assertThat(result, Is(marker))

    def test_preserve_signal_handler(self):
        signals = ['SIGINT', 'SIGTERM', 'SIGCHLD']
        # list() the filter result: on Python 3 'filter' is a one-shot
        # iterator and would be exhausted after the first loop below.
        signals = list(filter(
            None, (getattr(signal, name, None) for name in signals)))
        for sig in signals:
            self.addCleanup(signal.signal, sig, signal.getsignal(sig))
        new_hdlrs = list(lambda *a: None for _ in signals)
        for sig, hdlr in zip(signals, new_hdlrs):
            signal.signal(sig, hdlr)
        spinner = self.make_spinner()
        spinner.run(self.make_timeout(), lambda: None)
        # list() the map result so the comparison works on Python 3 too.
        self.assertEqual(new_hdlrs, list(map(signal.getsignal, signals)))

    def test_timeout(self):
        # If the function takes too long to run, we raise a
        # _spinner.TimeoutError.
        timeout = self.make_timeout()
        self.assertThat(
            lambda: self.make_spinner().run(timeout, lambda: defer.Deferred()),
            Raises(MatchesException(_spinner.TimeoutError)))

    def test_no_junk_by_default(self):
        # If the reactor hasn't spun yet, then there cannot be any junk.
        spinner = self.make_spinner()
        self.assertThat(spinner.get_junk(), Equals([]))

    def test_clean_do_nothing(self):
        # If there's nothing going on in the reactor, then clean does nothing
        # and returns an empty list.
        spinner = self.make_spinner()
        result = spinner._clean()
        self.assertThat(result, Equals([]))

    def test_clean_delayed_call(self):
        # If there's a delayed call in the reactor, then clean cancels it and
        # returns it in the junk list.
        reactor = self.make_reactor()
        spinner = self.make_spinner(reactor)
        call = reactor.callLater(10, lambda: None)
        results = spinner._clean()
        self.assertThat(results, Equals([call]))
        self.assertThat(call.active(), Equals(False))

    def test_clean_delayed_call_cancelled(self):
        # If there's a delayed call that's just been cancelled, then it's no
        # longer there.
        reactor = self.make_reactor()
        spinner = self.make_spinner(reactor)
        call = reactor.callLater(10, lambda: None)
        call.cancel()
        results = spinner._clean()
        self.assertThat(results, Equals([]))

    def test_clean_selectables(self):
        # If there's still a selectable (e.g. a listening socket), then
        # clean() removes it from the reactor's registry.
        #
        # Note that the socket is left open. This emulates a bug in trial.
        from twisted.internet.protocol import ServerFactory
        reactor = self.make_reactor()
        spinner = self.make_spinner(reactor)
        port = reactor.listenTCP(0, ServerFactory())
        spinner.run(self.make_timeout(), lambda: None)
        results = spinner.get_junk()
        self.assertThat(results, Equals([port]))

    def test_clean_running_threads(self):
        # Spinning the reactor waits for reactor-spawned threads to finish.
        import threading
        import time
        current_threads = list(threading.enumerate())
        reactor = self.make_reactor()
        timeout = self.make_timeout()
        spinner = self.make_spinner(reactor)
        spinner.run(timeout, reactor.callInThread, time.sleep, timeout / 2.0)
        # Python before 2.5 has a race condition with thread handling where
        # join() does not remove threads from enumerate before returning - the
        # thread being joined does the removal. This was fixed in Python 2.5
        # but we still support 2.4, so we have to workaround the issue.
        # http://bugs.python.org/issue1703448.
        # Note: is_alive() is the modern spelling; isAlive() was removed in
        # Python 3.9.
        self.assertThat(
            [thread for thread in threading.enumerate() if thread.is_alive()],
            Equals(current_threads))

    def test_leftover_junk_available(self):
        # If 'run' is given a function that leaves the reactor dirty in some
        # way, 'run' will clean up the reactor and then store information
        # about the junk. This information can be got using get_junk.
        from twisted.internet.protocol import ServerFactory
        reactor = self.make_reactor()
        spinner = self.make_spinner(reactor)
        port = spinner.run(
            self.make_timeout(), reactor.listenTCP, 0, ServerFactory())
        self.assertThat(spinner.get_junk(), Equals([port]))

    def test_will_not_run_with_previous_junk(self):
        # If 'run' is called and there's still junk in the spinner's junk
        # list, then the spinner will refuse to run.
        from twisted.internet.protocol import ServerFactory
        reactor = self.make_reactor()
        spinner = self.make_spinner(reactor)
        timeout = self.make_timeout()
        spinner.run(timeout, reactor.listenTCP, 0, ServerFactory())
        self.assertThat(lambda: spinner.run(timeout, lambda: None),
                        Raises(MatchesException(_spinner.StaleJunkError)))

    def test_clear_junk_clears_previous_junk(self):
        # clear_junk returns the junk accumulated by previous runs and resets
        # the spinner's junk list, so get_junk is empty afterwards.
        from twisted.internet.protocol import ServerFactory
        reactor = self.make_reactor()
        spinner = self.make_spinner(reactor)
        timeout = self.make_timeout()
        port = spinner.run(timeout, reactor.listenTCP, 0, ServerFactory())
        junk = spinner.clear_junk()
        self.assertThat(junk, Equals([port]))
        self.assertThat(spinner.get_junk(), Equals([]))

    @skipIf(os.name != "posix", "Sending SIGINT with os.kill is posix only")
    def test_sigint_raises_no_result_error(self):
        # If we get a SIGINT during a run, we raise _spinner.NoResultError.
        SIGINT = getattr(signal, 'SIGINT', None)
        if not SIGINT:
            self.skipTest("SIGINT not available")
        reactor = self.make_reactor()
        spinner = self.make_spinner(reactor)
        timeout = self.make_timeout()
        reactor.callLater(timeout, os.kill, os.getpid(), SIGINT)
        self.assertThat(lambda: spinner.run(timeout * 5, defer.Deferred),
                        Raises(MatchesException(_spinner.NoResultError)))
        self.assertEqual([], spinner._clean())

    @skipIf(os.name != "posix", "Sending SIGINT with os.kill is posix only")
    def test_sigint_raises_no_result_error_second_time(self):
        # If we get a SIGINT during a run, we raise _spinner.NoResultError.
        # This test is exactly the same as test_sigint_raises_no_result_error,
        # and exists to make sure we haven't futzed with state.
        self.test_sigint_raises_no_result_error()

    @skipIf(os.name != "posix", "Sending SIGINT with os.kill is posix only")
    def test_fast_sigint_raises_no_result_error(self):
        # Like test_sigint_raises_no_result_error, but the SIGINT arrives as
        # soon as the reactor starts rather than after a delay.
        SIGINT = getattr(signal, 'SIGINT', None)
        if not SIGINT:
            self.skipTest("SIGINT not available")
        reactor = self.make_reactor()
        spinner = self.make_spinner(reactor)
        timeout = self.make_timeout()
        reactor.callWhenRunning(os.kill, os.getpid(), SIGINT)
        self.assertThat(lambda: spinner.run(timeout * 5, defer.Deferred),
                        Raises(MatchesException(_spinner.NoResultError)))
        self.assertEqual([], spinner._clean())

    @skipIf(os.name != "posix", "Sending SIGINT with os.kill is posix only")
    def test_fast_sigint_raises_no_result_error_second_time(self):
        # Repeat to make sure no state leaked from the previous run.
        self.test_fast_sigint_raises_no_result_error()
def test_suite():
    """Return all the tests in this module (standard unittest protocol)."""
    from unittest import TestLoader
    return TestLoader().loadTestsFromName(__name__)

View File

@ -1,84 +0,0 @@
# Copyright (c) 2012 testtools developers. See LICENSE for details.
"""Test tag support."""
from testtools import TestCase
from testtools.tags import TagContext
class TestTags(TestCase):
    """Tests for ``testtools.tags.TagContext``."""

    def test_no_tags(self):
        # A tag context has no tags initially.
        tag_context = TagContext()
        self.assertEqual(set(), tag_context.get_current_tags())

    def test_add_tag(self):
        # A tag added with change_tags appears in get_current_tags.
        tag_context = TagContext()
        tag_context.change_tags(set(['foo']), set())
        self.assertEqual(set(['foo']), tag_context.get_current_tags())

    def test_add_tag_twice(self):
        # Calling change_tags twice to add tags adds both tags to the current
        # tags.
        tag_context = TagContext()
        tag_context.change_tags(set(['foo']), set())
        tag_context.change_tags(set(['bar']), set())
        self.assertEqual(
            set(['foo', 'bar']), tag_context.get_current_tags())

    def test_change_tags_returns_tags(self):
        # change_tags returns the current tags. This is a convenience.
        tag_context = TagContext()
        tags = tag_context.change_tags(set(['foo']), set())
        self.assertEqual(set(['foo']), tags)

    def test_remove_tag(self):
        # change_tags can remove tags from the context.
        tag_context = TagContext()
        tag_context.change_tags(set(['foo']), set())
        tag_context.change_tags(set(), set(['foo']))
        self.assertEqual(set(), tag_context.get_current_tags())

    def test_child_context(self):
        # A TagContext can have a parent. If so, its tags are the tags of the
        # parent at the moment of construction.
        parent = TagContext()
        parent.change_tags(set(['foo']), set())
        child = TagContext(parent)
        self.assertEqual(
            parent.get_current_tags(), child.get_current_tags())

    def test_add_to_child(self):
        # Adding a tag to the child context doesn't affect the parent.
        parent = TagContext()
        parent.change_tags(set(['foo']), set())
        child = TagContext(parent)
        child.change_tags(set(['bar']), set())
        self.assertEqual(set(['foo', 'bar']), child.get_current_tags())
        self.assertEqual(set(['foo']), parent.get_current_tags())

    def test_remove_in_child(self):
        # A tag that was in the parent context can be removed from the child
        # context without affecting the parent.
        parent = TagContext()
        parent.change_tags(set(['foo']), set())
        child = TagContext(parent)
        child.change_tags(set(), set(['foo']))
        self.assertEqual(set(), child.get_current_tags())
        self.assertEqual(set(['foo']), parent.get_current_tags())

    def test_parent(self):
        # The parent can be retrieved from a child context.
        parent = TagContext()
        parent.change_tags(set(['foo']), set())
        child = TagContext(parent)
        child.change_tags(set(), set(['foo']))
        self.assertEqual(parent, child.parent)
def test_suite():
    """Return all the tests in this module (standard unittest protocol)."""
    from unittest import TestLoader
    return TestLoader().loadTestsFromName(__name__)

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@ -1,128 +0,0 @@
# Copyright (c) 2009-2011 testtools developers. See LICENSE for details.
"""Test ConcurrentTestSuite and related things."""
__metaclass__ = type
import unittest
from testtools import (
ConcurrentTestSuite,
iterate_tests,
PlaceHolder,
TestCase,
)
from testtools.helpers import try_import
from testtools.testsuite import FixtureSuite, iterate_tests, sorted_tests
from testtools.tests.helpers import LoggingResult
FunctionFixture = try_import('fixtures.FunctionFixture')
class Sample(TestCase):
    """A trivial two-test case used as fodder for the suite tests below."""

    def __hash__(self):
        # Identity-based hash; test instances are used as dict keys when
        # ConcurrentTestSuite tracks its worker threads.
        return id(self)

    def test_method1(self):
        pass

    def test_method2(self):
        pass
class TestConcurrentTestSuiteRun(TestCase):
    """Tests for running a ``ConcurrentTestSuite``."""

    def test_trivial(self):
        # Both tests of a two-test suite run, each reported once.
        log = []
        result = LoggingResult(log)
        test1 = Sample('test_method1')
        test2 = Sample('test_method2')
        original_suite = unittest.TestSuite([test1, test2])
        suite = ConcurrentTestSuite(original_suite, self.split_suite)
        suite.run(result)
        # log[0] is the timestamp for the first test starting.
        test1 = log[1][1]
        test2 = log[-1][1]
        self.assertIsInstance(test1, Sample)
        self.assertIsInstance(test2, Sample)
        self.assertNotEqual(test1.id(), test2.id())

    def test_wrap_result(self):
        # ConcurrentTestSuite has a hook for wrapping the per-thread result.
        wrap_log = []

        def wrap_result(thread_safe_result, thread_number):
            wrap_log.append(
                (thread_safe_result.result.decorated, thread_number))
            return thread_safe_result

        result_log = []
        result = LoggingResult(result_log)
        test1 = Sample('test_method1')
        test2 = Sample('test_method2')
        original_suite = unittest.TestSuite([test1, test2])
        suite = ConcurrentTestSuite(
            original_suite, self.split_suite, wrap_result=wrap_result)
        suite.run(result)
        # The hook was called once per worker, with the original result.
        self.assertEqual(
            [(result, 0),
             (result, 1),
             ], wrap_log)
        # Smoke test to make sure everything ran OK.
        self.assertNotEqual([], result_log)

    def split_suite(self, suite):
        """Split 'suite' into its two constituent tests (the make_tests helper)."""
        tests = list(iterate_tests(suite))
        return tests[0], tests[1]
class TestFixtureSuite(TestCase):
    """Tests for ``FixtureSuite``."""

    def setUp(self):
        super(TestFixtureSuite, self).setUp()
        # FunctionFixture is a try_import of the optional 'fixtures' package.
        if FunctionFixture is None:
            self.skip("Need fixtures")

    def test_fixture_suite(self):
        # The fixture's setUp runs before the tests and tearDown after.
        log = []

        class Sample(TestCase):
            def test_one(self):
                log.append(1)

            def test_two(self):
                log.append(2)

        fixture = FunctionFixture(
            lambda: log.append('setUp'),
            lambda fixture: log.append('tearDown'))
        suite = FixtureSuite(fixture, [Sample('test_one'), Sample('test_two')])
        suite.run(LoggingResult([]))
        self.assertEqual(['setUp', 1, 2, 'tearDown'], log)
class TestSortedTests(TestCase):
    """Tests for ``sorted_tests``."""

    def test_sorts_custom_suites(self):
        # A custom suite with a sort_tests method is kept intact and asked to
        # sort itself.
        a = PlaceHolder('a')
        b = PlaceHolder('b')

        class Subclass(unittest.TestSuite):
            def sort_tests(self):
                self._tests = sorted_tests(self, True)

        input_suite = Subclass([b, a])
        suite = sorted_tests(input_suite)
        self.assertEqual([a, b], list(iterate_tests(suite)))
        self.assertEqual([input_suite], list(iter(suite)))

    def test_custom_suite_without_sort_tests_works(self):
        # A custom suite without sort_tests is preserved unsorted.
        a = PlaceHolder('a')
        b = PlaceHolder('b')

        class Subclass(unittest.TestSuite):
            pass

        input_suite = Subclass([b, a])
        suite = sorted_tests(input_suite)
        self.assertEqual([b, a], list(iterate_tests(suite)))
        self.assertEqual([input_suite], list(iter(suite)))

    def test_sorts_simple_suites(self):
        # Plain unittest.TestSuite contents are sorted by test id.
        a = PlaceHolder('a')
        b = PlaceHolder('b')
        suite = sorted_tests(unittest.TestSuite([b, a]))
        self.assertEqual([a, b], list(iterate_tests(suite)))
def test_suite():
    """Return all the tests in this module (standard unittest protocol)."""
    from unittest import TestLoader
    return TestLoader().loadTestsFromName(__name__)

View File

@ -1,73 +0,0 @@
# Copyright (c) 2011 testtools developers. See LICENSE for details.
from __future__ import with_statement
import sys
from testtools import (
ExpectedException,
TestCase,
)
from testtools.matchers import (
AfterPreprocessing,
Equals,
)
class TestExpectedException(TestCase):
    """Test the ExpectedException context manager."""

    def test_pass_on_raise(self):
        # Raising the expected type with a message matching the regexp passes.
        with ExpectedException(ValueError, 'tes.'):
            raise ValueError('test')

    def test_pass_on_raise_matcher(self):
        # A matcher can be used in place of a regexp string.
        with ExpectedException(
                ValueError, AfterPreprocessing(str, Equals('test'))):
            raise ValueError('test')

    def test_raise_on_text_mismatch(self):
        # The right exception type with a non-matching message raises
        # AssertionError describing the mismatch.
        try:
            with ExpectedException(ValueError, 'tes.'):
                raise ValueError('mismatch')
        except AssertionError:
            e = sys.exc_info()[1]
            self.assertEqual("'mismatch' does not match /tes./", str(e))
        else:
            self.fail('AssertionError not raised.')

    def test_raise_on_general_mismatch(self):
        # A failing matcher produces an AssertionError with the matcher's own
        # mismatch description.
        matcher = AfterPreprocessing(str, Equals('test'))
        value_error = ValueError('mismatch')
        try:
            with ExpectedException(ValueError, matcher):
                raise value_error
        except AssertionError:
            e = sys.exc_info()[1]
            self.assertEqual(matcher.match(value_error).describe(), str(e))
        else:
            self.fail('AssertionError not raised.')

    def test_raise_on_error_mismatch(self):
        # An exception of the wrong type propagates unchanged.
        try:
            with ExpectedException(TypeError, 'tes.'):
                raise ValueError('mismatch')
        except ValueError:
            e = sys.exc_info()[1]
            self.assertEqual('mismatch', str(e))
        else:
            self.fail('ValueError not raised.')

    def test_raise_if_no_exception(self):
        # If nothing is raised inside the block, AssertionError is raised.
        try:
            with ExpectedException(TypeError, 'tes.'):
                pass
        except AssertionError:
            e = sys.exc_info()[1]
            self.assertEqual('TypeError not raised.', str(e))
        else:
            self.fail('AssertionError not raised.')

    def test_pass_on_raise_any_message(self):
        # Without a regexp/matcher, any message of the expected type passes.
        with ExpectedException(ValueError):
            raise ValueError('whatever')

View File

@ -1,154 +0,0 @@
# Copyright (c) 2009-2011 testtools developers. See LICENSE for details.
"""Test suites and related things."""
__metaclass__ = type
__all__ = [
'ConcurrentTestSuite',
'iterate_tests',
'sorted_tests',
]
from testtools.helpers import safe_hasattr, try_imports
Queue = try_imports(['Queue.Queue', 'queue.Queue'])
import threading
import unittest
import testtools
def iterate_tests(test_suite_or_case):
    """Iterate through all of the test cases in 'test_suite_or_case'.

    :param test_suite_or_case: A single test case, or an (arbitrarily nested)
        iterable of suites and cases.
    :return: An iterator over the leaf test cases, in traversal order.
    """
    try:
        suite = iter(test_suite_or_case)
    except TypeError:
        # Not iterable: assume it's a single test case.
        yield test_suite_or_case
    else:
        # Recurse into each member to flatten nested suites.
        for test in suite:
            for subtest in iterate_tests(test):
                yield subtest
class ConcurrentTestSuite(unittest.TestSuite):
    """A TestSuite whose run() calls out to a concurrency strategy."""

    def __init__(self, suite, make_tests, wrap_result=None):
        """Create a ConcurrentTestSuite to execute suite.

        :param suite: A suite to run concurrently.
        :param make_tests: A helper function to split the tests in the
            ConcurrentTestSuite into some number of concurrently executing
            sub-suites. make_tests must take a suite, and return an iterable
            of TestCase-like object, each of which must have a run(result)
            method.
        :param wrap_result: An optional function that takes a thread-safe
            result and a thread number and must return a ``TestResult``
            object. If not provided, then ``ConcurrentTestSuite`` will just
            use a ``ThreadsafeForwardingResult`` wrapped around the result
            passed to ``run()``.
        """
        super(ConcurrentTestSuite, self).__init__([suite])
        self.make_tests = make_tests
        if wrap_result:
            self._wrap_result = wrap_result

    def _wrap_result(self, thread_safe_result, thread_number):
        """Wrap a thread-safe result before sending it test results.

        You can either override this in a subclass or pass your own
        ``wrap_result`` in to the constructor. The latter is preferred.
        """
        return thread_safe_result

    def run(self, result):
        """Run the tests concurrently.

        This calls out to the provided make_tests helper, and then serialises
        the results so that result only sees activity from one TestCase at
        a time.

        ConcurrentTestSuite provides no special mechanism to stop the tests
        returned by make_tests, it is up to the make_tests to honour the
        shouldStop attribute on the result object they are run with, which
        will be set if an exception is raised in the thread which
        ConcurrentTestSuite.run is called in.
        """
        tests = self.make_tests(self)
        try:
            threads = {}
            queue = Queue()
            # Serialise forwarding so 'result' sees one test at a time.
            semaphore = threading.Semaphore(1)
            for i, test in enumerate(tests):
                process_result = self._wrap_result(
                    testtools.ThreadsafeForwardingResult(result, semaphore), i)
                reader_thread = threading.Thread(
                    target=self._run_test, args=(test, process_result, queue))
                threads[test] = reader_thread, process_result
                reader_thread.start()
            while threads:
                finished_test = queue.get()
                threads[finished_test][0].join()
                del threads[finished_test]
        except:
            # Deliberately bare: any error (including KeyboardInterrupt) in
            # this thread must stop the remaining workers before re-raising.
            for thread, process_result in threads.values():
                process_result.stop()
            raise

    def _run_test(self, test, process_result, queue):
        """Run one sub-suite, always signalling completion via 'queue'."""
        try:
            test.run(process_result)
        finally:
            queue.put(test)
class FixtureSuite(unittest.TestSuite):
    """A TestSuite that runs its tests inside the scope of a fixture."""

    def __init__(self, fixture, tests):
        """Create a FixtureSuite.

        :param fixture: An object with ``setUp()`` and ``cleanUp()`` methods
            (fixtures-style) that brackets the run of 'tests'.
        :param tests: The tests to run within the fixture.
        """
        super(FixtureSuite, self).__init__(tests)
        self._fixture = fixture

    def run(self, result):
        # Set the fixture up, run the tests, and clean up even on failure.
        self._fixture.setUp()
        try:
            super(FixtureSuite, self).run(result)
        finally:
            self._fixture.cleanUp()

    def sort_tests(self):
        # Hook used by sorted_tests() so this suite can be sorted in place
        # without being replaced by a plain TestSuite.
        self._tests = sorted_tests(self, True)
def _flatten_tests(suite_or_case, unpack_outer=False):
    """Flatten 'suite_or_case' into a list of (sort_key, test) tuples.

    Plain ``unittest.TestSuite`` instances (and, when 'unpack_outer' is set,
    the outermost suite regardless of type) are recursed into; other suite
    types are kept intact, keyed by the id of their first contained test,
    and given a chance to sort themselves via a ``sort_tests`` method.
    """
    try:
        tests = iter(suite_or_case)
    except TypeError:
        # Not iterable, assume it's a test case.
        return [(suite_or_case.id(), suite_or_case)]
    if (type(suite_or_case) in (unittest.TestSuite,) or
            unpack_outer):
        # Plain old test suite (or any others we may add).
        result = []
        for test in tests:
            # Recurse to flatten.
            result.extend(_flatten_tests(test))
        return result
    else:
        # Find any old actual test and grab its id.
        suite_id = None
        tests = iterate_tests(suite_or_case)
        for test in tests:
            suite_id = test.id()
            break
        # If it has a sort_tests method, call that.
        if safe_hasattr(suite_or_case, 'sort_tests'):
            suite_or_case.sort_tests()
        return [(suite_id, suite_or_case)]
def sorted_tests(suite_or_case, unpack_outer=False):
    """Sort suite_or_case while preserving non-vanilla TestSuites.

    :param suite_or_case: A test suite or test case to sort.
    :param unpack_outer: If True, unpack the outermost suite even when it is
        not a plain ``unittest.TestSuite``.
    :return: A new ``unittest.TestSuite`` with members ordered by test id.
    """
    tests = _flatten_tests(suite_or_case, unpack_outer=unpack_outer)
    tests.sort()
    return unittest.TestSuite([test for (sort_key, test) in tests])

View File

@ -1,13 +0,0 @@
# Copyright (c) 2008-2010 testtools developers. See LICENSE for details.

"""Utilities for dealing with stuff in unittest.

Legacy - deprecated - use testtools.testsuite.iterate_tests
"""

import warnings

# Warn at import time, pointing at the importer's own import statement.
warnings.warn("Please import iterate_tests from testtools.testsuite - "
              "testtools.utils is deprecated.", DeprecationWarning, stacklevel=2)

# Re-exported for backwards compatibility (imported after the warning so the
# deprecation fires even when this succeeds).
from testtools.testsuite import iterate_tests

View File

@ -9,11 +9,6 @@ THIRD_PARTY_DIR="`dirname $0`/../third_party"
LIBDIR="`dirname $0`"
WORKDIR="`mktemp -d`"
echo "Updating testtools..."
git clone git://github.com/testing-cabal/testtools "$WORKDIR/testtools"
rm -rf "$WORKDIR/testtools/.git"
rsync -avz --delete "$WORKDIR/testtools/" "$LIBDIR/testtools/"
echo "Updating dnspython..."
git clone git://www.dnspython.org/dnspython.git "$WORKDIR/dnspython"
rm -rf "$WORKDIR/dnspython/.git"

View File

@ -4,7 +4,6 @@ import os, Options
# work out what python external libraries we need to install
external_libs = {
"testtools": "testtools/testtools",
"extras": "extras/extras",
"mimeparse": "mimeparse/mimeparse",
}