[Biopython-dev] Upcoming release

Brad Chapman chapmanb at arches.uga.edu
Sun Feb 25 07:13:43 EST 2001


[I was working on a PyUnit framework for integrating the tests]

Jeff: 
> I'm glad someone's looking seriously into this!  It sounds like 
> something for the next release, though...

Okay, well I completely ignored your message and worked more on this
:-). In the shower this morning I thought of some ways to fix the
problems we've been having, using the PyUnit framework I posted
yesterday.

It seems like I've got the regression comparisons working now so I
implemented a "replacement" for br_regrtest.py that uses PyUnit. The
only downside of the comparisons now is that it reads the entire
output into a string, and then does the comparison, but I can't ever
imagine that an output would be so incredibly huge that this would be a
problem (otherwise the test should probably be split up!).

I used the fancy pyunit GUI stuff, so now the tests run by default
with a little Tk GUI (should be nicer on Windows, and especially
nicer for Macs).

This all works for me okay on both Unix and Windows. 

What do people think? Does anyone have time to look at this before
next week's release, or do you all want to put it off until after?

BTW, I noticed some problems with the tests while doing this, which I
can now attribute to actual problems:

o test_NCBIWWW is failing right now, due to problems in comparing the
output (and these are not due to newline problems). I looked in the
logs to see what had changed, and it looks like Thomas checked in an
output change, but there wasn't a corresponding change to the
tests.

o test_SubsMat -- this seems to be failing on windows due to the fact
that Windows prints -0.00 and the output is 0.00. I guess this is a
Windows/UNIX difference. It's probably not worth worrying about since
-0.00 and 0.00 are the same thing (as far as I know :-).

Brad

-------------- next part --------------
#!/usr/bin/env python
"""Run the biopython tests as a set of PyUnit-based regression tests.

This will find all modules whose name is "test_*" in the test
directory, and run them.  Various command line options provide
additional facilities.

Command line options:

-g;--generate -- write the output file for a test instead of comparing it.
                 A test to write the output for must be specified.
--no-gui      -- do not use a GUI to run the tests
--help        -- show usage info
"""
# standard modules
import sys
import cStringIO
import os
import string
import sys
import getopt

# PyUnit
import unittest
import unittestgui

def main(argv):
    """Parse the command line and run the biopython regression tests.

    argv -- the full argument list, normally sys.argv.

    Returns a shell-style exit status: 0 for success, 2 for a usage
    error.
    """
    # run with the Tk GUI by default, falling back to plain text
    # output when Tkinter is not available
    use_gui = 1
    if use_gui:
        try:
            import Tkinter as tk
        except ImportError:
            use_gui = 0

    # parse the command line options
    try:
        opts, args = getopt.getopt(argv[1:], 'g',
                                   ["generate", "no-gui", "help"])
    except getopt.error, msg:
        print msg
        print __doc__
        return 2

    # act on the options we were given
    for option, value in opts:
        if option == "--help":
            print __doc__
            return 0
        if option == "--no-gui":
            use_gui = 0

        if option == "-g" or option == "--generate":
            # generation needs exactly one test name in the arguments
            if len(args) > 1:
                print "Only one argument (the test name) needed for generate"
                print __doc__
                return 2
            elif len(args) == 0:
                print "No test name specified to generate output for."
                print __doc__
                return 2
            test_name = args[0]
            # strip off .py if it was included
            if test_name[-3:] == ".py":
                test_name = test_name[:-3]

            generate_output(test_name)
            return 0

    # no generation requested, so run the whole suite
    if use_gui:
        root = tk.Tk()
        root.title("PyUnit")
        runner = unittestgui.TkTestRunner(root,
                                          "pyunit_testing.biopython_suite")
        root.protocol('WM_DELETE_WINDOW', root.quit)
        root.mainloop()
    else:
        runner = unittest.TextTestRunner()
        runner.run(biopython_suite())

def biopython_suite():
    """Build a PyUnit suite with one comparison case per test module.

    Each case imports a test_* module, captures what it prints, and
    compares that against the golden file in the output directory.
    """

    class BiopythonTest(unittest.TestCase):
        """Run one test module and compare its output to the golden file."""

        def __init__(self, test_name):
            unittest.TestCase.__init__(self)
            self.test_name = test_name

        def __str__(self):
            return self.shortDescription()

        def shortDescription(self):
            # the module name is the most useful label for a test
            return self.test_name

        def runTest(self):
            captured = cStringIO.StringIO()

            # swap stdout for a string buffer while the test module
            # runs (importing it executes it), restoring it afterwards
            old_stdout = sys.stdout
            try:
                sys.stdout = captured
                __import__(self.test_name)
                generated_output = captured.getvalue()
            finally:
                sys.stdout = old_stdout

            # the golden output lives in output/<test_name> next to
            # the test scripts
            outputfile = os.path.join(findtestdir(), "output",
                                      self.test_name)

            try:
                expected_handle = open(outputfile, 'r')
                # check the expected output to be consistent with what
                # we generated
                compare_output(self.test_name,
                               cStringIO.StringIO(generated_output),
                               expected_handle)
            except IOError:
                raise IOError("Warning: Can't open %s for test %s" %
                              (outputfile, self.test_name))

    suite = unittest.TestSuite()
    for name in findtests():
        suite.addTest(BiopythonTest(name))
    return suite

def findtests():
    """Return a sorted list of all applicable test module names.

    A test module is any file named test_*.py in the test directory;
    the names are returned without the .py extension.
    """
    names = os.listdir(findtestdir())
    tests = [name[:-3] for name in names
             if name[:5] == "test_" and name[-3:] == ".py"]
    tests.sort()
    return tests

def findtestdir():
    """Return the directory containing this test-running script."""
    if __name__ == '__main__':
        # run as a script -- locate ourselves via the command line
        this_file = sys.argv[0]
    else:
        # imported as a module -- locate ourselves via the module path
        this_file = __file__
    return os.path.dirname(this_file) or os.curdir

def generate_output(test_name):
    """Write the golden ("expected") output file for the given test.

    The test module is imported with stdout redirected into
    output/<test_name>; importing the module runs it, so everything it
    prints is recorded. The first line written is the test name itself,
    which compare_output later uses as a sanity check.
    """
    testdir = findtestdir()
    outputfile = os.path.join(testdir, "output", test_name)

    output_handle = open(outputfile, 'w')

    # the test name goes on the first line of the output
    output_handle.write(test_name + "\n")

    # swap stdout for the output file while the test runs, making
    # sure it gets restored however the import finishes
    save_stdout = sys.stdout
    try:
        sys.stdout = output_handle
        __import__(test_name)
    finally:
        output_handle.close()
        sys.stdout = save_stdout

def compare_output(test_name, output_handle, expected_handle):
    """Check generated test output against the expected golden output.

    Arguments:

    o test_name - The name of the test we are running.

    o output_handle - A handle to all of the output generated by running
    a test.

    o expected_handle - A handle to the expected output for a test; its
    first line must hold the test name.

    Raises AssertionError as soon as a mismatch is found.
    """
    # sanity check: the golden file records which test it belongs to
    # on its first line
    expected_test = expected_handle.readline().strip()

    assert expected_test == test_name, "\nOutput:   %s\nExpected: %s" % \
           (test_name, expected_test)

    # walk both streams line by line until one runs out
    while 1:
        expected_line = expected_handle.readline()
        output_line = output_handle.readline()

        # when either stream ends, the other must end too
        if not expected_line or not output_line:
            assert expected_line == '', "Unread: %s" % expected_line
            assert output_line == '', "Extra output: %s" % output_line
            break

        # strip surrounding whitespace, then normalize the escaped
        # newline representations before comparing
        expected_line = convert_newlines(expected_line.strip())
        output_line = convert_newlines(output_line.strip())

        assert expected_line == output_line, "\nOutput  : %s\nExpected: %s" % \
               (output_line, expected_line)
        
def convert_newlines(line):
    """Convert all newlines in the given line into '\\n'.

    Works around a difference between python 2.1 and older pythons in
    how line breaks inside strings are written out: older versions
    produced '\\012' where 2.1 produces '\\n', so both forms are
    normalized to the latter before output comparison.
    """
    # the escapes appear literally (backslash plus digits) inside the
    # compared text, hence the doubled backslashes below
    for escape in ("\\012",):
        line = line.replace(escape, "\\n")
    return line
        
                
# when run as a script, exit with the status code main() returns
if __name__ == "__main__":
    sys.exit(main(sys.argv))



More information about the Biopython-dev mailing list