/*
 * tester.h
 *
 * Copyright (C)  2004  Zack Rusin <[email protected]>
 * Copyright (C)  2005  Jeroen Wijnhout <[email protected]>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef TESTER_H
#define TESTER_H

/*! @mainpage KUnitTest - a UnitTest library for KDE
 *
 * @section contents Contents
 * @li @ref background
 * @li @ref usage
 * @li @ref integration
 * @li @ref module
 * @li @ref advanced
 * @li @ref scripts
 *
 * @section background Background
 *
 * KUnitTest is based on the "in reality no one wants to write tests and
 * if it takes a lot of code no one will. So the less code to write the
 * better" design principle.
 *
 * Copyright and credits:
 * @li (C) 2004 Zack Rusin (original author)
 * @li Brad Hards (import into CVS)
 * @li (C) 2005 Jeroen Wijnhout (GUI, library, module)
 *
 * You are responsible for what you do with it though. It
 * is licensed under a BSD license - read the top of each file.
 *
 * All the GUI-related code is in kdesdk/kunittest; the core libraries are in kdelibs/kunittest.
 * A simple example module is in kdelibs/kunittest/samplemodule.{h,cpp}; more examples
 * can be found in kdesdk/kunittest/example.
 *
 * There are roughly two ways to use the KUnitTest library. Either you create dynamically
 * loadable modules and use the kunittestmodrunner or kunittestguimodrunner programs to run
 * the tests, or you use the kunittest/kunittestgui library to create your own test runner
 * application.
 *
 * The main parts of the KUnitTest library are:
 * @li runner.{h,cpp} - the test runner; it holds all tests and runs
 * them.
 * @li runnergui.{h,cpp} - the GUI wrapper around the runner. The GUI neatly organizes the
 *   test results. With the kunittest helper script it can even add the debug output
 *   to the test results. For this you need to have the kdesdk module installed.
 * @li tester.h - holds the base class of a pure test object (Tester).
 * @li module.h - defines macros to create a dynamically loadable module.
 *
 * @section usage Example usage
 *
 * This section describes how to use the library to create your own tests and runner
 * application.
 *
 * Now let's see how you would add a new test to KUnitTest. You do that by
 * writing a Tester-derived class which has an "allTests()" method:
 *
 * @code
 * class SampleTest : public Tester
 * {
 * public:
 *    SampleTest();
 *
 *    void allTests();
 * };
 * @endcode
 *
 * Now in the allTests() method we implement our tests, which might look
 * like:
 *
 * @code
 * void SampleTest::allTests()
 * {
 *    CHECK( 3+3, 6 );
 *    CHECK( TQString( "hello%1" ).arg( " world not" ), TQString( "hello world" ) );
 * }
 * @endcode
 *
 * CHECK() is implemented using a template, so you get type-safe
 * comparison. All that is needed is that the argument types have an
 * @c operator==() defined (and can be written to a TQTextStream, so that
 * failure messages can be printed).
 *
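 * For example, a custom type can be used with CHECK() once it is comparable
 * and printable (a minimal sketch; the Version type and VersionTest class are
 * made up for illustration and not part of the library):
 *
 * @code
 * struct Version
 * {
 *     Version( int maj, int min ) : majorVersion( maj ), minorVersion( min ) {}
 *     int majorVersion, minorVersion;
 *
 *     bool operator==( const Version &other ) const
 *     {
 *         return majorVersion == other.majorVersion &&
 *                minorVersion == other.minorVersion;
 *     }
 * };
 *
 * // Needed so that check() can print the values in a failure message.
 * TQTextStream &operator<<( TQTextStream &ts, const Version &v )
 * {
 *     return ts << v.majorVersion << "." << v.minorVersion;
 * }
 *
 * void VersionTest::allTests()
 * {
 *     CHECK( Version( 1, 2 ), Version( 1, 2 ) );
 * }
 * @endcode
 *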
 * Once you have done that, the only other thing to do is to tell the
 * framework to add this test case, using the KUNITTEST_REGISTER_TESTER(x) macro. Just
 * put the following line in the implementation file:
 *
 * @code KUNITTEST_REGISTER_TESTER( SampleTest ); @endcode
 *
 * Note the trailing semicolon; it is necessary.
 *
 * KUnitTest will do the rest. It will tell you which tests failed, how they failed, what the
 * expected result was, what result it actually got, which code failed, and so on. For example,
 * for the code above it would output:
 *
 * @verbatim
SampleTest - 1 test passed, 1 test failed
    Unexpected failure:
        sampletest.cpp[38]: failed on "TQString( "hello%1" ).arg( " world not" )"
            result = 'hello world not', expected = 'hello world'
@endverbatim
 *
 * If you use the RunnerGUI class then you will be presented with a scrollable list of the
 * test results.
 *
 * @section integration Integration
 *
 * The KUnitTest library is easy to use. Let's say that you have the tests written in the
 * sampletest.h and sampletest.cpp files. Then all you need is a main.cpp file and a Makefile.am.
 * You can copy both from the example file provided with the library. A typical main.cpp file
 * looks like this:
 *
 * @code
 * #include <kaboutdata.h>
 * #include <kapplication.h>
 * #include <kcmdlineargs.h>
 * #include <klocale.h>
 * #include <kunittest/runnergui.h>
 *
 * static const char description[] = I18N_NOOP("SampleTests");
 * static const char version[] = "0.1";
 * static KCmdLineOptions options[] = { KCmdLineLastOption };
 *
 * int main( int argc, char** argv )
 * {
 *     KAboutData about("SampleTests", I18N_NOOP("SampleTests"), version, description,
 *                     KAboutData::License_BSD, "(C) 2005 You!", 0, 0, "mail@provider");
 *
 *     KCmdLineArgs::init(argc, argv, &about);
 *     KCmdLineArgs::addCmdLineOptions( options );
 *     KApplication app;
 *
 *     KUnitTest::RunnerGUI runner(0);
 *     runner.show();
 *     app.setMainWidget(&runner);
 *
 *     return app.exec();
 * }
 * @endcode
 *
 * The Makefile.am file will look like:
 *
 * @code
 * INCLUDES = -I$(top_srcdir)/src $(all_includes)
 * METASOURCES = AUTO
 * check_PROGRAMS = sampletests
 * sampletests_SOURCES = main.cpp sampletest.cpp
 * sampletests_LDFLAGS = $(KDE_RPATH) $(all_libraries)
 * sampletests_LDADD = -lkunittest
 * noinst_HEADERS = sampletest.h
 *
 * check:
 *    kunittest $(top_builddir)/src/sampletests SampleTests
 * @endcode
 *
 * Most of this Makefile.am is self-explanatory. After running
 * "make check" the binary "sampletests" will be built. The reason for
 * adding the extra "check" target is that you probably do not want
 * to rebuild the test suite every time you run make.
 *
 * You can run the binary on its own, but you get more functionality if you use
 * the kunittest helper script. The Makefile.am is set up in such
 * a way that this helper script is automatically run after you do a
 * "make check". The script takes two arguments: the first is the path
 * to the binary to run; the second is the application name, in this case SampleTests.
 * This name is important, since it is used to let the script communicate with the application
 * via DCOP. The helper script relies on the Perl DCOP bindings, so these need to be installed.
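 *
 * For example, invoking the helper script by hand might look like this (the
 * exact path to the binary depends on your build directory):
 *
 * @code
 * $ kunittest ./sampletests SampleTests
 * @endcode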
 *
 * @section module Creating test modules
 *
 * If you think that writing your own test runner is too much work, you can also
 * use the kunittestmodrunner application or the kunittestmod script to run
 * the tests for you. You do have to put your tests in a dynamically loadable module though.
 * Fortunately KUnitTest comes with a few macros to help you do this.
 *
 * First the good news: you don't have to change the header file sampletest.h. However, we
 * will rename it to samplemodule.h, so we remember we are making a module. The
 * implementation file should be renamed to samplemodule.cpp. This file requires some
 * modifications. First we need to include the module.h header:
 *
 * @code
 * #include <kunittest/module.h>
 * @endcode
 *
 * This header file is needed because it defines some macros you'll need. In fact, this is
 * how you use them:
 *
 * @code
 * KUNITTEST_MODULE( kunittest_samplemodule, "Tests for sample module" );
 * KUNITTEST_MODULE_REGISTER_TESTER( SimpleSampleTester );
 * KUNITTEST_MODULE_REGISTER_TESTER( SomeSampleTester );
 * @endcode
 *
 * The first macro, KUNITTEST_MODULE(), makes sure that the module can be loaded and that
 * the test classes are created. The first argument "kunittest_samplemodule" is the library
 * name; in this case we are creating a kunittest_samplemodule.la module. The
 * second argument is the name which will appear in the test runner for this test suite.
 *
 * The tester classes are now added by the KUNITTEST_MODULE_REGISTER_TESTER() macro, not
 * KUNITTEST_REGISTER_TESTER(). The only difference between the two is that the tests are
 * registered with the module created by the KUNITTEST_MODULE() macro.
 *
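 * Putting it all together, a minimal samplemodule.cpp might look like this (a
 * sketch, assuming the SimpleSampleTester class is declared in samplemodule.h):
 *
 * @code
 * #include <kunittest/module.h>
 * #include "samplemodule.h"
 *
 * KUNITTEST_MODULE( kunittest_samplemodule, "Tests for sample module" );
 * KUNITTEST_MODULE_REGISTER_TESTER( SimpleSampleTester );
 *
 * void SimpleSampleTester::allTests()
 * {
 *     CHECK( 1 + 1, 2 );
 * }
 * @endcode
 *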
 * The Makefile.am is also a bit different, but not much:
 *
 * @code
 * INCLUDES = -I$(top_srcdir)/include $(all_includes)
 * METASOURCES = AUTO
 * check_LTLIBRARIES = kunittest_samplemodule.la
 * kunittest_samplemodule_la_SOURCES = samplemodule.cpp
 * kunittest_samplemodule_la_LIBADD = $(LIB_KUNITTEST)
 * kunittest_samplemodule_la_LDFLAGS = -module $(KDE_CHECK_PLUGIN) $(all_libraries)
 * @endcode
 *
 * The $(KDE_CHECK_PLUGIN) macro is there to make sure a dynamically loadable
 * module is created.
 *
 * After you have built the module you open a Konsole and cd into the build folder. Running
 * the tests in the module is now as easy as:
 *
 * @code
 * $ make check && kunittestmodrunner
 * @endcode
 *
 * The kunittestmodrunner application loads all kunittest_*.la modules in the current
 * directory. The exit code of this console application is the number of unexpected failures.
 *
 * If you want the GUI, you should use the kunittestmod script:
 *
 * @code
 * $ make check && kunittestmod
 * @endcode
 *
 * This script starts the kunittestguimodrunner application and a helper script that takes
 * care of dealing with debug output.
 *
 * @section advanced Advanced usage
 *
 * Normally you just want to use CHECK(). If you are developing some more
 * tests, and they are run (or not) based on some external dependency,
 * you may need to skip some tests. In this case, rather than doing
 * nothing (or worse, writing a test step that aborts the test run), you
 * might want to use SKIP() to record that. Note that this is just a
 * logging / reporting tool, so you just pass in a string:
 *
 * @code
 *     SKIP( "Test skipped because of lack of foo support." );
 * @endcode
 *
 * Similarly, you may have a test step that you know will fail, but you
 * don't want to delete the test step (because it is showing a bug), and
 * equally you can't fix it right now (e.g. it would break binary
 * compatibility, or would violate a string freeze). In that case, it
 * might help to use XFAIL(), for "expected failure". The test will still
 * be run, and recorded as a failure (assuming it does fail), but will
 * also be recorded separately. Usage might be as follows:
 *
 * @code
 *     XFAIL( 2+1, 4 );
 * @endcode
 *
 * You can mix CHECK(), SKIP() and XFAIL() within a single Tester-derived
 * class, as the sketch below shows.
 *
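 * A combined allTests() might look like this (a sketch; the HAVE_FOO guard and
 * fooVersion() are hypothetical):
 *
 * @code
 * void SampleTest::allTests()
 * {
 *     CHECK( 3 + 3, 6 );    // expected to pass
 *     XFAIL( 2 + 1, 4 );    // known bug: expected to fail
 * #ifndef HAVE_FOO
 *     SKIP( "Test skipped because of lack of foo support." );
 * #else
 *     CHECK( fooVersion(), 2 );
 * #endif
 * }
 * @endcode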
 *
 * @section exceptions Exceptions
 *
 * KUnitTest comes with simple support for testing whether an expression, such as a function
 * call, throws an exception or not. For each of the usual macros there is a corresponding one
 * for exception testing: CHECK_EXCEPTION(), XFAIL_EXCEPTION(), and SKIP_EXCEPTION(). They all
 * take two arguments: the catch clause that will catch the exception, and the expression that
 * is supposed to throw the exception.
 *
 * For example:
 *
 * @code
 * CHECK_EXCEPTION(EvilToothFairyException *, myFunction("I forgot to brush my teeth!"));
 * @endcode
 *
 * @note The exception is not de-allocated in any way.
 *
 * The macros do not allow introspection of the exceptions, such as testing a supplied
 * identifier code on the exception object or similar; this requires manual coding, such
 * as custom macros, as the sketch below shows.
 *
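 * For example, to inspect the exception object itself you can write the
 * try/catch by hand and report through the Tester helper methods (a sketch;
 * FooException and its code() accessor are made up for illustration):
 *
 * @code
 * try
 * {
 *     myFunction( "bad input" );
 *     failure( "myFunction() did not throw" );
 * }
 * catch ( FooException &e )
 * {
 *     CHECK( e.code(), 42 );
 * }
 * @endcode
 *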
 * @section scripts Scripts
 *
 * The library comes with several helper scripts:
 *
 * @li kunittest [app] [dcopobject] : Runs the application app and redirects all debug output to the dcopobject.
 * @li kunittestmod --folder [folder] --query [query] : Loads and runs all modules in the folder matching the query. Uses a GUI.
 * @li kunittest_debughelper [dcopobject] : A Perl script that is able to redirect debug output to a RunnerGUI instance.
 *
 * These scripts are part of the kdesdk/kunittest module.
 */

/*!
 * @file tester.h
 * Defines macros for unit testing as well as some test classes.
 */

#include <iostream>
using namespace std;

#include <tqobject.h>
#include <tqstringlist.h>
#include <tqasciidict.h>

#include <kdelibs_export.h>

/*! @def CHECK(x,y)
 * Use this macro to perform an equality check. For example
 *
 * @code CHECK( numberOfErrors(), 0 ); @endcode
 */
#define CHECK( x, y ) check( __FILE__, __LINE__, #x, x, y, false )

/// for source-compat with qttestlib: use COMPARE(x,y) if you plan to port to qttestlib later.
#define COMPARE CHECK

/// for source-compat with qttestlib: use VERIFY(x) if you plan to port to qttestlib later.
#define VERIFY( x ) CHECK( x, true )

/*! @def XFAIL(x,y)
 * Use this macro to perform a check you expect to fail. For example
 *
 * @code XFAIL( numberOfErrors(), 1 ); @endcode
 *
 * If the test fails, it will be counted as such; however, it will
 * also be registered separately.
 */
#define XFAIL( x, y ) check( __FILE__, __LINE__, #x, x, y, true )

/*! @def SKIP(x)
 * Use this macro to indicate that a test is skipped.
 *
 * @code SKIP("Test skipped because of lack of foo support."); @endcode
 */
#define SKIP( x ) skip( __FILE__, __LINE__, TQString::fromLatin1(#x))

/*!
 * A macro testing that @p expression throws an exception that is caught
 * by @p exceptionCatch. Use it to test that an expression, such as a function call,
 * throws a certain exception.
 *
 * @note This macro assumes it is used in a member function of a Tester subclass.
 */
#define CHECK_EXCEPTION(exceptionCatch, expression) \
    try \
    { \
        expression; \
    } \
    catch(exceptionCatch) \
    { \
        setExceptionRaised(true); \
    } \
    if(exceptionRaised()) \
    { \
        success(TQString(__FILE__) + "[" + TQString::number(__LINE__) + "]: passed " + #expression); \
    } \
    else \
    { \
        failure(TQString(__FILE__) + "[" + TQString::number(__LINE__) + TQString("]: failed to throw " \
                "an exception on: ") + #expression); \
    } \
    setExceptionRaised(false);

/*!
 * This macro is similar to XFAIL(), but for exceptions instead. It flags @p expression
 * as being expected to fail to throw an exception that @p exceptionCatch is supposed to catch.
 */
#define XFAIL_EXCEPTION(exceptionCatch, expression) \
    try \
    { \
        expression; \
    } \
    catch(exceptionCatch) \
    { \
        setExceptionRaised(true); \
    } \
    if(exceptionRaised()) \
    { \
        unexpectedSuccess(TQString(__FILE__) + "[" + TQString::number(__LINE__) + "]: unexpectedly threw an exception and passed: " + #expression); \
    }\
    else \
    { \
        expectedFailure(TQString(__FILE__) + "[" + TQString::number(__LINE__) + TQString("]: failed to throw an exception on: ") + #expression); \
    } \
    setExceptionRaised(false);

/*!
 * This macro is similar to SKIP(), but for exceptions instead. It skips the test of
 * @p expression and the @p exceptionCatch which is supposed to catch the exception,
 * and registers the test as skipped.
 */
#define SKIP_EXCEPTION(exceptionCatch, expression) \
	skip( __FILE__, __LINE__, TQString("Exception catch: ")\
			.arg(TQString(#exceptionCatch)).arg(TQString(" Test expression: ")).arg(TQString(#expression)))

/**
 * Namespace for Unit testing classes
 */
namespace KUnitTest
{
    /*! A simple class that encapsulates a test result. A Tester class usually
     * has a single TestResults instance associated with it; the SlotTester
     * class, however, can have several TestResults instances (one for each test slot, in fact).
     */
    class KUNITTEST_EXPORT TestResults
    {
        friend class Tester;

    public:
        TestResults() : m_tests( 0 ) {}

        virtual ~TestResults() {}

        /*! Clears the test results and debug info. Normally you do not need to call this.
         */
        virtual void clear()
        {
            m_errorList.clear();
            m_xfailList.clear();
            m_xpassList.clear();
            m_skipList.clear();
            m_successList.clear();
            m_debug = "";
            m_tests = 0;
        }

        /*! Add some debug info that can be viewed later. Normally you do not need to call this.
         * @param debug The debug info.
         */
        virtual void addDebugInfo(const TQString &debug)
        {
            m_debug += debug;
        }

        /*! @returns The debug info that was added to this TestResults object.
         */
        TQString debugInfo() const { return m_debug; }

        /*! @returns The number of finished tests. */
        int testsFinished() const { return m_tests; }

        /*! @returns The number of failed tests. */
        int errors() const { return m_errorList.count(); }

        /*! @returns The number of expected failures. */
        int xfails() const { return m_xfailList.count(); }

        /*! @returns The number of unexpected successes. */
        int xpasses() const { return m_xpassList.count(); }

        /*! @returns The number of skipped tests. */
        int skipped() const { return m_skipList.count(); }

        /*! @returns The number of passed tests. */
        int passed() const { return m_successList.count(); }

        /*! @returns Details about the failed tests. */
        TQStringList errorList() const { return m_errorList; }

        /*! @returns Details about tests that failed expectedly. */
        TQStringList xfailList() const { return m_xfailList; }

        /*! @returns Details about tests that succeeded unexpectedly. */
        TQStringList xpassList() const { return m_xpassList; }

        /*! @returns Details about which tests were skipped. */
        TQStringList skipList() const { return m_skipList; }

        /*! @returns Details about the succeeded tests. */
        TQStringList successList() const { return m_successList; }

    private:
        TQStringList m_errorList;
        TQStringList m_xfailList;
        TQStringList m_xpassList;
        TQStringList m_skipList;
        TQStringList m_successList;
        TQString     m_debug;
        int         m_tests;
    };

    typedef TQAsciiDict<TestResults> TestResultsListType;

    /*! A type that can be used to iterate through a TestResultsListType. */
    typedef TQAsciiDictIterator<TestResults> TestResultsListIteratorType;
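
    /* Example (a sketch, not part of the library): iterating over the
     * per-slot results of a SlotTester instance named "tester".
     *
     * @code
     * TestResultsListIteratorType it( tester.resultsList() );
     * for ( ; it.current(); ++it )
     *     cout << it.currentKey() << ": " << it.current()->passed() << " passed" << endl;
     * @endcode
     */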

    /*! The abstract Tester class forms the base class for all test cases. Users must
     * implement the void Tester::allTests() method. This method contains the actual tests.
     *
     * Use the CHECK(x,y), XFAIL(x,y) and SKIP(x) macros in the allTests() method
     * to perform the tests.
     *
     * @see CHECK, XFAIL, SKIP
     */
    class KUNITTEST_EXPORT Tester : public TQObject
    {
    public:
        Tester(const char *name = 0L)
        : TQObject(0L, name), m_results(new TestResults()), m_exceptionState(false)
        {}

        virtual ~Tester() { delete m_results; }

    public:
        /*! Implement this method with the tests and checks you want to perform.
         */
        virtual void allTests() = 0;

    public:
        /*! @return The TestResults instance.
         */
        virtual TestResults *results() { return m_results; }

    protected:
        /*! This is called when the SKIP(x) macro is used.
         * @param file A C-string containing the name of the file where the skipped test resides. Typically the __FILE__ macro is used to retrieve the filename.
         * @param line The line number in the file @p file. Use the __LINE__ macro for this.
         * @param msg The message that identifies the skipped test.
         */
        void skip( const char *file, int line, TQString msg )
        {
            TQString skipEntry;
            TQTextStream ts( &skipEntry, IO_WriteOnly );
            ts << file << "["<< line <<"]: " << msg;
            skipTest( skipEntry );
        }

        /*! This is called when the CHECK or XFAIL macro is used.
         * @param file A C-string containing the name of the file where the check resides. Typically the __FILE__ macro is used to retrieve the filename.
         * @param line The line number in the file @p file. Use the __LINE__ macro for this.
         * @param str The expression that was checked, as a string.
         * @param result The result of the test.
         * @param expectedResult The expected result.
         * @param expectedFail Indicates whether or not a failure is expected.
         */
        template<typename T>
        void check( const char *file, int line, const char *str,
                    const T  &result, const T &expectedResult,
                    bool expectedFail )
        {
            cout << "check: " << file << "["<< line <<"]" << endl;

            if ( result != expectedResult )
            {
                TQString error;
                TQTextStream ts( &error, IO_WriteOnly );
                ts << file << "["<< line <<"]: failed on \"" <<  str
                   <<"\" result = '" << result << "' expected = '" << expectedResult << "'";

                if ( expectedFail )
                    expectedFailure( error );
                else
                    failure( error );

            }
            else
            {
                // then the test passed, but we want to record it if
                // we were expecting a failure
                if (expectedFail)
                {
                    TQString err;
                    TQTextStream ts( &err, IO_WriteOnly );
                    ts << file << "["<< line <<"]: "
                       <<" unexpectedly passed on \""
                       <<  str <<"\"";
                    unexpectedSuccess( err );
                }
                else
                {
                    TQString succ;
                    TQTextStream ts( &succ, IO_WriteOnly );
                    ts << file << "["<< line <<"]: "
                       <<" passed \""
                       <<  str <<"\"";
                    success( succ );
                }
            }

            ++m_results->m_tests;
        }

        /*!
         * This function can be used to flag succeeding tests, when
         * doing customized tests while not using the check function.
         *
         * @param message the message describing what passed. Should be informative,
         * such as mentioning the expression that passed and where, the file name and line number.
         */
        void success(const TQString &message) { m_results->m_successList.append(message); }

        /*!
         * This function can be used to flag failing tests, when
         * doing customized tests while not using the check function.
         *
         * @param message the message describing what failed. Should be informative,
         * such as mentioning the expression that failed and where, the file name and line number.
         */
        void failure(const TQString &message) { m_results->m_errorList.append(message); }

        /*!
         * This function can be used to flag expected failures, when
         * doing customized tests while not using the check function.
         *
         * @param message the message describing what failed. Should be informative,
         * such as mentioning the expression that failed and where, the file name and line number.
         */
        void expectedFailure(const TQString &message) { m_results->m_xfailList.append(message); }

        /*!
         * This function can be used to flag unexpected successes, when
         * doing customized tests while not using the check function.
         *
         * @param message the message describing what unexpectedly succeeded. Should be informative,
         * such as mentioning the expression that passed and where, the file name and line number.
         */
        void unexpectedSuccess(const TQString &message) { m_results->m_xpassList.append(message); }

        /*!
         * This function can be used to flag a test as skipped, when
         * doing customized tests while not using the check function.
         *
         * @param message the message describing what was skipped. Should be informative,
         * such as mentioning the expression that was skipped and where, the file name and line number.
         */
        void skipTest(const TQString &message) { m_results->m_skipList.append(message); }

        /*!
         * setExceptionRaised() and exceptionRaised() are book-keeping functions for
         * exception testing. setExceptionRaised() sets an internal boolean.
         *
         * @see exceptionRaised
         * @param state the new exception state; true means an exception was raised.
         */
        void setExceptionRaised(bool state) { m_exceptionState = state; }

        /*!
         * Returns the current exception state.
         *
         * @see setExceptionRaised
         */
        bool exceptionRaised() const
        {
            return m_exceptionState;
        }

    protected:
        TestResults *m_results;

    private:
        bool m_exceptionState;
    };

    /*! The SlotTester class is a special Tester class, one that will
     * execute all slots that start with the string "test". The method
     * void allTests() is implemented and should not be overridden.
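     *
     * A minimal usage sketch (MyTester and its test slots are hypothetical):
     *
     * @code
     * class MyTester : public KUnitTest::SlotTester
     * {
     *     Q_OBJECT
     *
     * public slots:
     *     void testAddition() { CHECK( 1 + 1, 2 ); }
     *     void testStrings()  { CHECK( TQString( "a" ) + "b", TQString( "ab" ) ); }
     * };
     * @endcode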
     */
    class KUNITTEST_EXPORT SlotTester : public Tester
    {
        Q_OBJECT

    public:
        SlotTester(const char *name = 0L);

        void allTests();

        TestResults *results(const char *sl);

        TestResultsListType &resultsList() { return m_resultsList; }

    signals:
        void invoke();

    private:
        void invokeMember(const TQString &str);

        TestResultsListType  m_resultsList;
        TestResults         *m_total;
    };
}

KUNITTEST_EXPORT TQTextStream& operator<<( TQTextStream& str, const TQRect& r );

KUNITTEST_EXPORT TQTextStream& operator<<( TQTextStream& str, const TQPoint& r );

KUNITTEST_EXPORT TQTextStream& operator<<( TQTextStream& str, const TQSize& r );

#endif