check.texi 84 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
72278227922802281228222832284228522862287228822892290229122922293229422952296229722982299230023012302230323042305230623072308230923102311231223132314231523162317231823192320232123222323232423252326232723282329
  1. \input texinfo @c -*-texinfo-*-
  2. @c %**start of header
  3. @setfilename check.info
  4. @include version.texi
  5. @settitle Check @value{VERSION}
  6. @syncodeindex fn cp
  7. @syncodeindex tp cp
  8. @syncodeindex vr cp
  9. @c %**end of header
  10. @copying
  11. This manual is for Check
  12. (version @value{VERSION}, @value{UPDATED}),
  13. a unit testing framework for C.
  14. Copyright @copyright{} 2001--2014 Arien Malec, Branden Archer, Chris Pickett,
  15. Fredrik Hugosson, and Robert Lemmen.
  16. @quotation
  17. Permission is granted to copy, distribute and/or modify this document
  18. under the terms of the @acronym{GNU} Free Documentation License,
  19. Version 1.2 or any later version published by the Free Software
  20. Foundation; with no Invariant Sections, no Front-Cover Texts, and no
  21. Back-Cover Texts. A copy of the license is included in the section
  22. entitled ``@acronym{GNU} Free Documentation License.''
  23. @end quotation
  24. @end copying
  25. @dircategory Software development
  26. @direntry
  27. * Check: (check). A unit testing framework for C.
  28. @end direntry
  29. @titlepage
  30. @title Check
  31. @subtitle A Unit Testing Framework for C
  32. @subtitle for version @value{VERSION}, @value{UPDATED}
  33. @author Arien Malec
  34. @author Branden Archer
  35. @author Chris Pickett
  36. @author Fredrik Hugosson
  37. @author Robert Lemmen
  38. @author Robert Collins
  39. @c The following two commands start the copyright page.
  40. @page
  41. @vskip 0pt plus 1filll
  42. @insertcopying
  43. @end titlepage
  44. @c Output the table of contents at the beginning.
  45. @contents
  46. @ifnottex
  47. @node Top, Introduction, (dir), (dir)
  48. @top Check
  49. @insertcopying
  50. Please send corrections to this manual to
  51. @email{check-devel AT lists.sourceforge.net}. We'd prefer it if you can
  52. send a unified diff (@command{diff -u}) against the
  53. @file{doc/check.texi} file that ships with Check, but if that is not
  54. possible something is better than nothing.
  55. @end ifnottex
  56. @menu
  57. * Introduction::
  58. * Unit Testing in C::
  59. * Tutorial::
  60. * Advanced Features::
  61. * Supported Build Systems::
  62. * Conclusion and References::
  63. * Environment Variable Reference::
  64. * Copying This Manual::
  65. * Index::
  66. @detailmenu
  67. --- The Detailed Node Listing ---
  68. Unit Testing in C
  69. * Other Frameworks for C::
  70. Tutorial: Basic Unit Testing
  71. * How to Write a Test::
  72. * Setting Up the Money Build Using Autotools::
  73. * Setting Up the Money Build Using CMake::
  74. * Test a Little::
  75. * Creating a Suite::
  76. * SRunner Output::
  77. Advanced Features
  78. * Convenience Test Functions::
  79. * Running Multiple Cases::
  80. * No Fork Mode::
  81. * Test Fixtures::
  82. * Multiple Suites in one SRunner::
  83. * Selective Running of Tests::
  84. * Testing Signal Handling and Exit Values::
  85. * Looping Tests::
  86. * Test Timeouts::
  87. * Determining Test Coverage::
  88. * Finding Memory Leaks::
  89. * Test Logging::
  90. * Subunit Support::
  91. Test Fixtures
  92. * Test Fixture Examples::
  93. * Checked vs Unchecked Fixtures::
  94. Test Logging
  95. * XML Logging::
  96. * TAP Logging::
  97. Environment Variable Reference
  98. Copying This Manual
  99. * GNU Free Documentation License:: License for copying this manual.
  100. @end detailmenu
  101. @end menu
  102. @node Introduction, Unit Testing in C, Top, Top
  103. @chapter Introduction
  104. @cindex introduction
  105. Check is a unit testing framework for C. It was inspired by similar
  106. frameworks that currently exist for most programming languages; the
  107. most famous example being @uref{http://www.junit.org, JUnit} for Java.
  108. There is a list of unit test frameworks for multiple languages at
  109. @uref{http://www.xprogramming.com/software.htm}. Unit testing has a
  110. long history as part of formal quality assurance methodologies, but
  111. has recently been associated with the lightweight methodology called
  112. Extreme Programming. In that methodology, the characteristic practice
  113. involves interspersing unit test writing with coding (``test a
  114. little, code a little''). While the incremental unit test/code
  115. approach is indispensable to Extreme Programming, it is also
  116. applicable, and perhaps indispensable, outside of that methodology.
  117. The incremental test/code approach provides three main benefits to the
  118. developer:
  119. @enumerate
  120. @item
  121. Because the unit tests use the interface to the unit being tested,
  122. they allow the developer to think about how the interface should be
  123. designed for usage early in the coding process.
  124. @item
  125. They help the developer think early about aberrant cases, and code
  126. accordingly.
  127. @item
  128. By providing a documented level of correctness, they allow the
  129. developer to refactor (see @uref{http://www.refactoring.com})
  130. aggressively.
  131. @end enumerate
  132. That third reason is the one that turns people into unit testing
  133. addicts. There is nothing so satisfying as doing a wholesale
  134. replacement of an implementation, and having the unit tests reassure
  135. you at each step of that change that all is well. It is like the
  136. difference between exploring the wilderness with and without a good
  137. map and compass: without the proper gear, you are more likely to
  138. proceed cautiously and stick to the marked trails; with it, you can
  139. take the most direct path to where you want to go.
  140. Look at the Check homepage for the latest information on Check:
  141. @uref{https://libcheck.github.io/check/}.
  142. The Check project page is at:
  143. @uref{https://github.com/libcheck/check}.
  144. @node Unit Testing in C, Tutorial, Introduction, Top
  145. @chapter Unit Testing in C
  146. @cindex C unit testing
  147. The approach to unit testing frameworks used for Check originated with
  148. Smalltalk, which is a late binding object-oriented language supporting
  149. reflection. Writing a framework for C requires solving some special
  150. problems that frameworks for Smalltalk, Java or Python don't have to
  151. face. In all of those languages, the worst that a unit test can do is
  152. fail miserably, throwing an exception of some sort. In C, a unit test
  153. is just as likely to trash its address space as it is to fail to meet
  154. its test requirements, and if the test framework sits in the same
  155. address space, goodbye test framework.
  156. To solve this problem, Check uses the @code{fork()} system call to
  157. create a new address space in which to run each unit test, and then
  158. uses message queues to send information on the testing process back to
  159. the test framework. That way, your unit test can do all sorts of
  160. nasty things with pointers, and throw a segmentation fault, and the
  161. test framework will happily note a unit test error, and chug along.
  162. The Check framework is also designed to play happily with common
  163. development environments for C programming. The author designed Check
  164. around Autoconf/Automake (thus the name Check: @command{make check} is
  165. the idiom used for testing with Autoconf/Automake). Note however that
  166. Autoconf/Automake are NOT necessary to use Check; any build system
  167. is sufficient. The test failure messages thrown up by Check use the
  168. common idiom of @samp{filename:linenumber:message} used by @command{gcc}
  169. and family to report problems in source code. With (X)Emacs, the output
  170. of Check allows one to quickly navigate to the location of the unit test
  171. that failed; presumably that also works in VI and IDEs.
  172. @menu
  173. * Other Frameworks for C::
  174. @end menu
  175. @node Other Frameworks for C, , Unit Testing in C, Unit Testing in C
  176. @section Other Frameworks for C
  177. @cindex other frameworks
  178. @cindex frameworks
  179. The authors know of the following additional unit testing frameworks
  180. for C:
  181. @table @asis
  182. @item AceUnit
  183. AceUnit (Advanced C and Embedded Unit) bills itself as a comfortable C
  184. code unit test framework. It tries to mimic JUnit 4.x and includes
  185. reflection-like capabilities. AceUnit can be used in resource
  186. constraint environments, e.g. embedded software development, and
  187. importantly it runs fine in environments where you cannot include a
  188. single standard header file and cannot invoke a single standard C
  189. function from the ANSI / ISO C libraries. It also has a Windows port.
  190. It does not use forks to trap signals, although the authors have
  191. expressed interest in adding such a feature. See the
  192. @uref{http://aceunit.sourceforge.net/, AceUnit homepage}.
  193. @item GNU Autounit
  194. Much along the same lines as Check, including forking to run unit tests
  195. in a separate address space (in fact, the original author of Check
  196. borrowed the idea from @acronym{GNU} Autounit). @acronym{GNU} Autounit
  197. uses GLib extensively, which means that linking and such need special
  198. options, but this may not be a big problem to you, especially if you are
  199. already using GTK or GLib. See the @uref{http://autounit.tigris.org/,
  200. GNU Autounit homepage}.
  201. @item cUnit
  202. Also uses GLib, but does not fork to protect the address space of unit
  203. tests. See the
  204. @uref{http://web.archive.org/web/*/http://people.codefactory.se/~spotty/cunit/,
  205. archived cUnit homepage}.
  206. @item CUnit
  207. Standard C, with plans for a Win32 GUI implementation. Does not
  208. currently fork or otherwise protect the address space of unit tests.
  209. In early development. See the @uref{http://cunit.sourceforge.net,
  210. CUnit homepage}.
  211. @item CuTest
  212. A simple framework with just one .c and one .h file that you drop into
  213. your source tree. See the @uref{http://cutest.sourceforge.net, CuTest
  214. homepage}.
  215. @item CppUnit
  216. The premier unit testing framework for C++; you can also use it to test C
  217. code. It is stable, actively developed, and has a GUI interface. The
  218. primary reasons not to use CppUnit for C are first that it is quite
  219. big, and second you have to write your tests in C++, which means you
  220. need a C++ compiler. If these don't sound like concerns, it is
  221. definitely worth considering, along with other C++ unit testing
  222. frameworks. See the
  223. @uref{http://cppunit.sourceforge.net/cppunit-wiki, CppUnit homepage}.
  224. @item embUnit
  225. embUnit (Embedded Unit) is another unit test framework for embedded
  226. systems. This one appears to be superseded by AceUnit.
  227. @uref{https://sourceforge.net/projects/embunit/, Embedded Unit
  228. homepage}.
  229. @item MinUnit
  230. A minimal set of macros and that's it! The point is to
  231. show how easy it is to unit test your code. See the
  232. @uref{http://www.jera.com/techinfo/jtns/jtn002.html, MinUnit
  233. homepage}.
  234. @item CUnit for Mr. Ando
  235. A CUnit implementation that is fairly new, and apparently still in
  236. early development. See the
  237. @uref{http://park.ruru.ne.jp/ando/work/CUnitForAndo/html/, CUnit for
  238. Mr. Ando homepage}.
  239. @end table
  240. This list was last updated in March 2008. If you know of other C unit
  241. test frameworks, please send an email plus description to
  242. @email{check-devel AT lists.sourceforge.net} and we will add the entry
  243. to this list.
  244. It is the authors' considered opinion that forking or otherwise
  245. trapping and reporting signals is indispensable for unit testing (but
  246. it probably wouldn't be hard to add that to frameworks without that
  247. feature). Try 'em all out: adapt this tutorial to use all of the
  248. frameworks above, and use whichever you like. Contribute, spread the
  249. word, and make one a standard. Languages such as Java and Python are
  250. fortunate to have standard unit testing frameworks; it would be desirable
  251. that C have one as well.
  252. @node Tutorial, Advanced Features, Unit Testing in C, Top
  253. @chapter Tutorial: Basic Unit Testing
  254. This tutorial will use the JUnit
  255. @uref{http://junit.sourceforge.net/doc/testinfected/testing.htm, Test
  256. Infected} article as a starting point. We will be creating a library
  257. to represent money, @code{libmoney}, that allows conversions between
  258. different currency types. The development style will be ``test a
  259. little, code a little'', with unit test writing preceding coding.
  260. This constantly gives us insights into module usage, and also makes
  261. sure we are constantly thinking about how to test our code.
  262. @menu
  263. * How to Write a Test::
  264. * Setting Up the Money Build Using Autotools::
  265. * Setting Up the Money Build Using CMake::
  266. * Test a Little::
  267. * Creating a Suite::
  268. * SRunner Output::
  269. @end menu
  270. @node How to Write a Test, Setting Up the Money Build Using Autotools, Tutorial, Tutorial
  271. @section How to Write a Test
  272. Test writing using Check is very simple. The file in which the checks
  273. are defined must include @file{check.h} as so:
  274. @example
  275. @verbatim
  276. #include <check.h>
  277. @end verbatim
  278. @end example
  279. The basic unit test looks as follows:
  280. @example
  281. @verbatim
  282. START_TEST (test_name)
  283. {
  284. /* unit test code */
  285. }
  286. END_TEST
  287. @end verbatim
  288. @end example
  289. The @code{START_TEST}/@code{END_TEST} pair are macros that setup basic
  290. structures to permit testing. It is a mistake to leave off the
  291. @code{END_TEST} marker; doing so produces all sorts of strange errors
  292. when the check is compiled.
  293. @node Setting Up the Money Build Using Autotools, Setting Up the Money Build Using CMake, How to Write a Test, Tutorial
  294. @section Setting Up the Money Build Using Autotools
  295. Since we are creating a library to handle money, we will first create
  296. an interface in @file{money.h}, an implementation in @file{money.c},
  297. and a place to store our unit tests, @file{check_money.c}. We want to
  298. integrate these core files into our build system, and will need some
  299. additional structure. To manage everything we'll use Autoconf,
  300. Automake, and friends (collectively known as Autotools) for this
  301. example. Note that one could do something similar with ordinary
  302. Makefiles, or any other build system. It is in the authors' opinion that
  303. it is generally easier to use Autotools than bare Makefiles, and they
  304. provide built-in support for running tests.
  305. Note that this is not the place to explain how Autotools works. If
  306. you need help understanding what's going on beyond the explanations
  307. here, the best place to start is probably Alexandre Duret-Lutz's
  308. excellent
  309. @uref{http://www.lrde.epita.fr/~adl/autotools.html,
  310. Autotools tutorial}.
  311. The examples in this section are part of the Check distribution; you
  312. don't need to spend time cutting and pasting or (worse) retyping them.
  313. Locate the Check documentation on your system and look in the
  314. @samp{example} directory. The standard directory for GNU/Linux
  315. distributions should be @samp{/usr/share/doc/check/example}. This
  316. directory contains the final version reached at the end of the tutorial. If
  317. you want to follow along, create backups of @file{money.h},
  318. @file{money.c}, and @file{check_money.c}, and then delete the originals.
  319. We set up a directory structure as follows:
  320. @example
  321. @verbatim
  322. .
  323. |-- Makefile.am
  324. |-- README
  325. |-- configure.ac
  326. |-- src
  327. | |-- Makefile.am
  328. | |-- main.c
  329. | |-- money.c
  330. | `-- money.h
  331. `-- tests
  332. |-- Makefile.am
  333. `-- check_money.c
  334. @end verbatim
  335. @end example
  336. Note that this is the output of @command{tree}, a great directory
  337. visualization tool. The top-level @file{Makefile.am} is simple; it
  338. merely tells Automake how to process sub-directories:
  339. @example
  340. @verbatim
  341. SUBDIRS = src . tests
  342. @end verbatim
  343. @end example
  344. Note that @code{tests} comes last, because the code should be testing
  345. an already compiled library. @file{configure.ac} is standard Autoconf
  346. boilerplate, as specified by the Autotools tutorial and as suggested
  347. by @command{autoscan}.
  348. @file{src/Makefile.am} builds @samp{libmoney} as a Libtool archive,
  349. and links it to an application simply called @command{main}. The
  350. application's behavior is not important to this tutorial; what's
  351. important is that none of the functions we want to unit test appear in
  352. @file{main.c}; this probably means that the only function in
  353. @file{main.c} should be @code{main()} itself. In order to test the
  354. whole application, unit testing is not appropriate: you should use a
  355. system testing tool like Autotest. If you really want to test
  356. @code{main()} using Check, rename it to something like
  357. @code{_myproject_main()} and write a wrapper around it.
  358. The primary build instructions for our unit tests are in
  359. @file{tests/Makefile.am}:
  360. @cartouche
  361. @example
  362. @verbatiminclude example/tests/Makefile.am
  363. @end example
  364. @end cartouche
  365. @code{TESTS} tells Automake which test programs to run for
  366. @command{make check}. Similarly, the @code{check_} prefix in
  367. @code{check_PROGRAMS} actually comes from Automake; it says to build
  368. these programs only when @command{make check} is run. (Recall that
  369. Automake's @code{check} target is the origin of Check's name.) The
  370. @command{check_money} test is a program that we will build from
  371. @file{tests/check_money.c}, linking it against both
  372. @file{src/libmoney.la} and the installed @file{libcheck.la} on our
  373. system. The appropriate compiler and linker flags for using Check are
  374. found in @code{@@CHECK_CFLAGS@@} and @code{@@CHECK_LIBS@@}, values
  375. defined by the @code{AM_PATH_CHECK} macro.
  376. Now that all this infrastructure is out of the way, we can get on with
  377. development. @file{src/money.h} should only contain standard C header
  378. boilerplate:
  379. @cartouche
  380. @example
  381. @verbatiminclude example/src/money.1.h
  382. @end example
  383. @end cartouche
  384. @file{src/money.c} should be empty, and @file{tests/check_money.c}
  385. should only contain an empty @code{main()} function:
  386. @cartouche
  387. @example
  388. @verbatiminclude example/tests/check_money.1.c
  389. @end example
  390. @end cartouche
  391. Create the GNU Build System for the project and then build @file{main}
  392. and @file{libmoney.la} as follows:
  393. @example
  394. @verbatim
  395. $ autoreconf --install
  396. $ ./configure
  397. $ make
  398. @end verbatim
  399. @end example
  400. (@command{autoreconf} determines which commands are needed in order
  401. for @command{configure} to be created or brought up to date.
  402. Previously one would use a script called @command{autogen.sh} or
  403. @command{bootstrap}, but that practice is unnecessary now.)
  404. Now build and run the @command{check_money} test with @command{make
  405. check}. If all goes well, @command{make} should report that our tests
  406. passed. No surprise, because there aren't any tests to fail. If you
  407. have problems, make sure to see @ref{Supported Build Systems}.
  408. This was tested on the isadora distribution of Linux Mint
  409. GNU/Linux in November 2012, using Autoconf 2.65, Automake 1.11.1,
  410. and Libtool 2.2.6b. Please report any problems to
  411. @email{check-devel AT lists.sourceforge.net}.
  412. @node Setting Up the Money Build Using CMake, Test a Little, Setting Up the Money Build Using Autotools, Tutorial
  413. @section Setting Up the Money Build Using CMake
  414. Since we are creating a library to handle money, we will first create
  415. an interface in @file{money.h}, an implementation in @file{money.c},
  416. and a place to store our unit tests, @file{check_money.c}. We want to
  417. integrate these core files into our build system, and will need some
  418. additional structure. To manage everything we'll use CMake for this
  419. example. Note that one could do something similar with ordinary
  420. Makefiles, or any other build system. It is in the authors' opinion that
  421. it is generally easier to use CMake than bare Makefiles, and they
  422. provide built-in support for running tests.
  423. Note that this is not the place to explain how CMake works. If
  424. you need help understanding what's going on beyond the explanations
  425. here, the best place to start is probably the @uref{http://www.cmake.org,
  426. CMake project's homepage}.
  427. The examples in this section are part of the Check distribution; you
  428. don't need to spend time cutting and pasting or (worse) retyping them.
  429. Locate the Check documentation on your system and look in the
  430. @samp{example} directory, or look in the Check source. If on a GNU/Linux
  431. system the standard directory should be @samp{/usr/share/doc/check/example}.
  432. This directory contains the final version reached at the end of the tutorial. If
  433. you want to follow along, create backups of @file{money.h},
  434. @file{money.c}, and @file{check_money.c}, and then delete the originals.
  435. We set up a directory structure as follows:
  436. @example
  437. @verbatim
  438. .
  439. |-- Makefile.am
  440. |-- README
  441. |-- CMakeLists.txt
  442. |-- cmake
  443. | |-- config.h.in
  444. | |-- FindCheck.cmake
  445. |-- src
  446. | |-- CMakeLists.txt
  447. | |-- main.c
  448. | |-- money.c
  449. | `-- money.h
  450. `-- tests
  451. |-- CMakeLists.txt
  452. `-- check_money.c
  453. @end verbatim
  454. @end example
  455. The top-level @file{CMakeLists.txt} contains the configuration checks
  456. for available libraries and types, and also defines sub-directories
  457. to process. The @file{cmake/FindCheck.cmake} file contains instructions
  458. for locating Check on the system and setting up the build to use it.
  459. If the system does not have pkg-config installed, @file{cmake/FindCheck.cmake}
  460. may not be able to locate Check successfully. In this case, the install
  461. directory of Check must be located manually, and the following line
  462. added to @file{tests/CMakeLists.txt} (assuming Check was installed under
  463. C:\\Program Files\\check):
  464. @verbatim
  465. set(CHECK_INSTALL_DIR "C:/Program Files/check")
  466. @end verbatim
  467. Note that @code{tests} comes last, because the code should be testing
  468. an already compiled library.
  469. @file{src/CMakeLists.txt} builds @samp{libmoney} as an archive,
  470. and links it to an application simply called @command{main}. The
  471. application's behavior is not important to this tutorial; what's
  472. important is that none of the functions we want to unit test appear in
  473. @file{main.c}; this probably means that the only function in
  474. @file{main.c} should be @code{main()} itself. In order to test the
  475. whole application, unit testing is not appropriate: you should use a
  476. system testing tool like Autotest. If you really want to test
  477. @code{main()} using Check, rename it to something like
  478. @code{_myproject_main()} and write a wrapper around it.
  479. Now that all this infrastructure is out of the way, we can get on with
  480. development. @file{src/money.h} should only contain standard C header
  481. boilerplate:
  482. @cartouche
  483. @example
  484. @verbatiminclude example/src/money.1.h
  485. @end example
  486. @end cartouche
  487. @file{src/money.c} should be empty, and @file{tests/check_money.c}
  488. should only contain an empty @code{main()} function:
  489. @cartouche
  490. @example
  491. @verbatiminclude example/tests/check_money.1.c
  492. @end example
  493. @end cartouche
  494. Create the CMake Build System for the project and then build @file{main}
  495. and @file{libmoney.la} as follows for Unix-compatible systems:
  496. @example
  497. @verbatim
  498. $ cmake .
  499. $ make
  500. @end verbatim
  501. @end example
  502. and for MSVC on Windows:
  503. @example
  504. @verbatim
  505. $ cmake -G "NMake Makefiles" .
  506. $ nmake
  507. @end verbatim
  508. @end example
  509. Now build and run the @command{check_money} test, with either @command{make
  510. test} on a Unix-compatible system or @command{nmake test} if on Windows using MSVC.
  511. If all goes well, the command should report that our tests
  512. passed. No surprise, because there aren't any tests to fail.
  513. This was tested on Windows 7 using CMake 2.8.12.1 and MSVC 16.00.30319.01/
  514. Visual Studio 10 in February 2014. Please report any problems to
  515. @email{check-devel AT lists.sourceforge.net}.
  516. @node Test a Little, Creating a Suite, Setting Up the Money Build Using CMake, Tutorial
  517. @section Test a Little, Code a Little
  518. The @uref{http://junit.sourceforge.net/doc/testinfected/testing.htm,
  519. Test Infected} article starts out with a @code{Money} class, and so
  520. will we. Of course, we can't do classes with C, but we don't really
  521. need to. The Test Infected approach to writing code says that we
  522. should write the unit test @emph{before} we write the code, and in
  523. this case, we will be even more dogmatic and doctrinaire than the
  524. authors of Test Infected (who clearly don't really get this stuff,
  525. only being some of the originators of the Patterns approach to
  526. software development and OO design).
  527. Here are the changes to @file{check_money.c} for our first unit test:
  528. @cartouche
  529. @example
  530. @verbatiminclude check_money.1-2.c.diff
  531. @end example
  532. @end cartouche
  533. @findex ck_assert_int_eq
  534. @findex ck_assert_str_eq
  535. A unit test should just chug along and complete. If it exits early,
  536. or is signaled, it will fail with a generic error message. (Note: it
  537. is conceivable that you expect an early exit, or a signal and there is
  538. functionality in Check to specifically assert that we should expect a
  539. signal or an early exit.) If we want to get some information
  540. about what failed, we need to use some calls that will point out a failure.
  541. Two such calls are @code{ck_assert_int_eq} (used to determine if two integers
  542. are equal) and @code{ck_assert_str_eq} (used to determine if two null terminated
  543. strings are equal). Both of these functions (actually macros) will signal an error
  544. if their arguments are not equal.
  545. @findex ck_assert
  546. An alternative to using @code{ck_assert_int_eq} and @code{ck_assert_str_eq}
  547. is to write the expression under test directly using @code{ck_assert}.
  548. This takes one Boolean argument which must be True for the check to pass.
  549. The second test could be rewritten as follows:
  550. @example
  551. @verbatim
  552. ck_assert(strcmp (money_currency (m), "USD") == 0);
  553. @end verbatim
  554. @end example
  555. @findex ck_assert_msg
  556. @code{ck_assert} will find and report failures, but will not print any
  557. user supplied message in the unit test result. To print a user defined
  558. message along with any failures found, use @code{ck_assert_msg}. The first
  559. argument is a Boolean argument. The remaining arguments support @code{varargs}
  560. and accept @code{printf}-style format strings and arguments. This is especially
  561. useful while debugging. For example, the second test could be rewritten as:
  562. @example
  563. @verbatim
  564. ck_assert_msg(strcmp (money_currency (m), "USD") == 0,
  565. "Was expecting a currency of USD, but found %s", money_currency (m));
  566. @end verbatim
  567. @end example
  568. @findex ck_abort
  569. @findex ck_abort_msg
  570. If the Boolean argument is too complicated to elegantly express within
  571. @code{ck_assert()}, there are the alternate functions @code{ck_abort()}
  572. and @code{ck_abort_msg()} that unconditionally fail. The second test inside
  573. @code{test_money_create} above could be rewritten as follows:
  574. @example
  575. @verbatim
  576. if (strcmp (money_currency (m), "USD") != 0)
  577. {
  578. ck_abort_msg ("Currency not set correctly on creation");
  579. }
  580. @end verbatim
  581. @end example
  582. For your convenience, @code{ck_assert}, which does not accept a user-supplied message,
  583. substitutes a suitable message for you. (This is also equivalent to
  584. passing a NULL message to @code{ck_assert_msg}.) So you could also
  585. write a test as follows:
  586. @example
  587. @verbatim
  588. ck_assert (money_amount (m) == 5);
  589. @end verbatim
  590. @end example
  591. This is equivalent to:
  592. @example
  593. @verbatim
  594. ck_assert_msg (money_amount (m) == 5, NULL);
  595. @end verbatim
  596. @end example
  597. which will print the file, line number, and the message
  598. @code{"Assertion 'money_amount (m) == 5' failed"} if
  599. @code{money_amount (m) != 5}.
  600. When we try to compile and run the test suite now using @command{make
  601. check}, we get a whole host of compilation errors. It may seem a bit
  602. strange to deliberately write code that won't compile, but notice what
  603. we are doing: in creating the unit test, we are also defining
  604. requirements for the money interface. Compilation errors are, in a
  605. way, unit test failures of their own, telling us that the
  606. implementation does not match the specification. If all we do is edit
  607. the sources so that the unit test compiles, we are actually making
  608. progress, guided by the unit tests, so that's what we will now do.
  609. We will patch our header @file{money.h} as follows:
  610. @cartouche
  611. @example
  612. @verbatiminclude money.1-2.h.diff
  613. @end example
  614. @end cartouche
  615. Our code compiles now, and again passes all of the tests. However,
  616. once we try to @emph{use} the functions in @code{libmoney} in the
  617. @code{main()} of @code{check_money}, we'll run into more problems, as
  618. they haven't actually been implemented yet.
  619. @node Creating a Suite, SRunner Output, Test a Little, Tutorial
  620. @section Creating a Suite
  621. To run unit tests with Check, we must create some test cases,
  622. aggregate them into a suite, and run them with a suite runner. That's
  623. a bit of overhead, but it is mostly one-off. Here's a diff for the
  624. new version of @file{check_money.c}. Note that we include stdlib.h to
  625. get the definitions of @code{EXIT_SUCCESS} and @code{EXIT_FAILURE}.
  626. @cartouche
  627. @example
  628. @verbatiminclude check_money.2-3.c.diff
  629. @end example
  630. @end cartouche
  631. Most of the @code{money_suite()} code should be self-explanatory. We are
  632. creating a suite, creating a test case, adding the test case to the
  633. suite, and adding the unit test we created above to the test case.
  634. Why separate this off into a separate function, rather than inline it
  635. in @code{main()}? Because any new tests will get added in
  636. @code{money_suite()}, but nothing will need to change in @code{main()}
  637. for the rest of this example, so main will stay relatively clean and
  638. simple.
  639. Unit tests are internally defined as static functions. This means
  640. that the code to add unit tests to test cases must be in the same
  641. compilation unit as the unit tests themselves. This provides another
  642. reason to put the creation of the test suite in a separate function:
  643. you may later want to keep one source file per suite; defining a
  644. uniquely named suite creation function allows you later to define a
  645. header file giving prototypes for all the suite creation functions,
  646. and encapsulate the details of where and how unit tests are defined
  647. behind those functions. See the test program defined for Check itself
  648. for an example of this strategy.
  649. The code in @code{main()} bears some explanation. We are creating a
  650. suite runner object of type @code{SRunner} from the @code{Suite} we
  651. created in @code{money_suite()}. We then run the suite, using the
  652. @code{CK_NORMAL} flag to specify that we should print a summary of the
  653. run, and list any failures that may have occurred. We capture the
  654. number of failures that occurred during the run, and use that to
  655. decide how to return. The @code{check} target created by Automake
  656. uses the return value to decide whether the tests passed or failed.
  657. Now that the tests are actually being run by @command{check_money}, we
  658. encounter linker errors again when we try out @code{make check}. Try it
  659. for yourself and see. The reason is that the @file{money.c}
  660. implementation of the @file{money.h} interface hasn't been created
  661. yet. Let's go with the fastest solution possible and implement stubs
  662. for each of the functions in @code{money.c}. Here is the diff:
  663. @cartouche
  664. @example
  665. @verbatiminclude money.1-3.c.diff
  666. @end example
  667. @end cartouche
  668. Note that we @code{#include <stdlib.h>} to get the definition of
  669. @code{NULL}. Now, the code compiles and links when we run @code{make
  670. check}, but our unit test fails. Still, this is progress, and we can
  671. focus on making the test pass.
  672. @node SRunner Output, , Creating a Suite, Tutorial
  673. @section SRunner Output
  674. @findex srunner_run_all
  675. @findex srunner_run
  676. The functions to run tests in an @code{SRunner} are defined as follows:
  677. @example
  678. @verbatim
  679. void srunner_run_all (SRunner * sr, enum print_output print_mode);
  680. void srunner_run (SRunner *sr, const char *sname, const char *tcname,
  681. enum print_output print_mode);
  682. @end verbatim
  683. @end example
  684. Those functions do two things:
  685. @enumerate
  686. @item
  687. They run all of the unit tests for the selected test cases defined for
  688. the selected suites in the SRunner, and collect the results in the
  689. SRunner. The determination of the selected test cases and suites
  690. depends on the specific function used.
  691. @code{srunner_run_all} will run all the defined test cases of all
  692. defined suites except if the environment variables @code{CK_RUN_CASE}
  693. or @code{CK_RUN_SUITE} are defined. If defined, those variables shall
  694. contain the name of a test suite or a test case, defining in that way
  695. the selected suite/test case.
  696. @code{srunner_run} will run the suite/case selected by the
  697. @code{sname} and @code{tcname} parameters. A value of @code{NULL}
  698. in some of those parameters means ``any suite/case''.
  699. @item
  700. They print the results according to the @code{print_mode} specified.
  701. @end enumerate
  702. For SRunners that have already been run, there is also a separate
  703. printing function defined as follows:
  704. @example
  705. @verbatim
  706. void srunner_print (SRunner *sr, enum print_output print_mode);
  707. @end verbatim
  708. @end example
  709. The enumeration values of @code{print_output} defined in Check that
  710. parameter @code{print_mode} can assume are as follows:
  711. @table @code
  712. @vindex CK_SILENT
  713. @item CK_SILENT
  714. Specifies that no output is to be generated. If you use this flag, you
  715. either need to programmatically examine the SRunner object, print
  716. separately, or use test logging (@pxref{Test Logging}.)
  717. @vindex CK_MINIMAL
  718. @item CK_MINIMAL
  719. Only a summary of the test run will be printed (number run, passed,
  720. failed, errors).
  721. @vindex CK_NORMAL
  722. @item CK_NORMAL
  723. Prints the summary of the run, and prints one message per failed
  724. test.
  725. @vindex CK_VERBOSE
  726. @item CK_VERBOSE
  727. Prints the summary, and one message per test (passed or failed)
  728. @vindex CK_ENV
  729. @vindex CK_VERBOSITY
  730. @item CK_ENV
  731. Gets the print mode from the environment variable @code{CK_VERBOSITY},
  732. which can have the values "silent", "minimal", "normal", "verbose". If
  733. the variable is not found or the value is not recognized, the print
  734. mode is set to @code{CK_NORMAL}.
  735. @vindex CK_SUBUNIT
  736. @item CK_SUBUNIT
  737. Prints running progress through the @uref{https://launchpad.net/subunit/,
  738. subunit} test runner protocol. See 'subunit support' under the Advanced Features section for more information.
  739. @end table
  740. With the @code{CK_NORMAL} flag specified in our @code{main()}, let's
  741. rerun @code{make check} now. The output from the unit test is as follows:
  742. @example
  743. @verbatim
  744. Running suite(s): Money
  745. 0%: Checks: 1, Failures: 1, Errors: 0
  746. check_money.c:9:F:Core:test_money_create:0: Assertion 'money_amount (m)==5' failed:
  747. money_amount (m)==0, 5==5
  748. FAIL: check_money
  749. =====================================================
  750. 1 of 1 test failed
  751. Please report to check-devel AT lists.sourceforge.net
  752. =====================================================
  753. @end verbatim
  754. @end example
  755. Note that the output from @code{make check} prior to Automake 1.13 will
  756. be the output of the unit test program. Starting with 1.13 Automake will
  757. run all unit test programs concurrently and store the output in
  758. log files. The output listed above should be present in a log file.
  759. The first number in the summary line tells us that 0% of our tests
  760. passed, and the rest of the line tells us that there was one check in
  761. total, and of those checks, one failure and zero errors. The next
  762. line tells us exactly where that failure occurred, and what kind of
  763. failure it was (P for pass, F for failure, E for error).
  764. After that we have some higher level output generated by Automake: the
  765. @code{check_money} program failed, and the bug-report address given in
  766. @file{configure.ac} is printed.
  767. Let's implement the @code{money_amount} function, so that it will pass
  768. its tests. We first have to create a Money structure to hold the
  769. amount, and then implement the function to return the correct amount:
  770. @cartouche
  771. @example
  772. @verbatiminclude money.3-4.c.diff
  773. @end example
  774. @end cartouche
  775. We will now rerun make check and@dots{} what's this? The output is
  776. now as follows:
  777. @example
  778. @verbatim
  779. Running suite(s): Money
  780. 0%: Checks: 1, Failures: 0, Errors: 1
  781. check_money.c:5:E:Core:test_money_create:0: (after this point)
  782. Received signal 11 (Segmentation fault)
  783. @end verbatim
  784. @end example
  785. @findex mark_point
  786. What does this mean? Note that we now have an error, rather than a
  787. failure. This means that our unit test either exited early, or was
  788. signaled. Next note that the failure message says ``after this
  789. point''; This means that somewhere after the point noted
  790. (@file{check_money.c}, line 5) there was a problem: signal 11 (a.k.a.
  791. segmentation fault). The last point reached is set on entry to the
  792. unit test, and after every call to the @code{ck_assert()},
  793. @code{ck_abort()}, @code{ck_assert_int_*()}, @code{ck_assert_str_*()},
  794. or the special function @code{mark_point()}. For example, if we wrote some test
  795. code as follows:
  796. @example
  797. @verbatim
  798. stuff_that_works ();
  799. mark_point ();
  800. stuff_that_dies ();
  801. @end verbatim
  802. @end example
  803. then the point returned will be that marked by @code{mark_point()}.
  804. The reason our test failed so horribly is that we haven't implemented
  805. @code{money_create()} to create any @code{Money}. We'll go ahead and
  806. implement that, the symmetric @code{money_free()}, and
  807. @code{money_currency()} too, in order to make our unit test pass again,
  808. here is a diff:
  809. @cartouche
  810. @example
  811. @verbatiminclude money.4-5.c.diff
  812. @end example
  813. @end cartouche
  814. @node Advanced Features, Supported Build Systems, Tutorial, Top
  815. @chapter Advanced Features
  816. What you've seen so far is all you need for basic unit testing. The
  817. features described in this section are additions to Check that make it
  818. easier for the developer to write, run, and analyze tests.
  819. @menu
  820. * Convenience Test Functions::
  821. * Running Multiple Cases::
  822. * No Fork Mode::
  823. * Test Fixtures::
  824. * Multiple Suites in one SRunner::
  825. * Selective Running of Tests::
  826. * Selecting Tests by Suite or Test Case::
  827. * Selecting Tests Based on Arbitrary Tags::
  828. * Testing Signal Handling and Exit Values::
  829. * Looping Tests::
  830. * Test Timeouts::
  831. * Determining Test Coverage::
  832. * Finding Memory Leaks::
  833. * Test Logging::
  834. * Subunit Support::
  835. @end menu
  836. @node Convenience Test Functions, Running Multiple Cases, Advanced Features, Advanced Features
  837. @section Convenience Test Functions
  838. Using the @code{ck_assert} function for all tests can lead to a lot of
  839. repetitive code that is hard to read. For your convenience Check
  840. provides a set of functions (actually macros) for testing often used
  841. conditions.
  842. @findex check_set_max_msg_size
  843. @vindex CK_MAX_MSG_SIZE
  844. The typical size of an assertion message is less than 80 bytes.
  845. However, some of the functions listed below can generate very large messages
  846. (up to 4GB allocations were seen in the wild).
  847. To prevent this, a limit is placed on the assertion message size.
  848. This limit is 4K bytes by default.
  849. It can be modified by setting the @code{CK_MAX_MSG_SIZE} environment variable,
  850. or, if it is not set, by invoking the @code{check_set_max_msg_size()} function.
  851. If used, this function must be called, once, before the first assertion.
  852. @ftable @code
  853. @item ck_abort
  854. Unconditionally fails test with default message.
  855. @item ck_abort_msg
  856. Unconditionally fails test with user supplied message.
  857. @item ck_assert
  858. Fails test if supplied condition evaluates to false.
  859. @item ck_assert_msg
  860. Fails test if supplied condition evaluates to false and displays user
  861. provided message.
  862. @item ck_assert_int_eq
  863. @itemx ck_assert_int_ne
  864. @itemx ck_assert_int_lt
  865. @itemx ck_assert_int_le
  866. @itemx ck_assert_int_gt
  867. @itemx ck_assert_int_ge
  868. Compares two signed integer values (@code{intmax_t}) and displays a predefined
  869. message with both the condition and input parameters on failure. The
  870. operator used for comparison is different for each function and is indicated
  871. by the last two letters of the function name. The abbreviations @code{eq},
  872. @code{ne}, @code{lt}, @code{le}, @code{gt}, and @code{ge} correspond to
  873. @code{==}, @code{!=}, @code{<}, @code{<=}, @code{>}, and @code{>=}
  874. respectively.
  875. @item ck_assert_uint_eq
  876. @itemx ck_assert_uint_ne
  877. @itemx ck_assert_uint_lt
  878. @itemx ck_assert_uint_le
  879. @itemx ck_assert_uint_gt
  880. @itemx ck_assert_uint_ge
  881. Similar to @code{ck_assert_int_*}, but compares two unsigned integer values
  882. (@code{uintmax_t}) instead.
  883. @item ck_assert_float_eq
  884. @itemx ck_assert_float_ne
  885. @itemx ck_assert_float_lt
  886. @itemx ck_assert_float_le
  887. @itemx ck_assert_float_gt
  888. @itemx ck_assert_float_ge
  889. Compares two floating point numbers (@code{float}) and displays a predefined
  890. message with both the condition and input parameters on failure.
  891. The operator used for comparison is different for each function
  892. and is indicated by the last two letters of the function name.
  893. The abbreviations @code{eq}, @code{ne}, @code{lt}, @code{le}, @code{gt},
  894. and @code{ge} correspond to @code{==}, @code{!=}, @code{<}, @code{<=}, @code{>},
  895. and @code{>=} respectively.
  896. Beware using those operators for floating point numbers because of possible
  897. precision loss on every operation on floating point numbers. For example
  898. (1/3)*3==1 could return false, because 1/3==0.333... (or 0.(3) notation
  899. in Europe) and cannot be represented exactly in binary. As another example
  900. 1.1f in fact could be 1.10000002384185791015625 and 2.1f could be
  901. 2.099999904632568359375 because of binary representation of floating
  902. point numbers.
  903. If you have different mathematical operations used on floating point numbers
  904. consider using precision comparisons or integer numbers instead. But in some
  905. cases those operators could be used. For example if you cyclically increment
  906. your floating point number only by positive or only by negative values then
  907. you may use @code{<}, @code{<=}, @code{>} and @code{>=} operators in tests.
  908. If your computations must end up with a certain value then @code{==} and
  909. @code{!=} operators may be used.
  910. @item ck_assert_double_eq
  911. @itemx ck_assert_double_ne
  912. @itemx ck_assert_double_lt
  913. @itemx ck_assert_double_le
  914. @itemx ck_assert_double_gt
  915. @itemx ck_assert_double_ge
  916. Similar to @code{ck_assert_float_*}, but compares two double precision
  917. floating point values (@code{double}) instead.
  918. @item ck_assert_ldouble_eq
  919. @itemx ck_assert_ldouble_ne
  920. @itemx ck_assert_ldouble_lt
  921. @itemx ck_assert_ldouble_le
  922. @itemx ck_assert_ldouble_gt
  923. @itemx ck_assert_ldouble_ge
  924. Similar to @code{ck_assert_float_*}, but compares two long double precision
  925. floating point values (@code{long double}) instead.
  926. @item ck_assert_float_eq_tol
  927. @itemx ck_assert_float_ne_tol
  928. @itemx ck_assert_float_le_tol
  929. @itemx ck_assert_float_ge_tol
  930. Compares two floating point numbers (@code{float}) with specified user tolerance
  931. set by the third parameter (@code{float}) and displays a predefined message
  932. with both the condition and input parameters on failure.
  933. The abbreviations @code{eq}, @code{ne}, @code{le}, and @code{ge} correspond
  934. to @code{==}, @code{!=}, @code{<=}, and @code{>=} respectively with acceptable
  935. error (tolerance) specified by the last parameter.
  936. Beware using those functions for floating comparisons because of
  937. (1) errors coming from floating point number representation,
  938. (2) rounding errors,
  939. (3) floating point errors are platform dependent.
  940. Floating point numbers are often internally represented in binary
  941. so they cannot be exact power of 10. All these operators have significant
  942. error in comparisons so use them only if you know what you're doing.
  943. Some assertions could fail on one platform and would be passed on another.
  944. For example expression @code{0.02<=0.01+10^-2} is true by meaning,
  945. but some platforms may calculate it as false. IEEE 754 standard specifies
  946. the floating point number format representation but it does not promise that
  947. the same computation carried out on all hardware will produce the same result.
  948. @item ck_assert_double_eq_tol
  949. @itemx ck_assert_double_ne_tol
  950. @itemx ck_assert_double_le_tol
  951. @itemx ck_assert_double_ge_tol
  952. Similar to @code{ck_assert_float_*_tol}, but compares two double precision
  953. floating point values (@code{double}) instead.
  954. @item ck_assert_ldouble_eq_tol
  955. @itemx ck_assert_ldouble_ne_tol
  956. @itemx ck_assert_ldouble_le_tol
  957. @itemx ck_assert_ldouble_ge_tol
  958. Similar to @code{ck_assert_float_*_tol}, but compares two long double precision
  959. floating point values (@code{long double}) instead.
  960. @item ck_assert_float_finite
  961. Checks that a floating point number (@code{float}) is finite and displays
  962. a predefined message with both the condition and input parameter on failure.
  963. Finite means that value cannot be positive infinity, negative infinity
  964. or NaN ("Not a Number").
  965. @item ck_assert_double_finite
  966. Similar to @code{ck_assert_float_finite}, but checks double precision
  967. floating point value (@code{double}) instead.
  968. @item ck_assert_ldouble_finite
  969. Similar to @code{ck_assert_float_finite}, but checks a long double precision
  970. floating point value (@code{long double}) instead.
  971. @item ck_assert_float_infinite
  972. Checks that a floating point number (@code{float}) is infinite and displays
  973. a predefined message with both the condition and input parameter on failure.
  974. Infinite means that value may only be positive infinity or negative infinity.
  975. @item ck_assert_double_infinite
  976. Similar to @code{ck_assert_float_infinite}, but checks double precision
  977. floating point value (@code{double}) instead.
  978. @item ck_assert_ldouble_infinite
  979. Similar to @code{ck_assert_float_infinite}, but checks a long double precision
  980. floating point value (@code{long double}) instead.
  981. @item ck_assert_float_nan
  982. Checks that a floating point number (@code{float}) is NaN ("Not a Number")
  984. and displays a predefined message with both the condition and input parameter
  985. on failure.
  986. @item ck_assert_double_nan
  987. Similar to @code{ck_assert_float_nan}, but checks double precision
  988. floating point value (@code{double}) instead.
  989. @item ck_assert_ldouble_nan
  990. Similar to @code{ck_assert_float_nan}, but checks a long double precision
  991. floating point value (@code{long double}) instead.
  992. @item ck_assert_float_nonnan
  993. Checks that a floating point number (@code{float}) is not NaN ("Not a Number")
  994. and displays a predefined message with both the condition and input parameter
  995. on failure.
  996. @item ck_assert_double_nonnan
  997. Similar to @code{ck_assert_float_nonnan}, but checks double precision
  998. floating point value (@code{double}) instead.
  999. @item ck_assert_ldouble_nonnan
  1000. Similar to @code{ck_assert_float_nonnan}, but checks a long double precision
  1001. floating point value (@code{long double}) instead.
  1002. @item ck_assert_str_eq
  1003. @itemx ck_assert_str_ne
  1004. @itemx ck_assert_str_lt
  1005. @itemx ck_assert_str_le
  1006. @itemx ck_assert_str_gt
  1007. @itemx ck_assert_str_ge
  1008. Compares two null-terminated @code{char *} string values, using the
  1009. @code{strcmp()} function internally, and displays predefined message
  1010. with condition and input parameter values on failure. The comparison
  1011. operator is again indicated by the last two letters of the function name.
  1012. @code{ck_assert_str_lt(a, b)} will pass if the unsigned numerical value
  1013. of the character string @code{a} is less than that of @code{b}.
  1014. If a NULL pointer is passed to any comparison macro, the check will fail.
  1015. @item ck_assert_pstr_eq
  1016. @itemx ck_assert_pstr_ne
  1017. Similar to @code{ck_assert_str_*} macros, but able to check undefined strings.
  1018. If a NULL pointer would be passed to a comparison macro it would mean that
  1019. a string is undefined. If both strings are undefined @code{ck_assert_pstr_eq}
  1020. would pass, but @code{ck_assert_pstr_ne} would fail. If only one of strings is
  1021. undefined @code{ck_assert_pstr_eq} macro would fail and @code{ck_assert_pstr_ne}
  1022. would pass.
  1023. @item ck_assert_ptr_eq
  1024. @itemx ck_assert_ptr_ne
  1025. Compares two pointers and displays predefined message with
  1026. condition and values of both input parameters on failure. The operator
  1027. used for comparison is different for each function and is indicated by
  1028. the last two letters of the function name. The abbreviations @code{eq} and
  1029. @code{ne} correspond to @code{==} and @code{!=} respectively.
  1030. @item ck_assert_ptr_null
  1031. @itemx ck_assert_ptr_nonnull
  1032. Compares a pointer against NULL and displays a predefined message with
  1033. the condition and value of the input parameter on failure.
  1034. @code{ck_assert_ptr_null} checks that pointer is equal to NULL and
  1035. @code{ck_assert_ptr_nonnull} checks that pointer is not equal to NULL.
  1036. @code{ck_assert_ptr_nonnull} is highly recommended to use in situations
  1037. when a function call can return NULL as error indication (like functions
  1038. that use malloc, calloc, strdup, mmap, etc).
  1039. @item ck_assert_mem_eq
  1040. @itemx ck_assert_mem_ne
  1041. @itemx ck_assert_mem_lt
  1042. @itemx ck_assert_mem_le
  1043. @itemx ck_assert_mem_gt
  1044. @itemx ck_assert_mem_ge
  1045. Compares contents of two memory locations of the given length, using the
  1046. @code{memcmp()} function internally, and displays predefined message
  1047. with condition and input parameter values on failure. The comparison
  1048. operator is again indicated by the last two letters of the function name.
  1049. @code{ck_assert_mem_lt(a, b)} will pass if the unsigned numerical value
  1050. of memory location @code{a} is less than that of @code{b}.
  1051. @item fail
  1052. (Deprecated) Unconditionally fails test with user supplied message.
  1053. @item fail_if
  1054. (Deprecated) Fails test if supplied condition evaluates to true and
  1055. displays user provided message.
  1056. @item fail_unless
  1057. (Deprecated) Fails test if supplied condition evaluates to false and
  1058. displays user provided message.
  1059. @end ftable
  1060. @node Running Multiple Cases, No Fork Mode, Convenience Test Functions, Advanced Features
  1061. @section Running Multiple Cases
  1062. What happens if we pass @code{-1} as the @code{amount} in
  1063. @code{money_create()}? What should happen? Let's write a unit test.
  1064. Since we are now testing limits, we should also test what happens when
  1065. we create @code{Money} where @code{amount == 0}. Let's put these in a
  1066. separate test case called ``Limits'' so that @code{money_suite} is
  1067. changed like so:
  1068. @cartouche
  1069. @example
  1070. @verbatiminclude check_money.3-6.c.diff
  1071. @end example
  1072. @end cartouche
  1073. Now we can rerun our suite, and fix the problem(s). Note that errors
  1074. in the ``Core'' test case will be reported as ``Core'', and errors in
  1075. the ``Limits'' test case will be reported as ``Limits'', giving you
  1076. additional information about where things broke.
  1077. @cartouche
  1078. @example
  1079. @verbatiminclude money.5-6.c.diff
  1080. @end example
  1081. @end cartouche
  1082. @node No Fork Mode, Test Fixtures, Running Multiple Cases, Advanced Features
  1083. @section No Fork Mode
  1084. Check normally forks to create a separate address space. This allows
  1085. a signal or early exit to be caught and reported, rather than taking
  1086. down the entire test program, and is normally very useful. However,
  1087. when you are trying to debug why the segmentation fault or other
  1088. program error occurred, forking makes it difficult to use debugging
  1089. tools. To define fork mode for an @code{SRunner} object, you can do
  1090. one of the following:
  1091. @vindex CK_FORK
  1092. @findex srunner_set_fork_status
  1093. @enumerate
  1094. @item
  1095. Define the CK_FORK environment variable to equal ``no''.
  1096. @item
  1097. Explicitly define the fork status through the use of the following
  1098. function:
  1099. @verbatim
  1100. void srunner_set_fork_status (SRunner * sr, enum fork_status fstat);
  1101. @end verbatim
  1102. @end enumerate
  1103. The enum @code{fork_status} allows the @code{fstat} parameter to
  1104. assume the following values: @code{CK_FORK} and @code{CK_NOFORK}. An
  1105. explicit call to @code{srunner_set_fork_status()} overrides the
  1106. @code{CK_FORK} environment variable.
  1107. @node Test Fixtures, Multiple Suites in one SRunner, No Fork Mode, Advanced Features
  1108. @section Test Fixtures
  1109. We may want multiple tests that all use the same Money. In such
  1110. cases, rather than setting up and tearing down objects for each unit
  1111. test, it may be convenient to add some setup that is constant across
  1112. all the tests in a test case. Each such setup/teardown pair is called
  1113. a @dfn{test fixture} in test-driven development jargon.
  1114. A fixture is created by defining a setup and/or a teardown function,
  1115. and associating it with a test case. There are two kinds of test
  1116. fixtures in Check: checked and unchecked fixtures. These are defined
  1117. as follows:
  1118. @table @asis
  1119. @item Checked fixtures
  1120. are run inside the address space created by the fork to create the
  1121. unit test. Before each unit test in a test case, the @code{setup()}
  1122. function is run, if defined. After each unit test, the
  1123. @code{teardown()} function is run, if defined. Since they run inside
  1124. the forked address space, if checked fixtures signal or otherwise
  1125. fail, they will be caught and reported by the @code{SRunner}. A
  1126. checked @code{teardown()} fixture will not run if the unit test
  1127. fails.
  1128. @item Unchecked fixtures
  1129. are run in the same address space as the test program. Therefore they
  1130. may not signal or exit, but may use the fail functions. The unchecked
  1131. @code{setup()}, if defined, is run before the test case is
  1132. started. The unchecked @code{teardown()}, if defined, is run after the
  1133. test case is done. An unchecked @code{teardown()} fixture will run even
  1134. if a unit test fails.
  1135. @end table
  1136. An important difference is that the checked fixtures are run once per
  1137. unit test and the unchecked fixtures are run once per test case.
  1138. So for a test case that contains @code{check_one()} and
  1139. @code{check_two()} unit tests,
  1140. @code{checked_setup()}/@code{checked_teardown()} checked fixtures, and
  1141. @code{unchecked_setup()}/@code{unchecked_teardown()} unchecked
  1142. fixtures, the control flow would be:
  1143. @example
  1144. @verbatim
  1145. unchecked_setup();
  1146. fork();
  1147. checked_setup();
  1148. check_one();
  1149. checked_teardown();
  1150. wait();
  1151. fork();
  1152. checked_setup();
  1153. check_two();
  1154. checked_teardown();
  1155. wait();
  1156. unchecked_teardown();
  1157. @end verbatim
  1158. @end example
  1159. @menu
  1160. * Test Fixture Examples::
  1161. * Checked vs Unchecked Fixtures::
  1162. @end menu
  1163. @node Test Fixture Examples, Checked vs Unchecked Fixtures, Test Fixtures, Test Fixtures
  1164. @subsection Test Fixture Examples
  1165. We create a test fixture in Check as follows:
  1166. @enumerate
  1167. @item
  1168. Define global variables, and functions to setup and teardown the
  1169. globals. The functions both take @code{void} and return @code{void}.
  1170. In our example, we'll make @code{five_dollars} be a global created and
  1171. freed by @code{setup()} and @code{teardown()} respectively.
  1172. @item
  1173. @findex tcase_add_checked_fixture
  1174. Add the @code{setup()} and @code{teardown()} functions to the test
  1175. case with @code{tcase_add_checked_fixture()}. In our example, this
  1176. belongs in the suite setup function @code{money_suite}.
  1177. @item
  1178. Rewrite tests to use the globals. We'll rewrite our first test to use
  1179. @code{five_dollars}.
  1180. @end enumerate
  1181. Note that the functions used for setup and teardown do not need to be
  1182. named @code{setup()} and @code{teardown()}, but they must take
  1183. @code{void} and return @code{void}. We'll update @file{check_money.c}
  1184. with the following patch:
  1185. @cartouche
  1186. @example
  1187. @verbatiminclude check_money.6-7.c.diff
  1188. @end example
  1189. @end cartouche
  1190. @node Checked vs Unchecked Fixtures, , Test Fixture Examples, Test Fixtures
  1191. @subsection Checked vs Unchecked Fixtures
  1192. Checked fixtures run once for each unit test in a test case, and so
  1193. they should not be used for expensive setup. However, if a checked
  1194. fixture fails and @code{CK_FORK} mode is being used, it will not bring
  1195. down the entire framework.
  1196. On the other hand, unchecked fixtures run once for an entire test
  1197. case, as opposed to once per unit test, and so can be used for
  1198. expensive setup. However, since they may take down the entire test
  1199. program, they should only be used if they are known to be safe.
  1200. Additionally, the isolation of objects created by unchecked fixtures
  1201. is not guaranteed by @code{CK_NOFORK} mode. Normally, in
  1202. @code{CK_FORK} mode, unit tests may abuse the objects created in an
  1203. unchecked fixture with impunity, without affecting other unit tests in
  1204. the same test case, because the fork creates a separate address space.
  1205. However, in @code{CK_NOFORK} mode, all tests live in the same address
  1206. space, and side effects in one test will affect the unchecked fixture
  1207. for the other tests.
  1208. A checked fixture will generally not be affected by unit test side
  1209. effects, since the @code{setup()} is run before each unit test. There
  1210. is an exception for side effects to the total environment in which the
  1211. test program lives: for example, if the @code{setup()} function
  1212. initializes a file that a unit test then changes, the combination of
  1213. the @code{teardown()} function and @code{setup()} function must be able
  1214. to restore the environment for the next unit test.
  1215. If the @code{setup()} function in a fixture fails, in either checked
  1216. or unchecked fixtures, the unit tests for the test case, and the
  1217. @code{teardown()} function for the fixture will not be run. A fixture
  1218. error will be created and reported to the @code{SRunner}.
  1219. @node Multiple Suites in one SRunner, Selective Running of Tests, Test Fixtures, Advanced Features
  1220. @section Multiple Suites in one SRunner
  1221. In a large program, it will be convenient to create multiple suites,
  1222. each testing a module of the program. While one can create several
  1223. test programs, each running one @code{Suite}, it may be convenient to
  1224. create one main test program, and use it to run multiple suites. The
  1225. Check test suite provides an example of how to do this. The main
  1226. testing program is called @code{check_check}, and has a header file
  1227. that declares suite creation functions for all the module tests:
  1228. @example
  1229. @verbatim
  1230. Suite *make_sub_suite (void);
  1231. Suite *make_sub2_suite (void);
  1232. Suite *make_master_suite (void);
  1233. Suite *make_list_suite (void);
  1234. Suite *make_msg_suite (void);
  1235. Suite *make_log_suite (void);
  1236. Suite *make_limit_suite (void);
  1237. Suite *make_fork_suite (void);
  1238. Suite *make_fixture_suite (void);
  1239. Suite *make_pack_suite (void);
  1240. @end verbatim
  1241. @end example
  1242. @findex srunner_add_suite
  1243. The function @code{srunner_add_suite()} is used to add additional
  1244. suites to an @code{SRunner}. Here is the code that sets up and runs
  1245. the @code{SRunner} in the @code{main()} function in
  1246. @file{check_check_main.c}:
  1247. @example
  1248. @verbatim
  1249. SRunner *sr;
  1250. sr = srunner_create (make_master_suite ());
  1251. srunner_add_suite (sr, make_list_suite ());
  1252. srunner_add_suite (sr, make_msg_suite ());
  1253. srunner_add_suite (sr, make_log_suite ());
  1254. srunner_add_suite (sr, make_limit_suite ());
  1255. srunner_add_suite (sr, make_fork_suite ());
  1256. srunner_add_suite (sr, make_fixture_suite ());
  1257. srunner_add_suite (sr, make_pack_suite ());
  1258. @end verbatim
  1259. @end example
  1260. @node Selective Running of Tests, Testing Signal Handling and Exit Values, Multiple Suites in one SRunner, Advanced Features
  1261. @section Selective Running of Tests
  1262. After adding a couple of suites and some test cases in each, it is
  1263. sometimes practical to be able to run only one suite, or one specific
  1264. test case, without recompiling the test code. Check provides two ways
  1265. to accomplish this, either by specifying a suite or test case by name
  1266. or by assigning tags to test cases and specifying one or more tags to
  1267. run.
  1268. @menu
  1269. * Selecting Tests by Suite or Test Case::
  1270. * Selecting Tests Based on Arbitrary Tags::
  1271. @end menu
  1272. @node Selecting Tests by Suite or Test Case, Selecting Tests Based on Arbitrary Tags, Selective Running of Tests, Selective Running of Tests
  1273. @subsection Selecting Tests by Suite or Test Case
  1274. @vindex CK_RUN_SUITE
  1275. @vindex CK_RUN_CASE
  1276. There are two environment variables available that offer this
  1277. ability, @code{CK_RUN_SUITE} and @code{CK_RUN_CASE}. Just set the
  1278. value to the name of the suite and/or test case you want to run. These
  1279. environment variables can also be a good integration tool for running
  1280. specific tests from within another tool, e.g. an IDE.
  1281. @node Selecting Tests Based on Arbitrary Tags, , Selecting Tests by Suite or Test Case, Selective Running of Tests
  1282. @subsection Selecting Tests Based on Arbitrary Tags
  1283. @vindex CK_INCLUDE_TAGS
  1284. @vindex CK_EXCLUDE_TAGS
  1285. It can be useful to dynamically include or exclude groups of tests to
  1286. be run based on criteria other than the suite or test case name. For
  1287. example, one or more tags can be assigned to test cases. The tags
  1288. could indicate if a test runs for a long time, so such tests could be
  1289. excluded in order to run quicker tests for a sanity
  1290. check. Alternately, tags may be used to indicate which functional
  1291. areas tests cover. Tests can then be run that include all test cases
  1292. for a given set of functional areas.
  1293. In Check, a tag is a string of characters without white space. One or
  1294. more tags can be assigned to a test case by using the
  1295. @code{tcase_set_tags} function. This function accepts a string, and
  1296. multiple tags can be specified by delimiting them with spaces. For
  1297. example:
  1298. @example
  1299. @verbatim
  1300. Suite *s;
  1301. TCase *red, *blue, *purple, *yellow, *black;
  1302. s = suite_create("Check Tag Filtering");
  1303. red = tcase_create("Red");
  1304. tcase_set_tags(red, "Red");
  1305. suite_add_tcase (s, red);
  1306. tcase_add_test(red, red_test1);
  1307. blue = tcase_create("Blue");
  1308. tcase_set_tags(blue, "Blue");
  1309. suite_add_tcase (s, blue);
  1310. tcase_add_test(blue, blue_test1);
  1311. purple = tcase_create("Purple");
  1312. tcase_set_tags(purple, "Red Blue");
  1313. suite_add_tcase (s, purple);
  1314. tcase_add_test(purple, purple_test1);
  1315. @end verbatim
  1316. @end example
  1317. Once test cases are tagged they may be selectively run in one of two ways:
  1318. a) Using Environment Variables
  1319. There are two environment variables available for selecting test cases
  1320. based on tags: @code{CK_INCLUDE_TAGS} and
  1321. @code{CK_EXCLUDE_TAGS}. These can be set to a space separated list of
  1322. tag names. If @code{CK_INCLUDE_TAGS} is set then test cases which
  1323. include at least one tag in common with @code{CK_INCLUDE_TAGS} will be
  1324. run. If @code{CK_EXCLUDE_TAGS} is set then test cases with one tag in
  1325. common with @code{CK_EXCLUDE_TAGS} will not be run. In cases where
  1326. both @code{CK_INCLUDE_TAGS} and @code{CK_EXCLUDE_TAGS} match a tag for
  1327. a test case the test will be excluded.
  1328. Both @code{CK_INCLUDE_TAGS} and @code{CK_EXCLUDE_TAGS} can be
  1329. specified in conjunction with @code{CK_RUN_SUITE} or even
  1330. @code{CK_RUN_CASE} in which case they will have the effect of further
  1331. narrowing the selection.
  1332. b) Programmatically
  1333. The @code{srunner_run_tagged} function allows one to specify which
  1334. tags to run or exclude from a suite runner. This can be used to
  1335. programmatically control which test cases may run.
  1336. @node Testing Signal Handling and Exit Values, Looping Tests, Selective Running of Tests, Advanced Features
  1337. @section Testing Signal Handling and Exit Values
  1338. @findex tcase_add_test_raise_signal
  1339. To enable testing of signal handling, there is a function
  1340. @code{tcase_add_test_raise_signal()} which is used instead of
  1341. @code{tcase_add_test()}. This function takes an additional signal
  1342. argument, specifying a signal that the test expects to receive. If no
  1343. signal is received this is logged as a failure. If a different signal
  1344. is received this is logged as an error.
  1345. The signal handling functionality only works in CK_FORK mode.
  1346. @findex tcase_add_exit_test
  1347. To enable testing of expected exits, there is a function
  1348. @code{tcase_add_exit_test()} which is used instead of @code{tcase_add_test()}.
  1349. This function takes an additional expected exit value argument,
  1350. specifying a value that the test is expected to exit with. If the test
  1351. exits with any other value this is logged as a failure. If the test exits
  1352. early this is logged as an error.
  1353. The exit handling functionality only works in CK_FORK mode.
  1354. @node Looping Tests, Test Timeouts, Testing Signal Handling and Exit Values, Advanced Features
  1355. @section Looping Tests
  1356. Looping tests are tests that are called with a new context for each
  1357. loop iteration. This makes them ideal for table based tests. If
  1358. loops are used inside ordinary tests to test multiple values, only the
  1359. first error will be shown before the test exits. However, looping
  1360. tests allow for all errors to be shown at once, which can help out
  1361. with debugging.
  1362. @findex tcase_add_loop_test
  1363. Adding a normal test with @code{tcase_add_loop_test()} instead of
  1364. @code{tcase_add_test()} will make the test function the body of a
  1365. @code{for} loop, with the addition of a fork before each call. The
  1366. loop variable @code{_i} is available for use inside the test function;
  1367. for example, it could serve as an index into a table. For failures,
  1368. the iteration which caused the failure is available in error messages
  1369. and logs.
  1370. Start and end values for the loop are supplied when adding the test.
  1371. The values are used as in a normal @code{for} loop. Below is some
  1372. pseudo-code to show the concept:
  1373. @example
  1374. @verbatim
  1375. for (_i = tfun->loop_start; _i < tfun->loop_end; _i++)
  1376. {
  1377. fork(); /* New context */
  1378. tfun->f(_i); /* Call test function */
  1379. wait(); /* Wait for child to terminate */
  1380. }
  1381. @end verbatim
  1382. @end example
  1383. An example of looping test usage follows:
  1384. @example
  1385. @verbatim
  1386. static const int primes[5] = {2,3,5,7,11};
  1387. START_TEST (check_is_prime)
  1388. {
  1389. ck_assert (is_prime (primes[_i]));
  1390. }
  1391. END_TEST
  1392. ...
  1393. tcase_add_loop_test (tcase, check_is_prime, 0, 5);
  1394. @end verbatim
  1395. @end example
  1396. Looping tests work in @code{CK_NOFORK} mode as well, but without the
  1397. forking. This means that only the first error will be shown.
  1398. @node Test Timeouts, Determining Test Coverage, Looping Tests, Advanced Features
  1399. @section Test Timeouts
  1400. @findex tcase_set_timeout
  1401. @vindex CK_DEFAULT_TIMEOUT
  1402. @vindex CK_TIMEOUT_MULTIPLIER
  1403. To be certain that a test won't hang indefinitely, all tests are run
  1404. with a timeout, the default being 4 seconds. If the test is not
  1405. finished within that time, it is killed and logged as an error.
  1406. The timeout for a specific test case, which may contain multiple unit
  1407. tests, can be changed with the @code{tcase_set_timeout()} function.
  1408. The default timeout used for all test cases can be changed with the
  1409. environment variable @code{CK_DEFAULT_TIMEOUT}, but this will not
  1410. override an explicitly set timeout. Another way to change the timeout
  1411. length is to use the @code{CK_TIMEOUT_MULTIPLIER} environment variable,
  1412. which multiplies all timeouts, including those set with
  1413. @code{tcase_set_timeout()}, with the supplied integer value. All timeout
  1414. arguments are in seconds and a timeout of 0 seconds turns off the timeout
  1415. functionality. On systems that support it, the timeout can be specified
  1416. using a nanosecond precision. Otherwise, second precision is used.
  1417. Test timeouts are only available in CK_FORK mode.
  1418. @node Determining Test Coverage, Finding Memory Leaks, Test Timeouts, Advanced Features
  1419. @section Determining Test Coverage
  1420. The term @dfn{code coverage} refers to the extent that the statements
  1421. of a program are executed during a run. Thus, @dfn{test coverage}
  1422. refers to code coverage when executing unit tests. This information
  1423. can help you to do two things:
  1424. @itemize
  1425. @item
  1426. Write better tests that more fully exercise your code, thereby
  1427. improving confidence in it.
  1428. @item
  1429. Detect dead code that could be factored away.
  1430. @end itemize
  1431. Check itself does not provide any means to determine this test
  1432. coverage; rather, this is the job of the compiler and its related
  1433. tools. In the case of @command{gcc} this information is easy to
  1434. obtain, and other compilers should provide similar facilities.
  1435. Using @command{gcc}, first enable test coverage profiling when
  1436. building your source by specifying the @option{-fprofile-arcs} and
  1437. @option{-ftest-coverage} switches:
  1438. @example
  1439. @verbatim
  1440. $ gcc -g -Wall -fprofile-arcs -ftest-coverage -o foo foo.c foo_check.c
  1441. @end verbatim
  1442. @end example
  1443. You will see that an additional @file{.gcno} file is created for each
  1444. @file{.c} input file. After running your tests the normal way, a
  1445. @file{.gcda} file is created for each @file{.gcno} file. These
  1446. contain the coverage data in a raw format. To combine this
  1447. information and a source file into a more readable format you can use
  1448. the @command{gcov} utility:
  1449. @example
  1450. @verbatim
  1451. $ gcov foo.c
  1452. @end verbatim
  1453. @end example
  1454. This will produce the file @file{foo.c.gcov} which looks like this:
  1455. @example
  1456. @verbatim
  1457. -: 41: * object */
  1458. 18: 42: if (ht->table[p] != NULL) {
  1459. -: 43: /* replaces the current entry */
  1460. #####: 44: ht->count--;
  1461. #####: 45: ht->size -= ht->table[p]->size +
  1462. #####: 46: sizeof(struct hashtable_entry);
  1463. @end verbatim
  1464. @end example
  1465. As you can see this is an annotated source file with three columns:
  1466. usage information, line numbers, and the original source. The usage
  1467. information in the first column can either be '-', which means that
  1468. this line does not contain code that could be executed; '#####', which
  1469. means this line was never executed although it does contain
  1470. code---these are the lines that are probably most interesting for you;
  1471. or a number, which indicates how often that line was executed.
  1472. This is of course only a very brief overview, but it should illustrate
  1473. how determining test coverage generally works, and how it can help
  1474. you. For more information or help with other compilers, please refer
  1475. to the relevant manuals.
  1476. @node Finding Memory Leaks, Test Logging, Determining Test Coverage, Advanced Features
  1477. @section Finding Memory Leaks
  1478. It is possible to determine if any code under test leaks memory during
  1479. a test. Check itself does not have an API for memory leak detection,
  1480. however Valgrind can be used against a unit testing program to search
  1481. for potential leaks.
  1482. Before discussing memory leak detection, first a "memory leak" should be
  1483. better defined. There are two primary definitions of a memory leak:
  1484. @enumerate
  1485. @item
  1486. Memory that is allocated but not freed before a program terminates.
  1487. However, it was possible for the program to free the memory if it had
  1488. wanted to. Valgrind refers to these as "still reachable" leaks.
  1489. @item
  1490. Memory that is allocated, and any reference to the memory is lost.
  1491. The program could not have freed the memory. Valgrind refers to these
  1492. as "definitely lost" leaks.
  1493. @end enumerate
  1494. Valgrind uses the second definition by default when defining a memory leak.
  1495. These leaks are the ones which are likely to cause a program issues due
  1496. to heap depletion.
  1497. If one wanted to run Valgrind against a unit testing program to determine
  1498. if leaks are present, the following invocation of Valgrind will work:
  1499. @example
  1500. @verbatim
  1501. valgrind --leak-check=full ${UNIT_TEST_PROGRAM}
  1502. ...
  1503. ==3979== LEAK SUMMARY:
  1504. ==3979== definitely lost: 0 bytes in 0 blocks
  1505. ==3979== indirectly lost: 0 bytes in 0 blocks
  1506. ==3979== possibly lost: 0 bytes in 0 blocks
  1507. ==3979== still reachable: 548 bytes in 24 blocks
  1508. ==3979== suppressed: 0 bytes in 0 blocks
  1509. @end verbatim
  1510. @end example
  1511. In that example, there were no "definitely lost" memory leaks found.
  1512. However, why would there be such a large number of "still reachable"
  1513. memory leaks? It turns out this is a consequence of using @code{fork()}
  1514. to run a unit test in its own process memory space, which Check does by
  1515. default on platforms with @code{fork()} available.
  1516. Consider the example where a unit test program creates one suite with
  1517. one test. The flow of the program will look like the following:
  1518. @example
  1519. @b{Main process:} @b{Unit test process:}
  1520. create suite
  1521. srunner_run_all()
  1522. fork unit test unit test process created
  1523. wait for test start test
  1524. ... end test
  1525. ... exit(0)
  1526. test complete
  1527. report result
  1528. free suite
  1529. exit(0)
  1530. @end example
  1531. The unit testing process has a copy of all memory that the main process
  1532. allocated. In this example, that would include the suite allocated in
  1533. main. When the unit testing process calls @code{exit(0)}, the suite
  1534. allocated in @code{main()} is reachable but not freed. As the unit test
  1535. has no reason to do anything besides die when its test is finished, and
  1536. it has no reasonable way to free everything before it dies, Valgrind
  1537. reports that some memory is still reachable but not freed.
  1538. If the "still reachable" memory leaks are a concern, and one required that
  1539. the unit test program report that there were no memory leaks regardless
  1540. of the type, then the unit test program needs to run without fork. To
  1541. accomplish this, either define the @code{CK_FORK=no} environment variable,
  1542. or use the @code{srunner_set_fork_status()} function to set the fork mode
  1543. as @code{CK_NOFORK} for all suite runners.
  1544. Running the same unit test program by disabling @code{fork()} results
  1545. in the following:
  1546. @example
  1547. @verbatim
  1548. CK_FORK=no valgrind --leak-check=full ${UNIT_TEST_PROGRAM}
  1549. ...
  1550. ==4924== HEAP SUMMARY:
  1551. ==4924== in use at exit: 0 bytes in 0 blocks
  1552. ==4924== total heap usage: 482 allocs, 482 frees, 122,351 bytes allocated
  1553. ==4924==
  1554. ==4924== All heap blocks were freed -- no leaks are possible
  1555. @end verbatim
  1556. @end example
  1557. @node Test Logging, Subunit Support, Finding Memory Leaks, Advanced Features
  1558. @section Test Logging
  1559. @findex srunner_set_log
  1560. Check supports an operation to log the results of a test run. To use
  1561. test logging, call the @code{srunner_set_log()} function with the name
  1562. of the log file you wish to create:
  1563. @example
  1564. @verbatim
  1565. SRunner *sr;
  1566. sr = srunner_create (make_s1_suite ());
  1567. srunner_add_suite (sr, make_s2_suite ());
  1568. srunner_set_log (sr, "test.log");
  1569. srunner_run_all (sr, CK_NORMAL);
  1570. @end verbatim
  1571. @end example
  1572. In this example, Check will write the results of the run to
  1573. @file{test.log}. The @code{print_mode} argument to
  1574. @code{srunner_run_all()} is ignored during test logging; the log will
  1575. contain a result entry, organized by suite, for every test run. Here
  1576. is an example of test log output:
  1577. @example
  1578. @verbatim
  1579. Running suite S1
  1580. ex_log_output.c:8:P:Core:test_pass: Test passed
  1581. ex_log_output.c:14:F:Core:test_fail: Failure
  1582. ex_log_output.c:18:E:Core:test_exit: (after this point) Early exit
  1583. with return value 1
  1584. Running suite S2
  1585. ex_log_output.c:26:P:Core:test_pass2: Test passed
  1586. Results for all suites run:
  1587. 50%: Checks: 4, Failures: 1, Errors: 1
  1588. @end verbatim
  1589. @end example
  1590. Another way to enable test logging is to use the @code{CK_LOG_FILE_NAME}
  1591. environment variable. When set, tests will be logged to the specified file name.
  1592. If a log file is specified with both @code{CK_LOG_FILE_NAME} and
  1593. @code{srunner_set_log()}, the name provided to @code{srunner_set_log()} will
  1594. be used.
  1595. If the log name is set to "-" either via @code{srunner_set_log()} or
  1596. @code{CK_LOG_FILE_NAME}, the log data will be printed to stdout instead
  1597. of to a file.
  1598. @menu
  1599. * XML Logging::
  1600. * TAP Logging::
  1601. @end menu
  1602. @node XML Logging, TAP Logging, Test Logging, Test Logging
  1603. @subsection XML Logging
  1604. @findex srunner_set_xml
  1605. @findex srunner_has_xml
  1606. @findex srunner_xml_fname
  1607. The log can also be written in XML. The following functions define
  1608. the interface for XML logs:
  1609. @example
  1610. @verbatim
  1611. void srunner_set_xml (SRunner *sr, const char *fname);
  1612. int srunner_has_xml (SRunner *sr);
  1613. const char *srunner_xml_fname (SRunner *sr);
  1614. @end verbatim
  1615. @end example
  1616. XML output is enabled by a call to @code{srunner_set_xml()} before the tests
  1617. are run. Here is an example of an XML log:
  1618. @example
  1619. @verbatim
  1620. <?xml version="1.0"?>
  1621. <?xml-stylesheet type="text/xsl" href="http://check.sourceforge.net/xml/check_unittest.xslt"?>
  1622. <testsuites xmlns="http://check.sourceforge.net/ns">
  1623. <datetime>2012-10-19 09:56:06</datetime>
  1624. <suite>
  1625. <title>S1</title>
  1626. <test result="success">
  1627. <path>.</path>
  1628. <fn>ex_xml_output.c:10</fn>
  1629. <id>test_pass</id>
  1630. <iteration>0</iteration>
  1631. <duration>0.000013</duration>
  1632. <description>Core</description>
  1633. <message>Passed</message>
  1634. </test>
  1635. <test result="failure">
  1636. <path>.</path>
  1637. <fn>ex_xml_output.c:16</fn>
  1638. <id>test_fail</id>
  1639. <iteration>0</iteration>
  1640. <duration>-1.000000</duration>
  1641. <description>Core</description>
  1642. <message>Failure</message>
  1643. </test>
  1644. <test result="error">
  1645. <path>.</path>
  1646. <fn>ex_xml_output.c:20</fn>
  1647. <id>test_exit</id>
  1648. <iteration>0</iteration>
  1649. <duration>-1.000000</duration>
  1650. <description>Core</description>
  1651. <message>Early exit with return value 1</message>
  1652. </test>
  1653. </suite>
  1654. <suite>
  1655. <title>S2</title>
  1656. <test result="success">
  1657. <path>.</path>
  1658. <fn>ex_xml_output.c:28</fn>
  1659. <id>test_pass2</id>
  1660. <iteration>0</iteration>
  1661. <duration>0.000011</duration>
  1662. <description>Core</description>
  1663. <message>Passed</message>
  1664. </test>
  1665. <test result="failure">
  1666. <path>.</path>
  1667. <fn>ex_xml_output.c:34</fn>
  1668. <id>test_loop</id>
  1669. <iteration>0</iteration>
  1670. <duration>-1.000000</duration>
  1671. <description>Core</description>
  1672. <message>Iteration 0 failed</message>
  1673. </test>
  1674. <test result="success">
  1675. <path>.</path>
  1676. <fn>ex_xml_output.c:34</fn>
  1677. <id>test_loop</id>
  1678. <iteration>1</iteration>
  1679. <duration>0.000010</duration>
  1680. <description>Core</description>
  1681. <message>Passed</message>
  1682. </test>
  1683. <test result="failure">
  1684. <path>.</path>
  1685. <fn>ex_xml_output.c:34</fn>
  1686. <id>test_loop</id>
  1687. <iteration>2</iteration>
  1688. <duration>-1.000000</duration>
  1689. <description>Core</description>
  1690. <message>Iteration 2 failed</message>
  1691. </test>
  1692. </suite>
  1693. <suite>
  1694. <title>XML escape &quot; &apos; &lt; &gt; &amp; tests</title>
  1695. <test result="failure">
  1696. <path>.</path>
  1697. <fn>ex_xml_output.c:40</fn>
  1698. <id>test_xml_esc_fail_msg</id>
  1699. <iteration>0</iteration>
  1700. <duration>-1.000000</duration>
  1701. <description>description &quot; &apos; &lt; &gt; &amp;</description>
  1702. <message>fail &quot; &apos; &lt; &gt; &amp; message</message>
  1703. </test>
  1704. </suite>
  1705. <duration>0.001610</duration>
  1706. </testsuites>
  1707. @end verbatim
  1708. @end example
  1709. XML logging can be enabled by an environment variable as well. If
  1710. @code{CK_XML_LOG_FILE_NAME} environment variable is set, the XML test log will
  1711. be written to the specified file name. If an XML log file is specified with both
  1712. @code{CK_XML_LOG_FILE_NAME} and @code{srunner_set_xml()}, the name provided
  1713. to @code{srunner_set_xml()} will be used.
  1714. If the log name is set to "-" either via @code{srunner_set_xml()} or
  1715. @code{CK_XML_LOG_FILE_NAME}, the log data will be printed to stdout instead
  1716. of to a file.
  1717. If both plain text and XML log files are specified, by any of the above methods,
  1718. then check will log to both files. In other words logging in plain text and XML
  1719. format simultaneously is supported.
  1720. @node TAP Logging, , XML Logging, Test Logging
  1721. @subsection TAP Logging
  1722. @findex srunner_set_tap
  1723. @findex srunner_has_tap
  1724. @findex srunner_tap_fname
  1725. The log can also be written in Test Anything Protocol (TAP) format.
  1726. Refer to the @uref{http://podwiki.hexten.net/TAP/TAP.html,TAP Specification}
  1727. for information on valid TAP output and parsers of TAP. The following
  1728. functions define the interface for TAP logs:
  1729. @example
  1730. @verbatim
  1731. void srunner_set_tap (SRunner *sr, const char *fname);
  1732. int srunner_has_tap (SRunner *sr);
  1733. const char *srunner_tap_fname (SRunner *sr);
  1734. @end verbatim
  1735. @end example
  1736. TAP output is enabled by a call to @code{srunner_set_tap()} before the tests
are run. Here is an example of a TAP log:
  1738. @example
  1739. @verbatim
  1740. ok 1 - mytests.c:test_suite_name:my_test_1: Passed
  1741. ok 2 - mytests.c:test_suite_name:my_test_2: Passed
  1742. not ok 3 - mytests.c:test_suite_name:my_test_3: Foo happened
  1743. ok 4 - mytests.c:test_suite_name:my_test_1: Passed
  1744. 1..4
  1745. @end verbatim
  1746. @end example
  1747. TAP logging can be enabled by an environment variable as well. If
  1748. @code{CK_TAP_LOG_FILE_NAME} environment variable is set, the TAP test log will
  1749. be written to specified file name. If TAP log file is specified with both
  1750. @code{CK_TAP_LOG_FILE_NAME} and @code{srunner_set_tap()}, the name provided
  1751. to @code{srunner_set_tap()} will be used.
  1752. If the log name is set to "-" either via @code{srunner_set_tap()} or
  1753. @code{CK_TAP_LOG_FILE_NAME}, the log data will be printed to stdout instead
  1754. of to a file.
If both plain text and TAP log files are specified, by any of the above methods,
  1756. then check will log to both files. In other words logging in plain text and TAP
  1757. format simultaneously is supported.
  1758. @node Subunit Support, , Test Logging, Advanced Features
  1759. @section Subunit Support
  1760. Check supports running test suites with subunit output. This can be useful to
  1761. combine test results from multiple languages, or to perform programmatic
  1762. analysis on the results of multiple check test suites or otherwise handle test
results in a programmatic manner. Using subunit with check is very
straightforward. There are two steps:
  1765. 1) In your check test suite driver pass 'CK_SUBUNIT' as the output mode
  1766. for your srunner.
  1767. @example
  1768. @verbatim
  1769. SRunner *sr;
  1770. sr = srunner_create (make_s1_suite ());
  1771. srunner_add_suite (sr, make_s2_suite ());
  1772. srunner_run_all (sr, CK_SUBUNIT);
  1773. @end verbatim
  1774. @end example
  1775. 2) Setup your main language test runner to run your check based test
  1776. executable. For instance using python:
  1777. @example
  1778. @verbatim
  1779. import subunit
  1780. class ShellTests(subunit.ExecTestCase):
  1781. """Run some tests from the C codebase."""
  1782. def test_group_one(self):
  1783. """./foo/check_driver"""
  1784. def test_group_two(self):
  1785. """./foo/other_driver"""
  1786. @end verbatim
  1787. @end example
  1788. In this example, running the test suite ShellTests in python (using any test
  1789. runner - unittest.py, tribunal, trial, nose or others) will run
  1790. ./foo/check_driver and ./foo/other_driver and report on their result.
  1791. Subunit is hosted on launchpad - the @uref{https://launchpad.net/subunit/,
  1792. subunit} project there contains bug tracker, future plans, and source code
  1793. control details.
  1794. @node Supported Build Systems, Conclusion and References, Advanced Features, Top
  1795. @chapter Supported Build Systems
  1796. @findex Supported Build Systems
  1797. Check officially supports two build systems: Autotools and CMake.
  1798. Primarily it is recommended to use Autotools where possible, as CMake is
  1799. only officially supported for Windows. Information on using Check in
  1800. either build system follows.
  1801. @menu
  1802. * Autotools::
  1803. * CMake::
  1804. @end menu
  1805. @node Autotools, CMake, Supported Build Systems, Supported Build Systems
  1806. @section Autotools
  1807. It is recommended to use pkg-config where possible to locate and use
  1808. Check in an Autotools project. This can be accomplished by including
  1809. the following in the project's @file{configure.ac} file:
  1810. @verbatim
  1811. PKG_CHECK_MODULES([CHECK], [check >= MINIMUM-VERSION])
  1812. @end verbatim
  1813. where MINIMUM-VERSION is the lowest version which is sufficient for
  1814. the project. For example, to guarantee that at least version 0.9.6 is
  1815. available, use the following:
  1816. @verbatim
  1817. PKG_CHECK_MODULES([CHECK], [check >= 0.9.6])
  1818. @end verbatim
  1819. An example of a @file{configure.ac} script for a project is
  1820. included in the @file{doc/example} directory in Check's source.
  1821. This macro should provide everything necessary to integrate Check
  1822. into an Autotools project.
  1823. If one does not wish to use pkg-config Check also provides its own
  1824. macro, @code{AM_PATH_CHECK()}, which may be used. This macro is
  1825. deprecated, but is still included with Check for backwards compatibility.
  1826. The @code{AM_PATH_CHECK()} macro is defined in the file
  1827. @file{check.m4} which is installed by Check. It has some optional
  1828. parameters that you might find useful in your @file{configure.ac}:
  1829. @verbatim
  1830. AM_PATH_CHECK([MINIMUM-VERSION,
  1831. [ACTION-IF-FOUND[,ACTION-IF-NOT-FOUND]]])
  1832. @end verbatim
  1833. @code{AM_PATH_CHECK} does several things:
  1834. @enumerate
  1835. @item
  1836. It ensures check.h is available
  1837. @item
  1838. It ensures a compatible version of Check is installed
  1839. @item
  1840. It sets @env{CHECK_CFLAGS} and @env{CHECK_LIBS} for use by Automake.
  1841. @end enumerate
  1842. If you include @code{AM_PATH_CHECK()} in @file{configure.ac} and
  1843. subsequently see warnings when attempting to create
  1844. @command{configure}, it probably means one of the following things:
  1845. @enumerate
  1846. @item
  1847. You forgot to call @command{aclocal}. @command{autoreconf} will do
  1848. this for you.
  1849. @item
  1850. @command{aclocal} can't find @file{check.m4}. Here are some possible
  1851. solutions:
  1852. @enumerate a
  1853. @item
  1854. Call @command{aclocal} with @option{-I} set to the location of
  1855. @file{check.m4}. This means you have to call both @command{aclocal} and
  1856. @command{autoreconf}.
  1857. @item
  1858. Add the location of @file{check.m4} to the @samp{dirlist} used by
  1859. @command{aclocal} and then call @command{autoreconf}. This means you
  1860. need permission to modify the @samp{dirlist}.
  1861. @item
  1862. Set @code{ACLOCAL_AMFLAGS} in your top-level @file{Makefile.am} to
  1863. include @option{-I DIR} with @code{DIR} being the location of
  1864. @file{check.m4}. Then call @command{autoreconf}.
  1865. @end enumerate
  1866. @end enumerate
  1867. @node CMake, , Autotools, Supported Build Systems
  1868. @section CMake
  1869. Those unable to use Autotools in their project may use CMake instead.
  1870. Officially CMake is supported only for Windows.
  1871. Documentation for using CMake is forthcoming. In the meantime, look
  1872. at the example CMake project in Check's @file{doc/examples} directory.
  1873. If you are using CMake version 3 or above, importing Check into your project
  1874. is easier than in earlier versions. If you have installed Check
  1875. as a CMake library, you should have the following files:
  1876. @verbatim
${INSTALL_PREFIX}/lib/cmake/check/check-config.cmake and
${INSTALL_PREFIX}/lib/cmake/check/check-config-version.cmake
  1879. @end verbatim
  1880. If you haven't installed Check into a system directory, you have to tell
  1881. your CMake build how to find it:
  1882. @verbatim
cmake -Dcheck_ROOT=${INSTALL_PREFIX}
@end verbatim
Then use Check in your @file{CMakeLists.txt} like this:
  1885. @verbatim
  1886. find_package(check <check_version if wanted> REQUIRED CONFIG)
  1887. add_executable(myproj.test myproj.test.c)
  1888. target_link_libraries(myproj.test Check::check)
  1889. add_test(NAME MyProj COMMAND myproj.test)
  1890. @end verbatim
  1891. @node Conclusion and References, Environment Variable Reference, Supported Build Systems, Top
  1892. @chapter Conclusion and References
  1893. The tutorial and description of advanced features has provided an
  1894. introduction to all of the functionality available in Check.
  1895. Hopefully, this is enough to get you started writing unit tests with
  1896. Check. All the rest is simply application of what has been learned so
  1897. far with repeated application of the ``test a little, code a little''
  1898. strategy.
  1899. For further reference, see Kent Beck, ``Test-Driven Development: By
  1900. Example'', 1st ed., Addison-Wesley, 2003. ISBN 0-321-14653-0.
  1901. If you know of other authoritative references to unit testing and
  1902. test-driven development, please send us a patch to this manual.
  1903. @node Environment Variable Reference, Copying This Manual, Conclusion and References, Top
  1904. @appendix Environment Variable Reference
This is a reference to the environment variables that Check recognizes and their use.
  1906. CK_RUN_CASE: Name of a test case, runs only that test. See section @ref{Selective Running of Tests}.
  1907. CK_RUN_SUITE: Name of a test suite, runs only that suite. See section @ref{Selective Running of Tests}.
CK_INCLUDE_TAGS: String of space separated tags, runs only test cases associated with at least one of the tags. See section @ref{Selecting Tests Based on Arbitrary Tags}.
CK_EXCLUDE_TAGS: String of space separated tags, runs only test cases not associated with any of the tags. See section @ref{Selecting Tests Based on Arbitrary Tags}.
  1910. CK_VERBOSITY: How much output to emit, accepts: ``silent'', ``minimal'', ``normal'', ``subunit'', or ``verbose''. See section @ref{SRunner Output}.
  1911. CK_FORK: Set to ``no'' to disable using fork() to run unit tests in their own process. This is useful for debugging segmentation faults. See section @ref{No Fork Mode}.
  1912. CK_DEFAULT_TIMEOUT: Override Check's default unit test timeout, a floating value in seconds. ``0'' means no timeout. See section @ref{Test Timeouts}.
  1913. CK_TIMEOUT_MULTIPLIER: A multiplier used against the default unit test timeout. An integer, defaults to ``1''. See section @ref{Test Timeouts}.
  1914. CK_LOG_FILE_NAME: Filename to write logs to. See section @ref{Test Logging}.
  1915. CK_XML_LOG_FILE_NAME: Filename to write XML log to. See section @ref{XML Logging}.
  1916. CK_TAP_LOG_FILE_NAME: Filename to write TAP (Test Anything Protocol) output to. See section @ref{TAP Logging}.
  1917. CK_MAX_MSG_SIZE: Maximal assertion message size.
  1918. @node Copying This Manual, Index, Environment Variable Reference, Top
  1919. @appendix Copying This Manual
  1920. @menu
  1921. * GNU Free Documentation License:: License for copying this manual.
  1922. @end menu
  1923. @include fdl.texi
  1924. @node Index, , Copying This Manual, Top
  1925. @unnumbered Index
  1926. @printindex cp
  1927. @bye