1. Mohammad Mahdi Hassan and James H. Andrews. Comparing Multi-Point Stride Coverage and Dataflow Coverage. Proceedings of the 35th International Conference on Software Engineering (ICSE'13), San Francisco, CA, USA, 18-26 May 2013.

Abstract: Available soon...

@INPROCEEDINGS{HassanA13,
  author = {Mohammad Mahdi Hassan and James H. Andrews},
  title = {Comparing Multi-Point Stride Coverage and Dataflow Coverage},
  booktitle = {Proceedings of the 35th International Conference on Software Engineering (ICSE'13)},
  year = {2013},
  address = {San Francisco, CA, USA},
  month = {18-26 May},
  pages = {172--181}
}

2. Shaimaa Ali, James H. Andrews, Tamilselvi Dhandapani, and Wantao Wang. Evaluating the Accuracy of Fault Localization Techniques. Proceedings of the 24th IEEE/ACM International Conference on Automated Software Engineering (ASE 2009), Auckland, New Zealand, 16-20 November 2009.

Abstract: Available soon...

@INPROCEEDINGS{ali_ASE_09,
  author = {Shaimaa Ali and James H. Andrews and Tamilselvi Dhandapani and Wantao Wang},
  title = {Evaluating the Accuracy of Fault Localization Techniques},
  booktitle = {Proceedings of the 24th {IEEE/ACM} International Conference on Automated Software Engineering (ASE 2009)},
  year = {2009},
  address = {Auckland, New Zealand},
  month = {16-20 November},
  pages = {76--87}
}

3. Akbar Siami Namin and James H. Andrews. The Influence of Size and Coverage on Test Suite Effectiveness. Proceedings of the 18th International Symposium on Software Testing and Analysis (ISSTA'09), Chicago, Illinois, USA, 19-23 July 2009.

Abstract: We study the relationship between three properties of test suites: size, structural coverage, and fault-finding effectiveness. In particular, we study the question of whether achieving high coverage leads directly to greater effectiveness, or only indirectly through forcing a test suite to be larger. Our experiments indicate that coverage is sometimes correlated with effectiveness when size is controlled for, and that using both size and coverage yields a more accurate prediction of effectiveness than size alone. This in turn suggests that both size and coverage are important to test suite effectiveness. Our experiments also indicate that no linear relationship exists among the three variables of size, coverage and effectiveness, but that a nonlinear relationship does exist.

@INPROCEEDINGS{NaminA09,
  author = {Akbar Siami Namin and James H. Andrews},
  title = {The Influence of Size and Coverage on Test Suite Effectiveness},
  booktitle = {Proceedings of the 18th International Symposium on Software Testing and Analysis (ISSTA'09)},
  year = {2009},
  address = {Chicago, Illinois, USA},
  month = {19-23 July},
  pages = {57--68}
}

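The abstract above frames effectiveness prediction as a model-comparison question: does coverage add predictive power once suite size is accounted for? The sketch below is a hypothetical illustration of that comparison only, not the paper's data, subjects, or statistical procedure; the synthetic data generator, the coefficients, and the log(size) term standing in for a nonlinear effect are all assumptions made for this example.

import numpy as np

rng = np.random.default_rng(0)

# Synthetic test-suite measurements (hypothetical): suite size, statement
# coverage in [0, 1], and mutant-detection effectiveness in [0, 1].
size = rng.integers(5, 200, size=100).astype(float)
coverage = np.clip(0.3 + 0.003 * size + rng.normal(0.0, 0.08, 100), 0.0, 1.0)
effectiveness = np.clip(0.15 + 0.5 * coverage + 0.08 * np.log(size)
                        + rng.normal(0.0, 0.05, 100), 0.0, 1.0)

def r_squared(X, y):
    # Ordinary least squares fit of y on the columns of X; return R^2.
    beta, *_ = np.linalg.lstsq(X, y, rcond=None)
    residuals = y - X @ beta
    return 1.0 - residuals.var() / y.var()

ones = np.ones_like(size)
print("size only:           ", r_squared(np.column_stack([ones, size]), effectiveness))
print("size + coverage:     ", r_squared(np.column_stack([ones, size, coverage]), effectiveness))
print("log(size) + coverage:", r_squared(np.column_stack([ones, np.log(size), coverage]), effectiveness))

If the models that include coverage consistently fit better than the size-only model, that mirrors the abstract's conclusion that size and coverage together predict effectiveness more accurately than size alone.
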
4. Akbar Siami Namin, James H. Andrews, and Duncan J. Murdoch. Sufficient Mutation Operators for Measuring Test Effectiveness. Proceedings of the 30th International Conference on Software Engineering (ICSE'08), Leipzig, Germany, 10-18 May 2008.

Abstract: Mutants are automatically-generated, possibly faulty variants of programs. The mutation adequacy ratio of a test suite is the ratio of non-equivalent mutants it is able to identify to the total number of non-equivalent mutants. This ratio can be used as a measure of test effectiveness. However, it can be expensive to calculate, due to the large number of different mutation operators that have been proposed for generating the mutants.

In this paper, we address the problem of finding a small set of mutation operators which is still sufficient for measuring test effectiveness. We do this by defining a statistical analysis procedure that allows us to identify such a set, together with an associated linear model that predicts mutation adequacy with high accuracy. We confirm the validity of our procedure through cross-validation and the application of other, alternative statistical analyses.

@INPROCEEDINGS{NaminAM08,
  author = {Akbar Siami Namin and James H. Andrews and Duncan J. Murdoch},
  title = {Sufficient Mutation Operators for Measuring Test Effectiveness},
  booktitle = {Proceedings of the 30th International Conference on Software Engineering (ICSE'08)},
  year = {2008},
  address = {Leipzig, Germany},
  month = {10-18 May},
  pages = {351--360}
}

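The mutation adequacy ratio defined in the abstract above (killed non-equivalent mutants divided by all non-equivalent mutants) is straightforward to compute once a kill matrix is available. The following is a minimal, hypothetical sketch; the kill matrix, the equivalence flags, and the function name are invented for illustration and are not taken from the paper or any particular mutation tool.

def mutation_adequacy(kill_matrix, equivalent):
    # kill_matrix[m][t] is True if test t kills (detects) mutant m.
    # Mutants flagged as equivalent are excluded from the ratio.
    non_equivalent = [row for row, eq in zip(kill_matrix, equivalent) if not eq]
    if not non_equivalent:
        return 0.0
    killed = sum(1 for row in non_equivalent if any(row))
    return killed / len(non_equivalent)

# Four mutants, three tests; mutant 2 is equivalent and therefore ignored.
kills = [
    [True,  False, False],   # mutant 0: killed by test 0
    [False, False, True],    # mutant 1: killed by test 2
    [False, False, False],   # mutant 2: equivalent, never killed
    [False, False, False],   # mutant 3: non-equivalent but not detected
]
print(mutation_adequacy(kills, equivalent=[False, False, True, False]))  # 2/3

The expense the abstract mentions comes from filling in the kill matrix, which requires running every test against every mutant; restricting attention to a sufficient subset of operators shrinks the pool of mutants that must be executed.
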
5. Akbar Siami Namin and James H. Andrews. On Sufficiency of Mutants. Proceedings of the 29th International Conference on Software Engineering (ICSE COMPANION'07), Minneapolis, Minnesota, 20-26 May 2007.

Abstract: Mutation is the practice of automatically generating possibly faulty variants of a program, for the purpose of assessing the adequacy of a test suite or comparing testing techniques. The cost of mutation often makes its application infeasible. The cost of mutation is usually assessed in terms of the number of mutants, and consequently the number of "mutation operators" that produce them. We address this problem by finding a smaller subset of mutation operators, called "sufficient", that can model the behaviour of the full set. To do this, we provide an experimental procedure and adapt statistical techniques proposed for variable reduction, model selection and nonlinear regression. Our preliminary results reveal interesting information about mutation operators.

@INPROCEEDINGS{NaminA07,
  author = {Akbar Siami Namin and James H. Andrews},
  title = {On Sufficiency of Mutants},
  booktitle = {Proceedings of the 29th International Conference on Software Engineering (ICSE COMPANION'07)},
  year = {2007},
  address = {Minneapolis, Minnesota},
  month = {20-26 May},
  pages = {73--74}
}

6. James H. Andrews, Lionel C. Briand, Yvan Labiche, and Akbar Siami Namin. Using Mutation Analysis for Assessing and Comparing Testing Coverage Criteria. IEEE Transactions on Software Engineering, 32(8), August 2006.

Abstract: The empirical assessment of test techniques plays an important role in software testing research. One common practice is to seed faults in subject software, either manually or by using a program that generates all possible mutants based on a set of mutation operators. The latter allows the systematic, repeatable seeding of large numbers of faults, thus facilitating the statistical analysis of fault detection effectiveness of test suites; however, we do not know whether empirical results obtained this way lead to valid, representative conclusions. Focusing on four common control and data flow criteria (block, decision, C-use, and P-use), this paper investigates this important issue based on a middle size industrial program with a comprehensive pool of test cases and known faults. Based on the data available thus far, the results are very consistent across the investigated criteria as they show that the use of mutation operators is yielding trustworthy results: generated mutants can be used to predict the detection effectiveness of real faults. Applying such a mutation analysis, we then investigate the relative cost and effectiveness of the above-mentioned criteria by revisiting fundamental questions regarding the relationships between fault detection, test suite size, and control/data flow coverage. Although such questions have been partially investigated in previous studies, we can use a large number of mutants, which helps decrease the impact of random variation in our analysis and allows us to use a different analysis approach. Our results are then compared with published studies; plausible reasons for the differences are provided, and the research leads us to suggest a way to tune the mutation analysis process to possible differences in fault detection probabilities in a specific environment.

@ARTICLE{AndrewsBLN06,
  author = {James H. Andrews and Lionel C. Briand and Yvan Labiche and Akbar Siami Namin},
  title = {Using Mutation Analysis for Assessing and Comparing Testing Coverage Criteria},
  journal = {IEEE Transactions on Software Engineering},
  year = {2006},
  month = {August},
  volume = {32},
  number = {8},
  pages = {608--624}
}

7. Akbar Siami Namin and James H. Andrews. Finding Sufficient Mutation Operators via Variable Reduction. Proceedings of the 2nd Workshop on Mutation Analysis (MUTATION'06), Raleigh, North Carolina, November 2006.

Abstract: A set of mutation operators is "sufficient" if it can be used for most purposes to replace a larger set. We describe in detail an experimental procedure for determining a set of sufficient C language mutation operators. We also describe several statistical analyses that determine sufficient subsets with respect to several different criteria, based on standard techniques for variable reduction. We have begun to carry out our experimental procedure on seven standard C subject programs. We present preliminary results that indicate that the procedure and analyses are feasible and yield useful information.

@INPROCEEDINGS{NaminA06,
  author = {Akbar Siami Namin and James H. Andrews},
  title = {Finding Sufficient Mutation Operators via Variable Reduction},
  booktitle = {Proceedings of the 2nd Workshop on Mutation Analysis (MUTATION'06)},
  year = {2006},
  address = {Raleigh, North Carolina},
  month = {November},
  pages = {5}
}

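This abstract and the ICSE'08 paper above both describe selecting a small, "sufficient" subset of mutation operators whose adequacy scores still predict adequacy over the full operator set. As a loose, hypothetical sketch of that idea only (the papers use specific variable-reduction and cross-validation procedures that are not reproduced here), the following greedy forward selection picks operators whose per-operator kill ratios best fit the overall adequacy by least squares on synthetic data; all names and the data generator are invented for this example.

import numpy as np

rng = np.random.default_rng(1)
n_suites, n_ops = 60, 10

# per_op[s, o]: fraction of operator o's mutants killed by test suite s (synthetic).
per_op = rng.uniform(0.0, 1.0, size=(n_suites, n_ops))
mutant_counts = rng.integers(20, 200, size=n_ops).astype(float)
overall = per_op @ mutant_counts / mutant_counts.sum()  # adequacy over the full operator set

def fit_error(chosen):
    # Mean squared error of a least-squares fit of overall adequacy
    # on the per-operator ratios of the chosen operators.
    X = np.column_stack([np.ones(n_suites)] + [per_op[:, o] for o in chosen])
    beta, *_ = np.linalg.lstsq(X, overall, rcond=None)
    return float(np.mean((overall - X @ beta) ** 2))

selected = []
while len(selected) < 3:  # keep the candidate subset deliberately small
    best = min((o for o in range(n_ops) if o not in selected),
               key=lambda o: fit_error(selected + [o]))
    selected.append(best)

print("chosen operators:", selected, " fit error:", fit_error(selected))

In this toy setup the selection tends to recover the operators with the largest mutant counts, since they dominate the overall ratio; the papers' actual analyses validate the chosen subsets far more carefully.
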
8. James H. Andrews, Lionel C. Briand, and Yvan Labiche. Is Mutation an Appropriate Tool for Testing Experiments? Proceedings of the 27th International Conference on Software Engineering (ICSE'05), St. Louis, Missouri, 15-21 May 2005.

Abstract: The empirical assessment of test techniques plays an important role in software testing research. One common practice is to instrument faults, either manually or by using mutation operators. The latter allows the systematic, repeatable seeding of large numbers of faults; however, we do not know whether empirical results obtained this way lead to valid, representative conclusions. This paper investigates this important question based on a number of programs with comprehensive pools of test cases and known faults. It is concluded that, based on the data available thus far, the use of mutation operators is yielding trustworthy results (generated mutants are similar to real faults). Mutants appear, however, to be different from hand-seeded faults, which seem to be harder to detect than real faults.

@INPROCEEDINGS{AndrewsBL05,
  author = {James H. Andrews and Lionel C. Briand and Yvan Labiche},
  title = {Is Mutation an Appropriate Tool for Testing Experiments?},
  booktitle = {Proceedings of the 27th International Conference on Software Engineering (ICSE'05)},
  year = {2005},
  address = {St. Louis, Missouri},
  month = {15-21 May},
  pages = {402--411}
}

9. James H. Andrews and Yingjun Zhang. General Test Result Checking with Log File Analysis. IEEE Transactions on Software Engineering, 29(7), 2003.

Abstract: Available soon...

@ARTICLE{andrews_TSE_03,
  author = {James H. Andrews and Yingjun Zhang},
  title = {General Test Result Checking with Log File Analysis},
  journal = {IEEE Transactions on Software Engineering},
  year = {2003},
  volume = {29},
  number = {7},
  pages = {634--648}
}