1 | Reza Matinnejad, Shiva Nejati, Lionel C. Briand, and Thomas Bruckmann. Automated test suite generation for time-continuous Simulink models. In Proceedings of the 38th International Conference on Software Engineering (ICSE 2016), Austin, TX, USA, May 14-22, 2016. |
|
| Abstract: Available soon... |
| @INPROCEEDINGS{matinnejad_ICSE_16,
author = {Reza Matinnejad and Shiva Nejati and Lionel C. Briand and Thomas Bruckmann},
title = {Automated test suite generation for time-continuous {Simulink} models},
booktitle = {Proceedings of the 38th International Conference on Software Engineering, {ICSE} 2016, Austin, TX, USA, May 14-22, 2016},
year = {2016},
pages = {595--606}
} |
2 | Daniel Di Nardo, Fabrizio Pastore, and Lionel C. Briand. Generating Complex and Faulty Test Data through Model-Based Mutation Analysis. In Proceedings of the 8th IEEE International Conference on Software Testing, Verification and Validation (ICST 2015), Graz, Austria, April 13-17, 2015. |
|
| Abstract: Available soon... |
| @INPROCEEDINGS{nardo_ICST_15,
author = {Daniel Di Nardo and Fabrizio Pastore and Lionel C. Briand},
title = {Generating Complex and Faulty Test Data through Model-Based Mutation Analysis},
booktitle = {8th {IEEE} International Conference on Software Testing, Verification and Validation, {ICST} 2015, Graz, Austria, April 13-17, 2015},
year = {2015},
pages = {1--10}
} |
3 | Andrea Arcuri and Lionel C. Briand. Adaptive random testing: an illusion of effectiveness? In Proceedings of the 20th International Symposium on Software Testing and Analysis (ISSTA 2011), Toronto, ON, Canada, July 17-21, 2011. |
|
| Abstract: Available soon... |
| @INPROCEEDINGS{arcuri_STVR_11,
author = {Andrea Arcuri and Lionel C. Briand},
title = {Adaptive random testing: an illusion of effectiveness?},
booktitle = {Proceedings of the 20th International Symposium on Software Testing and Analysis, {ISSTA} 2011, Toronto, ON, Canada, July 17-21, 2011},
year = {2011},
pages = {265--275}
} |
4 | James H. Andrews, Lionel C. Briand, Yvan Labiche, and Akbar Siami Namin. Using Mutation Analysis for Assessing and Comparing Testing Coverage Criteria. IEEE Transactions on Software Engineering, 32(8), August 2006. |
|
| Abstract: The empirical assessment of test techniques plays an important role in software testing research. One common practice is to seed faults in subject software, either manually or by using a program that generates all possible mutants based on a set of mutation operators. The latter allows the systematic, repeatable seeding of large numbers of faults, thus facilitating the statistical analysis of fault detection effectiveness of test suites; however, we do not know whether empirical results obtained this way lead to valid, representative conclusions. Focusing on four common control and data flow criteria (block, decision, C-use, and P-use), this paper investigates this important issue based on a middle-size industrial program with a comprehensive pool of test cases and known faults. Based on the data available thus far, the results are very consistent across the investigated criteria as they show that the use of mutation operators is yielding trustworthy results: generated mutants can be used to predict the detection effectiveness of real faults. Applying such a mutation analysis, we then investigate the relative cost and effectiveness of the above-mentioned criteria by revisiting fundamental questions regarding the relationships between fault detection, test suite size, and control/data flow coverage. Although such questions have been partially investigated in previous studies, we can use a large number of mutants, which helps decrease the impact of random variation in our analysis and allows us to use a different analysis approach. Our results are then compared with published studies; plausible reasons for the differences are provided, and the research leads us to suggest a way to tune the mutation analysis process to possible differences in fault detection probabilities in a specific environment. |
| @ARTICLE{AndrewsBLN06,
author = {James H. Andrews and Lionel C. Briand and Yvan Labiche and Akbar Siami Namin},
title = {Using Mutation Analysis for Assessing and Comparing Testing Coverage Criteria},
journal = {IEEE Transactions on Software Engineering},
year = {2006},
month = {August},
volume = {32},
number = {8},
pages = {608--624}
} |
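The workflow this abstract relies on (seed faults by applying mutation operators, then score a test suite by the fraction of mutants it kills) can be made concrete with a small sketch. What follows is a minimal illustration, not the authors' experimental setup: the is_adult function, the single relational-operator swap, and both test suites are hypothetical, invented here purely for demonstration (Python 3.9+, for ast.unparse).

import ast

SOURCE = """
def is_adult(age):
    return age >= 18
"""

# One mutation operator: swap each relational operator, one site per mutant,
# following the usual first-order convention (one seeded fault per version).
SWAPS = {ast.Gt: ast.GtE, ast.GtE: ast.Gt, ast.Lt: ast.LtE, ast.LtE: ast.Lt}

def generate_mutants(source):
    tree = ast.parse(source)
    for node in ast.walk(tree):
        if isinstance(node, ast.Compare) and type(node.ops[0]) in SWAPS:
            original = node.ops[0]
            node.ops[0] = SWAPS[type(original)]()  # apply the mutation
            yield ast.unparse(tree)                # emit the mutant's source
            node.ops[0] = original                 # undo before the next site

def suite_passes(source, tests):
    """Run one program version against the suite; True means every test passes."""
    ns = {}
    exec(compile(source, "<mutant>", "exec"), ns)
    return all(ns["is_adult"](arg) == expected for arg, expected in tests)

weak_suite = [(10, False), (30, True)]    # never exercises the boundary
strong_suite = weak_suite + [(18, True)]  # adds the age == 18 case

for name, tests in (("weak", weak_suite), ("strong", strong_suite)):
    mutants = list(generate_mutants(SOURCE))
    killed = sum(1 for m in mutants if not suite_passes(m, tests))
    print(f"{name} suite: mutation score {killed}/{len(mutants)}")

The weak suite scores 0/1 because the '>= 18' to '> 18' mutant behaves identically on every non-boundary input, while the strong suite scores 1/1. The paper's question is whether such mutation scores predict a suite's ability to detect real faults.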
5 | James H. Andrews, Lionel C. Briand, and Yvan Labiche. Is Mutation an Appropriate Tool for Testing Experiments? In Proceedings of the 27th International Conference on Software Engineering (ICSE'05), St. Louis, Missouri, USA, 15-21 May 2005. |
|
| Abstract: The empirical assessment of test techniques plays an important role in software testing research. One common practice is to instrument faults, either manually or by using mutation operators. The latter allows the systematic, repeatable seeding of large numbers of faults; however, we do not know whether empirical results obtained this way lead to valid, representative conclusions. This paper investigates this important question based on a number of programs with comprehensive pools of test cases and known faults. It is concluded that, based on the data available thus far, the use of mutation operators is yielding trustworthy results (generated mutants are similar to real faults). Mutants appear however to be different from hand-seeded faults that seem to be harder to detect than real faults. |
| @INPROCEEDINGS{AndrewsBL05,
author = {James H. Andrews and Lionel C. Briand and Yvan Labiche},
title = {Is Mutation an Appropriate Tool for Testing Experiments?},
booktitle = {Proceedings of the 27th International Conference on Software Engineering (ICSE'05)},
year = {2005},
address = {St. Louis, Missouri, USA},
month = {15-21 May},
pages = {402--411}
} |
6 | Lionel C. Briand and Dietmar Pfahl. Using simulation for assessing the real impact of test-coverage on defect-coverage. IEEE Transactions on Reliability, 49(1), 2000. |
|
| Abstract: Available soon... |
| @ARTICLE{BriandP00,
author = {Lionel C. Briand and Dietmar Pfahl},
title = {Using simulation for assessing the real impact of test-coverage on defect-coverage},
journal = {IEEE Transactions on Reliability},
year = {2000},
volume = {49},
number = {1},
pages = {60--70}
} |