var bibbase_data = {"data":"\"Loading..\"\n\n
\n\n \n\n \n\n \n \n\n \n\n \n \n\n \n\n \n
\n generated by\n \n \"bibbase.org\"\n\n \n
\n \n\n
\n\n \n\n\n
\n\n Excellent! Next you can\n create a new website with this list, or\n embed it in an existing web page by copying & pasting\n any of the following snippets.\n\n
\n JavaScript\n (easiest)\n
\n \n <script src=\"https://bibbase.org/show?bib=https%3A%2F%2Fleonmoonen.com%2Fassets%2Fpublications_leon_moonen.bib&jsonp=1&jsonp=1\"></script>\n \n
\n\n PHP\n
\n \n <?php\n $contents = file_get_contents(\"https://bibbase.org/show?bib=https%3A%2F%2Fleonmoonen.com%2Fassets%2Fpublications_leon_moonen.bib&jsonp=1\");\n print_r($contents);\n ?>\n \n
\n\n iFrame\n (not recommended)\n
\n \n <iframe src=\"https://bibbase.org/show?bib=https%3A%2F%2Fleonmoonen.com%2Fassets%2Fpublications_leon_moonen.bib&jsonp=1\"></iframe>\n \n
\n\n

\n For more details see the documention.\n

\n
\n
\n\n
\n\n This is a preview! To use this list on your own web site\n or create a new web site from it,\n create a free account. The file will be added\n and you will be able to edit it in the File Manager.\n We will show you instructions once you've created your account.\n
\n\n
\n\n

To the site owner:

\n\n

Action required! Mendeley is changing its\n API. In order to keep using Mendeley with BibBase past April\n 14th, you need to:\n

    \n
  1. renew the authorization for BibBase on Mendeley, and
  2. \n
  3. update the BibBase URL\n in your page the same way you did when you initially set up\n this page.\n
  4. \n
\n

\n\n

\n \n \n Fix it now\n

\n
\n\n
\n\n\n
\n \n \n
\n
\n  \n 2017\n \n \n (2)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n Leveraging Machine Learning to Guide Software Evolution (keynote).\n \n \n \n\n\n \n Moonen, L.\n\n\n \n\n\n\n In IEEE International Workshop on Empirical Software Engineering in Practice (IWESEP), Tokyo, Japan, 2017. IEEE\n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@InProceedings{moonen:2017:ml4evolution:keynote,\n  abstract =     "Knowledge about dependencies between system artifacts such as modules, methods and variables is essential for a variety of software maintenance and software evolution tasks. Unfortunately, existing approaches to uncover such dependencies by means of static or dynamic program analysis are typically language-specific. Their application is thus largely restricted to homogeneous systems, which is a major drawback given the increasingly heterogeneity in modern software systems.In this talk, we will look at the alternative of using unsupervised machine learning techniques such as association rule mining, which can be used to infer knowledge about the relationships between items in a data set. Association rule mining has been successfully used to analyze the change history of a software system and uncover so called evolutionary coupling between its artifacts. One of the advantages of this approach is that it is language-agnostic, and uncovering dependencies across artifacts written in different programming languages essentially comes for free.We will explore how association rule mining can be used to derive evidence-based recommendations to guide software maintenance and evolution tasks. Examples include software change impact analysis, recommending related change during development, and conducting targeted regression testing. 
We survey the state-of-the-art, analyze why and where the applicability of existing techniques falls short, and discuss several avenues for improvement, including novel mining algorithms, methods for aggregating the evidence captured by individual rules, and guidelines for selecting appropriate values for parameters of the mining algorithms.",\n  address =      "Tokyo, Japan",\n  author =       "Moonen, Leon",\n  booktitle =    "IEEE International Workshop on Empirical Software Engineering in Practice (IWESEP)",\n  file =         ":Users/leon/Documents/Mendeley Desktop/Moonen - 2017 - Leveraging Machine Learning to Guide Software Evolution (keynote) - IEEE International Workshop on Empirical Software En.pdf:pdf",\n  publisher =    "IEEE",\n  title =        "{Leveraging Machine Learning to Guide Software Evolution (keynote)}",\n  type =         "keynote",\n  year =         "2017",\n}\n\n
\n
\n\n\n
\n Knowledge about dependencies between system artifacts such as modules, methods and variables is essential for a variety of software maintenance and software evolution tasks. Unfortunately, existing approaches to uncover such dependencies by means of static or dynamic program analysis are typically language-specific. Their application is thus largely restricted to homogeneous systems, which is a major drawback given the increasingly heterogeneity in modern software systems.In this talk, we will look at the alternative of using unsupervised machine learning techniques such as association rule mining, which can be used to infer knowledge about the relationships between items in a data set. Association rule mining has been successfully used to analyze the change history of a software system and uncover so called evolutionary coupling between its artifacts. One of the advantages of this approach is that it is language-agnostic, and uncovering dependencies across artifacts written in different programming languages essentially comes for free.We will explore how association rule mining can be used to derive evidence-based recommendations to guide software maintenance and evolution tasks. Examples include software change impact analysis, recommending related change during development, and conducting targeted regression testing. We survey the state-of-the-art, analyze why and where the applicability of existing techniques falls short, and discuss several avenues for improvement, including novel mining algorithms, methods for aggregating the evidence captured by individual rules, and guidelines for selecting appropriate values for parameters of the mining algorithms.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Safety Evidence Change Impact Analysis in Practice.\n \n \n \n\n\n \n de la Vara, J. L.; Borg, M.; Wnuk, K.; and Moonen, L.\n\n\n \n\n\n\n In International Conference on Software Engineering (ICSE), 2017. ACM/IEEE\n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@InProceedings{dlvara:2017:icse,\n  abstract =     "In many application domains, critical systems must comply with safety standards. This involves gathering safety evidence in the form of artefacts such as safety analyses, system specifications, and testing results. These artefacts can evolve during a system's lifecycle, creating a need for impact analysis to guarantee that system safety and compliance are not jeopardised. Although extensive research has been conducted on change impact analysis and on safety evidence management, the knowledge about how safety evidence change impact analysis is addressed in practice is limited. This paper reports on a survey targeted at filling this gap by analysing the circumstances under which safety evidence change impact analysis is addressed, the tool support used, and the challenges faced. We obtained 97 valid responses representing 16 application domains, 28 countries, and 47 safety standards. The results suggest that most practitioners deal with safety evidence change impact analysis during system development and mainly from system specifications. Furthermore, the level of automation in the process is low and insufficient tool support is the most frequent challenge. Other notable findings include that the different artefact types used as safety evidence seem to co-evolve, the evolution of safety case should probably be better managed, and no commercial impact analysis tool has been reported as used for all artefact types. 
Finally, we identified over 20 areas where the state of the practice in safety evidence change impact analysis can be improved.",\n  author =       "de la Vara, Jose Luis and Borg, Markus and Wnuk, Krzysztof and Moonen, Leon",\n  booktitle =    "International Conference on Software Engineering (ICSE)",\n  keywords =     "Maintenance process,Methods for SQA and V{\\&}V,Software and System Safety,Standards",\n  publisher =    "ACM/IEEE",\n  title =        "{Safety Evidence Change Impact Analysis in Practice}",\n  year =         "2017",\n}\n\n
\n
\n\n\n
\n In many application domains, critical systems must comply with safety standards. This involves gathering safety evidence in the form of artefacts such as safety analyses, system specifications, and testing results. These artefacts can evolve during a system's lifecycle, creating a need for impact analysis to guarantee that system safety and compliance are not jeopardised. Although extensive research has been conducted on change impact analysis and on safety evidence management, the knowledge about how safety evidence change impact analysis is addressed in practice is limited. This paper reports on a survey targeted at filling this gap by analysing the circumstances under which safety evidence change impact analysis is addressed, the tool support used, and the challenges faced. We obtained 97 valid responses representing 16 application domains, 28 countries, and 47 safety standards. The results suggest that most practitioners deal with safety evidence change impact analysis during system development and mainly from system specifications. Furthermore, the level of automation in the process is low and insufficient tool support is the most frequent challenge. Other notable findings include that the different artefact types used as safety evidence seem to co-evolve, the evolution of safety case should probably be better managed, and no commercial impact analysis tool has been reported as used for all artefact types. Finally, we identified over 20 areas where the state of the practice in safety evidence change impact analysis can be improved.\n
\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 2016\n \n \n (16)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n \n Analyzing and visualizing information flow in heterogeneous component-based software systems.\n \n \n \n \n\n\n \n Moonen, L.; and Yazdanshenas, A. R.\n\n\n \n\n\n\n Information and Software Technology, 77: 34–55. September 2016.\n \n\n\n\n
\n\n\n\n \n \n \"AnalyzingPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n  \n \n 1 download\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@Article{moonen:2016:analyzing,\n  abstract =     "Context: Component-based software engineering is aimed at managing the complexity of large-scale software development by composing systems from reusable parts. To understand or validate the behavior of such a system, one needs to understand the components involved in combination with understanding how they are configured and composed. This becomes increasingly difficult when components are implemented in various programming languages, and composition is specified in external artifacts. Moreover, tooling that supports in-depth system-wide analysis of such heterogeneous systems is lacking. Objective: This paper contributes a method to analyze and visualize information flow in a component-based system at various levels of abstraction. These visualizations are designed to support the comprehension needs of both safety domain experts and software developers for, respectively, certification and evolution of safety-critical cyber-physical systems. Method: We build system-wide dependence graphs and use static program slicing to determine all possible end-to-end information flows through and across a system's components. We define a hierarchy of five abstractions over these information flows that reduce visual distraction and cognitive overload, while satisfying the users' information needs. We improve on our earlier work to provide interconnected views that support both systematic, as well as opportunistic navigation scenarios. Results: We discuss the design and implementation of our approach and the resulting views in a prototype tool called FlowTracker. We summarize the results of a qualitative evaluation study, carried out via two rounds of interview, on the effectiveness and usability of these views. We discuss a number of improvements, such as more selective information presentations, that resulted from the evaluation. 
Conclusion: The evaluation shows that the proposed approach and views are useful for understanding and validating heterogeneous component-based systems, and address information needs that could earlier only be met by manual inspection of the source code. We discuss lessons learned and directions for future work.",\n  author =       "Moonen, Leon and Yazdanshenas, Amir Reza",\n  DOI =          "10.1016/j.infsof.2016.05.002",\n  file =         ":Users/leon/Documents/Mendeley Desktop/Moonen, Yazdanshenas - 2016 - Analyzing and visualizing information flow in heterogeneous component-based software systems - Information.pdf:pdf",\n  ISSN =         "09505849",\n  journal =      "Information and Software Technology",\n  keywords =     "Component-based software systems,Information flow analysis,Model reconstruction,Program comprehension,Software visualization",\n  month =        sep,\n  pages =        "34--55",\n  publisher =    "Elsevier",\n  title =        "{Analyzing and visualizing information flow in heterogeneous component-based software systems}",\n  URL =          "http://linkinghub.elsevier.com/retrieve/pii/S0950584916300817",\n  volume =       "77",\n  year =         "2016",\n}\n\n
\n
\n\n\n
\n Context: Component-based software engineering is aimed at managing the complexity of large-scale software development by composing systems from reusable parts. To understand or validate the behavior of such a system, one needs to understand the components involved in combination with understanding how they are configured and composed. This becomes increasingly difficult when components are implemented in various programming languages, and composition is specified in external artifacts. Moreover, tooling that supports in-depth system-wide analysis of such heterogeneous systems is lacking. Objective: This paper contributes a method to analyze and visualize information flow in a component-based system at various levels of abstraction. These visualizations are designed to support the comprehension needs of both safety domain experts and software developers for, respectively, certification and evolution of safety-critical cyber-physical systems. Method: We build system-wide dependence graphs and use static program slicing to determine all possible end-to-end information flows through and across a system's components. We define a hierarchy of five abstractions over these information flows that reduce visual distraction and cognitive overload, while satisfying the users' information needs. We improve on our earlier work to provide interconnected views that support both systematic, as well as opportunistic navigation scenarios. Results: We discuss the design and implementation of our approach and the resulting views in a prototype tool called FlowTracker. We summarize the results of a qualitative evaluation study, carried out via two rounds of interview, on the effectiveness and usability of these views. We discuss a number of improvements, such as more selective information presentations, that resulted from the evaluation. 
Conclusion: The evaluation shows that the proposed approach and views are useful for understanding and validating heterogeneous component-based systems, and address information needs that could earlier only be met by manual inspection of the source code. We discuss lessons learned and directions for future work.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Exploring the Effects of History Length and Age on Mining Software Change Impact.\n \n \n \n \n\n\n \n Moonen, L.; Alesio, S. D.; Rolfsnes, T.; Binkley, D. W. D. W.; Di Alesio, S.; Rolfsnes, T.; and Binkley, D. W. D. W.\n\n\n \n\n\n\n In IEEE International Working Conference on Source Code Analysis and Manipulation (SCAM), pages 207–216, September 2016. IEEE\n \n\n\n\n
\n\n\n\n \n \n \"ExploringPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@InProceedings{moonen:2016:exploring,\n  abstract =     "The goal of Software Change Impact Analysis is to identify artifacts (typically source-code files) potentially affected by a change. Recently, there is an increased interest in mining software change impact based on evolutionary coupling. A particularly promising approach uses association rule mining to uncover potentially affected artifacts from patterns in the system's change history. Two main considerations when using this approach are the history length, the number of transactions from the change history used to identify the impact of a change, and history age, the number of transactions that have occurred since patterns were last mined from the history. Although history length and age can significantly affect the quality of mining results, few guidelines exist on how to best select appropriate values for these two parameters. In this paper, we empirically investigate the effects of history length and age on the quality of change impact analysis using mined evolutionary couplings. Specifically, we report on a series of systematic experiments involving the change histories of two large industrial systems and 17 large open source systems. In these experiments, we vary the length and age of the history used to mine software change impact, and assess how this affects precision and applicability. Results from the study are used to derive practical guidelines for choosing history length and age when applying association rule mining to conduct software change impact analysis.",\n  author =       "Moonen, Leon and Alesio, Stefano Di and Rolfsnes, Thomas and Binkley, David W. Dave W. and {Di Alesio}, Stefano and Rolfsnes, Thomas and Binkley, David W. Dave W.",\n  booktitle =    "IEEE International Working Conference on Source Code Analysis and Manipulation (SCAM)",\n  DOI =          "10.1109/SCAM.2016.9",\n  file =         ":Users/leon/Documents/Mendeley Desktop/Moonen et al. 
- 2016 - Exploring the Effects of History Length and Age on Mining Software Change Impact - IEEE International Working Con.pdf:pdf;:Users/leon/Documents/Mendeley Desktop/Moonen et al. - 2016 - Exploring the Effects of History Length and Age on Mining Software Change Impact - IEEE International Working (2).pdf:pdf",\n  ISBN =         "9781-5090-3848-0",\n  keywords =     "association rule mining,change impact analysis,evolutionary coupling,parameter tuning",\n  month =        sep,\n  pages =        "207--216",\n  publisher =    "IEEE",\n  title =        "{Exploring the Effects of History Length and Age on Mining Software Change Impact}",\n  URL =          "http://ieeexplore.ieee.org/document/7781814/",\n  year =         "2016",\n}\n\n
\n
\n\n\n
\n The goal of Software Change Impact Analysis is to identify artifacts (typically source-code files) potentially affected by a change. Recently, there is an increased interest in mining software change impact based on evolutionary coupling. A particularly promising approach uses association rule mining to uncover potentially affected artifacts from patterns in the system's change history. Two main considerations when using this approach are the history length, the number of transactions from the change history used to identify the impact of a change, and history age, the number of transactions that have occurred since patterns were last mined from the history. Although history length and age can significantly affect the quality of mining results, few guidelines exist on how to best select appropriate values for these two parameters. In this paper, we empirically investigate the effects of history length and age on the quality of change impact analysis using mined evolutionary couplings. Specifically, we report on a series of systematic experiments involving the change histories of two large industrial systems and 17 large open source systems. In these experiments, we vary the length and age of the history used to mine software change impact, and assess how this affects precision and applicability. Results from the study are used to derive practical guidelines for choosing history length and age when applying association rule mining to conduct software change impact analysis.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Improving change recommendation using aggregated association rules.\n \n \n \n \n\n\n \n Rolfsnes, T.; Moonen, L.; Di Alesio, S.; Behjati, R.; and Binkley, D.\n\n\n \n\n\n\n In International Conference on Mining Software Repositories (MSR), pages 73–84, 2016. ACM\n \n\n\n\n
\n\n\n\n \n \n \"ImprovingPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@InProceedings{rolfsnes:2016:improving,\n  abstract =     "Past research has proposed association rule mining as a means to uncover the evolutionary coupling from a system's change history. These couplings have various applications, such as improving system decomposition and recommending related changes during development. The strength of the coupling can be characterized using a variety of interestingness measures. Existing recommendation engines typically use only the rule with the highest interestingness value in situations where more than one rule applies. In contrast, we argue that multiple applicable rules indicate increased evidence, and hypothesize that the aggregation of such rules can be exploited to provide more accurate recommendations. To investigate this hypothesis we conduct an empirical study on the change histories of two large industrial systems and four large open source systems. As aggregators we adopt three cumulative gain functions from information retrieval. The experiments evaluate the three using 39 different rule interestingness measures. The results show that aggregation provides a significant impact on most measure's value and, furthermore, leads to a significant improvement in the resulting recommendation.",\n  author =       "Rolfsnes, Thomas and Moonen, Leon and {Di Alesio}, Stefano and Behjati, Razieh and Binkley, Dave",\n  booktitle =    "International Conference on Mining Software Repositories (MSR)",\n  DOI =          "10.1145/2901739.2901756",\n  file =         ":Users/leon/Documents/Mendeley Desktop/Rolfsnes et al. - 2016 - Improving change recommendation using aggregated association rules - International Conference on Mining Softwar.pdf:pdf",\n  ISBN =         "9781-4503-4186-8",\n  pages =        "73--84",\n  publisher =    "ACM",\n  title =        "{Improving change recommendation using aggregated association rules}",\n  URL =          "http://dl.acm.org/citation.cfm?doid=2901739.2901756",\n  year =         "2016",\n}\n\n
\n
\n\n\n
\n Past research has proposed association rule mining as a means to uncover the evolutionary coupling from a system's change history. These couplings have various applications, such as improving system decomposition and recommending related changes during development. The strength of the coupling can be characterized using a variety of interestingness measures. Existing recommendation engines typically use only the rule with the highest interestingness value in situations where more than one rule applies. In contrast, we argue that multiple applicable rules indicate increased evidence, and hypothesize that the aggregation of such rules can be exploited to provide more accurate recommendations. To investigate this hypothesis we conduct an empirical study on the change histories of two large industrial systems and four large open source systems. As aggregators we adopt three cumulative gain functions from information retrieval. The experiments evaluate the three using 39 different rule interestingness measures. The results show that aggregation provides a significant impact on most measure's value and, furthermore, leads to a significant improvement in the resulting recommendation.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Practical guidelines for change recommendation using association rule mining.\n \n \n \n \n\n\n \n Moonen, L.; Di Alesio, S.; Binkley, D.; and Rolfsnes, T.\n\n\n \n\n\n\n In International Conference on Automated Software Engineering (ASE), pages 732–743, September 2016. ACM\n \n\n\n\n
\n\n\n\n \n \n \"PracticalPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@InProceedings{moonen:2016:guidelines,\n  abstract =     "Association rule mining is an unsupervised learning technique that infers relationships among items in a data set. This technique has been successfully used to analyze a system's change history and uncover evolutionary coupling between system artifacts. Evolutionary coupling can, in turn, be used to recommend artifacts that are potentially impacted by a given set of changes to the system. In general, the quality of such recommendations is affected by (1) the values selected for various parameters of the mining algorithm, (2) the characteristics of the change set used to derive the recommendation, and (3) the characteristics of the system's change history for which recommendations are generated. In this paper, we empirically investigate the extent to which these factors affect change impact recommendations. Specifically, we conduct a series of systematic experiments on the change histories of two large industrial systems and eight large open source systems, in which we control the change size for which to derive recommendations, the measure used to assess the strength of the evolutionary coupling, and the maximum size of historical changes taken into account when inferring these couplings. We use the results from our study to derive a number of practical guidelines for applying association rule mining to derive software change impact recommendations.",\n  author =       "Moonen, Leon and {Di Alesio}, Stefano and Binkley, David and Rolfsnes, Thomas",\n  booktitle =    "International Conference on Automated Software Engineering (ASE)",\n  DOI =          "10.1145/2970276.2970327",\n  file =         ":Users/leon/Documents/Mendeley Desktop//Moonen et al. 
- 2016 - Practical guidelines for change recommendation using association rule mining - International Conference on Automa.pdf:pdf",\n  ISBN =         "9781-4503-3845-5",\n  keywords =     "Evolutionary coupling,association rule mining,change impact analysis,change recommendations,parameter tuning",\n  month =        sep,\n  pages =        "732--743",\n  publisher =    "ACM",\n  title =        "{Practical guidelines for change recommendation using association rule mining}",\n  URL =          "http://dl.acm.org/citation.cfm?doid=2970276.2970327",\n  year =         "2016",\n}\n\n
\n
\n\n\n
\n Association rule mining is an unsupervised learning technique that infers relationships among items in a data set. This technique has been successfully used to analyze a system's change history and uncover evolutionary coupling between system artifacts. Evolutionary coupling can, in turn, be used to recommend artifacts that are potentially impacted by a given set of changes to the system. In general, the quality of such recommendations is affected by (1) the values selected for various parameters of the mining algorithm, (2) the characteristics of the change set used to derive the recommendation, and (3) the characteristics of the system's change history for which recommendations are generated. In this paper, we empirically investigate the extent to which these factors affect change impact recommendations. Specifically, we conduct a series of systematic experiments on the change histories of two large industrial systems and eight large open source systems, in which we control the change size for which to derive recommendations, the measure used to assess the strength of the evolutionary coupling, and the maximum size of historical changes taken into account when inferring these couplings. We use the results from our study to derive a number of practical guidelines for applying association rule mining to derive software change impact recommendations.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Generalizing the Analysis of Evolutionary Coupling for Software Change Impact Analysis.\n \n \n \n \n\n\n \n Rolfsnes, T.; Alesio, S. D.; Behjati, R.; Moonen, L.; and Binkley, D. W.\n\n\n \n\n\n\n In International Conference on Software Analysis, Evolution, and Reengineering (SANER), pages 201–212, March 2016. IEEE\n \n\n\n\n
\n\n\n\n \n \n \"GeneralizingPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@InProceedings{Rolfsnes:2016:SANER,\n  abstract =     "Software change impact analysis aims to find artifacts potentially affected by a change. Typical approaches apply language-specific static or dynamic dependence analysis, and are thus restricted to homogeneous systems. This restriction is a major drawback given today's increasingly heterogeneous software. Evolutionary coupling has been proposed as a language-agnostic alternative that mines relations between source-code entities from the system's change history. Unfortunately, existing evolutionary coupling based techniques fall short. For example, using Singular Value Decomposition (SVD) quickly becomes computationally expensive. An efficient alternative applies targeted association rule mining, but the most widely known approach (ROSE) has restricted applicability: experiments on two large industrial systems, and four large open source systems, show that ROSE can only identify dependencies about 25{\\%} of the time. To overcome this limitation, we introduce TARMAQ, a new algorithm for mining evolutionary coupling. Empirically evaluated on the same six systems, TARMAQ performs consistently better than ROSE and SVD, is applicable 100{\\%} of the time, and runs orders of magnitude faster than SVD. We conclude that the proposed algorithm is a significant step forward towards achieving robust change impact analysis for heterogeneous systems.",\n  author =       "Rolfsnes, Thomas and Alesio, Stefano Di and Behjati, Razieh and Moonen, Leon and Binkley, Dave W.",\n  booktitle =    "International Conference on Software Analysis, Evolution, and Reengineering (SANER)",\n  DOI =          "10.1109/SANER.2016.101",\n  file =         ":Users/leon/Documents/Mendeley Desktop/Rolfsnes et al. 
- 2016 - Generalizing the Analysis of Evolutionary Coupling for Software Change Impact Analysis - International Conferen.pdf:pdf",\n  ISBN =         "978-1-5090-1855-0",\n  keywords =     "Machine learning,evolutionary coupling,software repository mining,targeted association rule mining",\n  month =        mar,\n  pages =        "201--212",\n  publisher =    "IEEE",\n  title =        "{Generalizing the Analysis of Evolutionary Coupling for Software Change Impact Analysis}",\n  URL =          "http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=7476643 http://ieeexplore.ieee.org/document/7476643/",\n  year =         "2016",\n}\n\n
\n
\n\n\n
\n Software change impact analysis aims to find artifacts potentially affected by a change. Typical approaches apply language-specific static or dynamic dependence analysis, and are thus restricted to homogeneous systems. This restriction is a major drawback given today’s increasingly heterogeneous software. Evolutionary coupling has been proposed as a language-agnostic alternative that mines relations between source-code entities from the system’s change history. Unfortunately, existing evolutionary coupling based techniques fall short. For example, using Singular Value Decomposition (SVD) quickly becomes computationally expensive. An efficient alternative applies targeted association rule mining, but the most widely known approach (ROSE) has restricted applicability: experiments on two large industrial systems, and four large open source systems, show that ROSE can only identify dependencies about 25% of the time. To overcome this limitation, we introduce TARMAQ, a new algorithm for mining evolutionary coupling. Empirically evaluated on the same six systems, TARMAQ performs consistently better than ROSE and SVD, is applicable 100% of the time, and runs orders of magnitude faster than SVD. We conclude that the proposed algorithm is a significant step forward towards achieving robust change impact analysis for heterogeneous systems.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Introduction to the Special Issue on Program Comprehension.\n \n \n \n \n\n\n \n Roy, C. K.; Begel, A.; and Moonen, L.\n\n\n \n\n\n\n Journal of Software: Evolution and Process, 28(10): 838–839. October 2016.\n \n\n\n\n
\n\n\n\n \n \n \"IntroductionPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@Article{roy:2016:icpc:si:intro,\n  author =       "Roy, Chanchal K. and Begel, Andrew and Moonen, Leon",\n  DOI =          "10.1002/smr.1836",\n  ISSN =         "20477473",\n  journal =      "Journal of Software: Evolution and Process",\n  month =        oct,\n  number =       "10",\n  pages =        "838--839",\n  publisher =    "Wiley",\n  title =        "{Introduction to the Special Issue on Program Comprehension}",\n  URL =          "http://doi.wiley.com/10.1002/smr.1836",\n  volume =       "28",\n  year =         "2016",\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Exploring the Effects of History Length and Age on Mining Software Change Impact (report).\n \n \n \n\n\n \n Moonen, L.; Di Alesio, S.; Rolfsnes, T.; and Binkley, D. W.\n\n\n \n\n\n\n Technical Report 2016-10, 2016.\n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@TechReport{moonen:2016:exploring:report,\n  abstract =     "The goal of Software Change Impact Analysis is to identify artifacts (typically source-code files) potentially affected by a change. Recently, there is an increased interest in mining software change impact based on evolutionary coupling. A particularly promising approach uses association rule mining to uncover potentially affected artifacts from patterns in the system's change history. Two main considerations when using this approach are the history length, the number of transactions from the change history used to identify the impact of a change, and history age, the number of transactions that have occurred since patterns were last mined from the history. Although history length and age can significantly affect the quality of mining results, few guidelines exist on how to best select appropriate values for these two parameters. In this paper, we empirically investigate the effects of history length and age on the quality of change impact analysis using mined evolutionary couplings. Specifically, we report on a series of systematic experiments involving the change histories of two large industrial systems and 17 large open source systems. In these experiments, we vary the length and age of the history used to mine software change impact, and assess how this affects precision and applicability. 
Results from the study are used to derive practical guidelines for choosing history length and age when applying association rule mining to conduct software change impact analysis.",\n  author =       "Moonen, Leon and {Di Alesio}, Stefano and Rolfsnes, Thomas and Binkley, Dave W.",\n  booktitle =    "Technical Report",\n  keywords =     "association rule mining,change impact analysis,evolutionary coupling,parameter tuning",\n  number =       "2016-10",\n  publisher =    "Simula Research Laboratory",\n  title =        "{Exploring the Effects of History Length and Age on Mining Software Change Impact (report)}",\n  year =         "2016",\n}\n\n
\n
\n\n\n
\n The goal of Software Change Impact Analysis is to identify artifacts (typically source-code files) potentially affected by a change. Recently, there is an increased interest in mining software change impact based on evolutionary coupling. A particularly promising approach uses association rule mining to uncover potentially affected artifacts from patterns in the system's change history. Two main considerations when using this approach are the history length, the number of transactions from the change history used to identify the impact of a change, and history age, the number of transactions that have occurred since patterns were last mined from the history. Although history length and age can significantly affect the quality of mining results, few guidelines exist on how to best select appropriate values for these two parameters. In this paper, we empirically investigate the effects of history length and age on the quality of change impact analysis using mined evolutionary couplings. Specifically, we report on a series of systematic experiments involving the change histories of two large industrial systems and 17 large open source systems. In these experiments, we vary the length and age of the history used to mine software change impact, and assess how this affects precision and applicability. Results from the study are used to derive practical guidelines for choosing history length and age when applying association rule mining to conduct software change impact analysis.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Practical Guidelines for Change Recommendation using Association Rule Mining (report).\n \n \n \n\n\n \n Moonen, L.; Di Alesio, S.; Binkley, D. W.; and Rolfsnes, T.\n\n\n \n\n\n\n Technical Report 2016-09, 2016.\n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@TechReport{moonen:2016:guidelines:report,\n  abstract =     "Association rule mining is an unsupervised learning technique that infers relationships among items in a data set. This technique has been successfully used to analyze a system's change history and uncover evolutionary coupling between system artifacts. Evolutionary coupling can, in turn, be used to recommend artifacts that are potentially impacted by a given set of changes to the system. In general, the quality of such recommendations is affected by (1) the values selected for various parameters of the mining algorithm, (2) the characteristics of the change set used to derive the recommendation, and (3) the characteristics of the system's change history for which recommendations are generated. In this paper, we empirically investigate the extent to which these factors affect change impact recommendations. Specifically, we conduct a series of systematic experiments on the change histories of two large industrial systems and eight large open source systems, in which we control the change size for which to derive recommendations, the measure used to assess the strength of the evolutionary coupling, and the maximum size of historical changes taken into account when inferring these couplings. We use the results from our study to derive a number of practical guidelines for applying association rule mining to derive software change impact recommendations.",\n  author =       "Moonen, Leon and {Di Alesio}, Stefano and Binkley, Dave W. and Rolfsnes, Thomas",\n  booktitle =    "Technical Report",\n  keywords =     "Evolutionary coupling,association rule mining,change impact analysis,change recommendations,parameter tuning",\n  number =       "2016-09",\n  pages =        "11",\n  publisher =    "Simula Research Laboratory",\n  title =        "{Practical Guidelines for Change Recommendation using Association Rule Mining (report)}",\n  type =         "Technical Report",\n  year =         "2016",\n}\n\n
\n
\n\n\n
\n Association rule mining is an unsupervised learning technique that infers relationships among items in a data set. This technique has been successfully used to analyze a system's change history and uncover evolutionary coupling between system artifacts. Evolutionary coupling can, in turn, be used to recommend artifacts that are potentially impacted by a given set of changes to the system. In general, the quality of such recommendations is affected by (1) the values selected for various parameters of the mining algorithm, (2) the characteristics of the change set used to derive the recommendation, and (3) the characteristics of the system's change history for which recommendations are generated. In this paper, we empirically investigate the extent to which these factors affect change impact recommendations. Specifically, we conduct a series of systematic experiments on the change histories of two large industrial systems and eight large open source systems, in which we control the change size for which to derive recommendations, the measure used to assess the strength of the evolutionary coupling, and the maximum size of historical changes taken into account when inferring these couplings. We use the results from our study to derive a number of practical guidelines for applying association rule mining to derive software change impact recommendations.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Exploring the Design Space of Association Rule Mining Algorithms for Change Recommendation (report).\n \n \n \n\n\n \n Rolfsnes, T.; Moonen, L.; Di Alesio, S.; and Binkley, D. W.\n\n\n \n\n\n\n Technical Report 2016-14, 2016.\n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@TechReport{rolfsnes:2015:design_space:report,\n  author =       "Rolfsnes, Thomas and Moonen, Leon and {Di Alesio}, Stefano and Binkley, Dave W.",\n  booktitle =    "Technical Report",\n  number =       "2016-14",\n  publisher =    "Simula Research Laboratory",\n  title =        "{Exploring the Design Space of Association Rule Mining Algorithms for Change Recommendation (report)}",\n  year =         "2016",\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Proceedings of the First International Workshop on Technical Debt Analytics (TDA 2016).\n \n \n \n \n\n\n \n Yamashita, A.; Moonen, L.; Mens, T.; and Tahir, A.\n\n\n \n\n\n\n Volume 1771 CEUR Workshop Proceedings, 2016.\n \n\n\n\n
\n\n\n\n \n \n \"ProceedingsPaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n\n\n\n
\n
@Book{yamashita:2016:tda:proceedings,\n  abstract =     "An open challenge in Technical Debt (TD) research is to translate TD threats into economic opportunities, so development teams can make a strong case to the business side to invest on paying off TD by increasing Technical Wealth (TW). For this, a comprehensive TD/TW theory (e.g., standardizing ambiguous terms into a consistent vocabulary) is needed to formalize and consolidate relationships between the costs of technical debt and benefits of technical wealth. We aim to gather practitioners and researchers working in this area, to share experiences, concur on terminologies and evaluation guidelines. The TDA 2016 workshop offers a specialised arena in TD to address the following goals: (1) Calibrating technical debt and technical wealth related terminologies and concepts that are used indistinctly and interchangeably in software engineering literature, and (2) Comparing, integrating, compiling and even reconciling empirical work on the effects of technical debt/technical wealth from economic and organisational perspectives.",\n  author =       "Yamashita, Aiko and Moonen, Leon and Mens, Tom and Tahir, Amjed",\n  booktitle =    "CEUR Workshop Proceedings",\n  ISSN =         "1613-0073",\n  keywords =     "technical debt,technical wealth",\n  pages =        "30",\n  publisher =    "CEUR Workshop Proceedings",\n  title =        "{Proceedings of the First International Workshop on Technical Debt Analytics (TDA 2016)}",\n  URL =          "http://ceur-ws.org/Vol-1771/",\n  volume =       "1771",\n  year =         "2016",\n}\n\n
\n
\n\n\n
\n An open challenge in Technical Debt (TD) research is to translate TD threats into economic opportunities, so development teams can make a strong case to the business side to invest on paying off TD by increasing Technical Wealth (TW). For this, a comprehensive TD/TW theory (e.g., standardizing ambiguous terms into a consistent vocabulary) is needed to formalize and consolidate relationships between the costs of technical debt and benefits of technical wealth. We aim to gather practitioners and researchers working in this area, to share experiences, concur on terminologies and evaluation guidelines. The TDA 2016 workshop offers a specialised arena in TD to address the following goals: (1) Calibrating technical debt and technical wealth related terminologies and concepts that are used indistinctly and interchangeably in software engineering literature, and (2) Comparing, integrating, compiling and even reconciling empirical work on the effects of technical debt/technical wealth from economic and organisational perspectives.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Special Issue on Software Maintenance and Evolution.\n \n \n \n \n\n\n \n Moonen, L.; and Pollock, L.\n\n\n \n\n\n\n Journal of Software: Evolution and Process, 28(7): 507–509. July 2016.\n \n\n\n\n
\n\n\n\n \n \n \"SpecialPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@Article{moonen:2016:icsme:si,\n  author =       "Moonen, Leon and Pollock, Lori",\n  DOI =          "10.1002/smr.1745",\n  ISSN =         "20477473",\n  journal =      "Journal of Software: Evolution and Process",\n  month =        jul,\n  number =       "7",\n  pages =        "507--509",\n  publisher =    "Wiley",\n  title =        "{Special Issue on Software Maintenance and Evolution}",\n  URL =          "http://doi.wiley.com/10.1002/smr.1745",\n  volume =       "28",\n  year =         "2016",\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Special Issue on Program Comprehension.\n \n \n \n \n\n\n \n Roy, C. K.; Begel, A.; and Moonen, L.\n\n\n \n\n\n\n Journal of Software: Evolution and Process, 28(10): 835–837. October 2016.\n \n\n\n\n
\n\n\n\n \n \n \"SpecialPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@Article{roy:2016:icpc:si,\n  author =       "Roy, Chanchal K. and Begel, Andrew and Moonen, Leon",\n  DOI =          "10.1002/smr.1748",\n  ISSN =         "20477473",\n  journal =      "Journal of Software: Evolution and Process",\n  month =        oct,\n  number =       "10",\n  pages =        "835--837",\n  publisher =    "Wiley",\n  title =        "{Special Issue on Program Comprehension}",\n  URL =          "http://doi.wiley.com/10.1002/smr.1748",\n  volume =       "28",\n  year =         "2016",\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Proceedings of the Third International Workshop on Patterns Promotion and Anti-patterns Prevention.\n \n \n \n \n\n\n \n Moonen, L.; Khomh, F.; Washizaki, H.; Guéhéneuc, Y.; and Antoniol, G.\n\n\n \n\n\n\n In International Conference on Software Analysis, Evolution, and Reengineering (SANER), volume 4, pages i-14. IEEE, March 2016.\n \n\n\n\n
\n\n\n\n \n \n \"ProceedingsPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@InCollection{moonen:2016:ppap:proceedings,\n  abstract =     "Patterns are proven solutions to problems that reoccur in certain contexts whereas anti-patterns are their opposite, i.e., known poor practices. It has been over two decades since design patterns and anti-patterns were introduced in software engineering and a well-established body of work has been created on the definition, detection, application, and impact of design patterns and anti-patterns. In the SANER community, patterns and anti-patterns are widely studied in relation to program comprehension, software maintenance, and more generally software quality. Yet, limited feedback exists on the extent to which practitioners benefit from this body of work. The third edition of the PPAP workshop aims to promote patterns and prevent anti-patterns in practice. Concretely, it aims to bring together practitioners, researchers, and students to discuss challenges and opportunities surrounding (anti-)patterns in software evolution and to develop a road-map to further the promotion of patterns and the prevention of anti-patterns.",\n  author =       "Moonen, Leon and Khomh, Foutse and Washizaki, Hironori and Gu{\\'{e}}h{\\'{e}}neuc, Yann-Ga{\\"{e}}l and Antoniol, Giuliano",\n  booktitle =    "International Conference on Software Analysis, Evolution, and Reengineering (SANER)",\n  DOI =          "10.1109/SANER.2016.126",\n  month =        mar,\n  pages =        "i-14",\n  publisher =    "IEEE",\n  title =        "{Proceedings of the Third International Workshop on Patterns Promotion and Anti-patterns Prevention}",\n  URL =          "http://ieeexplore.ieee.org/xpl/tocresult.jsp?isnumber=7476733",\n  volume =       "4",\n  year =         "2016",\n}\n\n
\n
\n\n\n
\n Patterns are proven solutions to problems that reoccur in certain contexts whereas anti-patterns are their opposite, i.e., known poor practices. It has been over two decades since design patterns and anti-patterns were introduced in software engineering and a well-established body of work has been created on the definition, detection, application, and impact of design patterns and anti-patterns. In the SANER community, patterns and anti-patterns are widely studied in relation to program comprehension, software maintenance, and more generally software quality. Yet, limited feedback exists on the extent to which practitioners benefit from this body of work. The third edition of the PPAP workshop aims to promote patterns and prevent anti-patterns in practice. Concretely, it aims to bring together practitioners, researchers, and students to discuss challenges and opportunities surrounding (anti-)patterns in software evolution and to develop a road-map to further the promotion of patterns and the prevention of anti-patterns.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n An Industrial Survey of Safety Evidence Change Impact Analysis Practice.\n \n \n \n \n\n\n \n de la Vara, J. L.; Borg, M.; Wnuk, K.; and Moonen, L.\n\n\n \n\n\n\n IEEE Transactions on Software Engineering, 42(12): 1095–1117. December 2016.\n \n\n\n\n
\n\n\n\n \n \n \"AnPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@Article{dlvara:2016:secia,\n  abstract =     "In many application domains, critical systems must comply with safety standards. This involves gathering safety evidence in the form of artefacts such as safety analyses, system specifications, and testing results. These artefacts can evolve during a system's lifecycle, creating a need for impact analysis to guarantee that system safety and compliance are not jeopardised. Although extensive research has been conducted on change impact analysis and on safety evidence management, the knowledge about how safety evidence change impact analysis is addressed in practice is limited. This paper reports on a survey targeted at filling this gap by analysing the circumstances under which safety evidence change impact analysis is addressed, the tool support used, and the challenges faced. We obtained 97 valid responses representing 16 application domains, 28 countries, and 47 safety standards. The results suggest that most practitioners deal with safety evidence change impact analysis during system development and mainly from system specifications. Furthermore, the level of automation in the process is low and insufficient tool support is the most frequent challenge. Other notable findings include that the different artefact types used as safety evidence seem to co-evolve, the evolution of safety case should probably be better managed, and no commercial impact analysis tool has been reported as used for all artefact types. 
Finally, we identified over 20 areas where the state of the practice in safety evidence change impact analysis can be improved.",\n  author =       "de la Vara, Jose Luis and Borg, Markus and Wnuk, Krzysztof and Moonen, Leon",\n  DOI =          "10.1109/TSE.2016.2553032",\n  ISSN =         "0098-5589",\n  journal =      "IEEE Transactions on Software Engineering",\n  keywords =     "Maintenance process,Methods for SQA and V{\\&}V,Software and System Safety,Standards",\n  month =        dec,\n  number =       "12",\n  pages =        "1095--1117",\n  publisher =    "IEEE",\n  title =        "{An Industrial Survey of Safety Evidence Change Impact Analysis Practice}",\n  URL =          "http://ieeexplore.ieee.org/xpl/articleDetails.jsp?arnumber=2553032 http://ieeexplore.ieee.org/document/7450627/",\n  volume =       "42",\n  year =         "2016",\n}\n\n
\n
\n\n\n
\n In many application domains, critical systems must comply with safety standards. This involves gathering safety evidence in the form of artefacts such as safety analyses, system specifications, and testing results. These artefacts can evolve during a system's lifecycle, creating a need for impact analysis to guarantee that system safety and compliance are not jeopardised. Although extensive research has been conducted on change impact analysis and on safety evidence management, the knowledge about how safety evidence change impact analysis is addressed in practice is limited. This paper reports on a survey targeted at filling this gap by analysing the circumstances under which safety evidence change impact analysis is addressed, the tool support used, and the challenges faced. We obtained 97 valid responses representing 16 application domains, 28 countries, and 47 safety standards. The results suggest that most practitioners deal with safety evidence change impact analysis during system development and mainly from system specifications. Furthermore, the level of automation in the process is low and insufficient tool support is the most frequent challenge. Other notable findings include that the different artefact types used as safety evidence seem to co-evolve, the evolution of safety case should probably be better managed, and no commercial impact analysis tool has been reported as used for all artefact types. Finally, we identified over 20 areas where the state of the practice in safety evidence change impact analysis can be improved.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Report on the First International Workshop on Technical Debt Analytics (TDA 2016).\n \n \n \n \n\n\n \n Yamashita, A.; Moonen, L.; Mens, T.; and Tahir, A.\n\n\n \n\n\n\n In Sureka, A., editor(s), Joint Proceedings of the 4th International Workshop on Quantitative Approaches to Software Quality (QuASoQ 2016) and 1st International Workshop on Technical Debt Analytics (TDA 2016), pages 58–63, 2016. CEUR Workshop Proceedings\n \n\n\n\n
\n\n\n\n \n \n \"ReportPaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n\n\n\n
\n
@InProceedings{yamashita:2016:tda-report,\n  abstract =     "This report outlines the motivation and goals of the First International Workshop on Technical Debt Analytics (TDA 2016), presents the workshop programme, introduces the work accepted for presentation, and summarizes the major results and themes that emerged from the discussion and activities undertaken during the workshop.",\n  author =       "Yamashita, Aiko and Moonen, Leon and Mens, Tom and Tahir, Amjed",\n  booktitle =    "Joint Proceedings of the 4th International Workshop on Quantitative Approaches to Software Quality (QuASoQ 2016) and 1st International Workshop on Technical Debt Analytics (TDA 2016)",\n  editor =       "Sureka, Ashish",\n  ISSN =         "1613-0073",\n  keywords =     "technical debt,technical wealth",\n  pages =        "58--63",\n  publisher =    "CEUR Workshop Proceedings",\n  title =        "{Report on the First International Workshop on Technical Debt Analytics (TDA 2016)}",\n  URL =          "http://ceur-ws.org/Vol-1771/paper9.pdf",\n  year =         "2016",\n}\n\n
\n
\n\n\n
\n This report outlines the motivation and goals of the First International Workshop on Technical Debt Analytics (TDA 2016), presents the workshop programme, introduces the work accepted for presentation, and summarizes the major results and themes that emerged from the discussion and activities undertaken during the workshop.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Introduction to the special issue on software maintenance and evolution.\n \n \n \n \n\n\n \n Moonen, L.; and Pollock, L.\n\n\n \n\n\n\n Journal of Software: Evolution and Process, 28(7): 510–511. July 2016.\n \n\n\n\n
\n\n\n\n \n \n \"IntroductionPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@Article{moonen:2016:icsme:si:intro,\n  author =       "Moonen, Leon and Pollock, Lori",\n  DOI =          "10.1002/smr.1798",\n  ISSN =         "20477473",\n  journal =      "Journal of Software: Evolution and Process",\n  month =        jul,\n  number =       "7",\n  pages =        "510--511",\n  publisher =    "Wiley",\n  title =        "{Introduction to the special issue on software maintenance and evolution}",\n  URL =          "http://doi.wiley.com/10.1002/smr.1798",\n  volume =       "28",\n  year =         "2016",\n}\n\n
\n
\n\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 2015\n \n \n (3)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n \n Towards evidence-based recommendations to guide the evolution of component-based product families.\n \n \n \n \n\n\n \n Moonen, L.\n\n\n \n\n\n\n Science of Computer Programming, 97: 105–112. January 2015.\n \n\n\n\n
\n\n\n\n \n \n \"TowardsPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n\n\n\n
\n
@Article{moonen:2015:evolveit,\n  abstract =     "Many large-scale software-intensive systems are produced as instances of component-based product families, a well-known tactic to develop a portfolio of software products based on a collection of shared assets. However, sharing components between software products introduces dependencies that complicate maintenance and evolution: changes made in a component to address an issue in one product may have undesirable effects on other products in which the same component is used. Therefore, developers not only need to understand how a proposed change will impact the component and product at hand; they also need to understand how it affects the whole product family, including systems that are already deployed. Given that these systems contain thousands of components, it is no surprise that it is hard to reason about the impact of a change on a single product, let alone assess the effects of more complex evolution scenarios on a complete product family. Conventional impact analysis techniques do not suffice for large-scale software-intensive systems and highly populated product families, and software engineers need better support to conduct these tasks. Finally, for an accountable comparison of alternative evolution scenarios, a measure is needed to quantify the scale of impact for each strategy. This is especially important in our context of safety-critical systems since these need to undergo (costly) re-certification after a change. Cost-effective recommendations should prioritize evolution scenarios that minimize impact scale, and thereby minimize re-certification efforts. This paper explores how reverse engineering and program comprehension techniques can be used to develop novel recommendation technology that uses concrete evidence gathered from software artifacts to support engineers with the evolution of families of complex, safety-critical, software-intensive systems. 
We give an overview of the state of the art in this area, discuss some of the research directions that have been considered up to now, identify challenges, and pose a number of research questions to advance the state of the art. © 2013 Elsevier B.V. All rights reserved.",\n  author =       "Moonen, Leon",\n  DOI =          "10.1016/j.scico.2013.11.009",\n  file =         ":Users/leon/Documents/Mendeley Desktop/Moonen - 2013 - Towards evidence-based recommendations to guide the evolution of component-based product families - Science of Computer.pdf:pdf",\n  ISBN =         "0167-6423",\n  ISSN =         "01676423",\n  journal =      "Science of Computer Programming",\n  keywords =     "change impact analysis,program comprehension,reverse engineering",\n  month =        jan,\n  pages =        "105--112",\n  title =        "{Towards evidence-based recommendations to guide the evolution of component-based product families}",\n  URL =          "http://www.sciencedirect.com/science/article/pii/S0167642313002931 http://linkinghub.elsevier.com/retrieve/pii/S0167642313002931",\n  volume =       "97",\n  year =         "2015",\n}\n\n
\n
\n\n\n
\n Many large-scale software-intensive systems are produced as instances of component-based product families, a well-known tactic to develop a portfolio of software products based on a collection of shared assets. However, sharing components between software products introduces dependencies that complicate maintenance and evolution: changes made in a component to address an issue in one product may have undesirable effects on other products in which the same component is used. Therefore, developers not only need to understand how a proposed change will impact the component and product at hand; they also need to understand how it affects the whole product family, including systems that are already deployed. Given that these systems contain thousands of components, it is no surprise that it is hard to reason about the impact of a change on a single product, let alone assess the effects of more complex evolution scenarios on a complete product family. Conventional impact analysis techniques do not suffice for large-scale software-intensive systems and highly populated product families, and software engineers need better support to conduct these tasks. Finally, for an accountable comparison of alternative evolution scenarios, a measure is needed to quantify the scale of impact for each strategy. This is especially important in our context of safety-critical systems since these need to undergo (costly) re-certification after a change. Cost-effective recommendations should prioritize evolution scenarios that minimize impact scale, and thereby minimize re-certification efforts. This paper explores how reverse engineering and program comprehension techniques can be used to develop novel recommendation technology that uses concrete evidence gathered from software artifacts to support engineers with the evolution of families of complex, safety-critical, software-intensive systems. 
We give an overview of the state of the art in this area, discuss some of the research directions that have been considered up to now, identify challenges, and pose a number of research questions to advance the state of the art. © 2013 Elsevier B.V. All rights reserved.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Generating Test-plans by Mining Version Histories (report).\n \n \n \n\n\n \n Rolfsnes, T.; Behjati, R.; and Moonen, L.\n\n\n \n\n\n\n Technical Report 2015-01, 2015.\n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@TechReport{rolfsnes:2015:test-plans:report,\n  abstract =     "Regression testing is an essential step in safeguarding the evolution of a system, yet there is often not enough time to exercise all available tests. Identifying the subset of tests that can reveal potential issues introduced by a change is a challenge. It requires identifying the parts of the system that are dependent on that change, a task typically done by means of static program analysis. In this paper, we investigate an alternative approach, using software repository mining. We propose a method that mines the revision-history of a system to uncover dependencies, and uses these for test-selection and test-prioritization. We have implemented the approach in a prototype tool that recommends a test plan, given a set of changes. We have applied our tool on 10 years of revision-history from one of the central systems of our industrial partner, Kongsberg Maritime. Our evaluation shows that our approach accurately identifies dependencies among files, and comparing our recommendations with existing test plans shows that relevant tests are given high priority in our recommendation. By reducing the amount of test to exercise, and limiting time spend on test-plan creation, our approach helps to increase cost-effectiveness of regression testing in the company.",\n  author =       "Rolfsnes, Thomas and Behjati, Razieh and Moonen, Leon",\n  booktitle =    "Technical Report",\n  number =       "2015-01",\n  publisher =    "Simula Research Laboratory",\n  title =        "{Generating Test-plans by Mining Version Histories (report)}",\n  year =         "2015",\n}\n\n
\n
\n\n\n
\n Regression testing is an essential step in safeguarding the evolution of a system, yet there is often not enough time to exercise all available tests. Identifying the subset of tests that can reveal potential issues introduced by a change is a challenge. It requires identifying the parts of the system that are dependent on that change, a task typically done by means of static program analysis. In this paper, we investigate an alternative approach, using software repository mining. We propose a method that mines the revision-history of a system to uncover dependencies, and uses these for test-selection and test-prioritization. We have implemented the approach in a prototype tool that recommends a test plan, given a set of changes. We have applied our tool on 10 years of revision-history from one of the central systems of our industrial partner, Kongsberg Maritime. Our evaluation shows that our approach accurately identifies dependencies among files, and comparing our recommendations with existing test plans shows that relevant tests are given high priority in our recommendation. By reducing the number of tests to exercise, and limiting time spent on test-plan creation, our approach helps to increase cost-effectiveness of regression testing in the company.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Cross-language program analysis for the evolution of multi-language software systems: a systematic literature review.\n \n \n \n\n\n \n Yazdanshenas, A. R.; and Moonen, L.\n\n\n \n\n\n\n submitted to Journal of Software: Evolution and Process. 2015.\n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@Article{yazdanshenas:2015:cross-language,\n  abstract =     "Contemporary software systems are rarely implemented uniformly in one programming language, and delivered in one type of development artifact. Instead, it is common practice to use the programming language that fits best to develop each part of the system. Moreover, in large-scale legacy software systems, the maintainance process often has to deal with several programming languages and artifacts simultaneously. This heterogeneity complicates most system-wide tasks in the evolution of such multi-language systems, as cross-language dependencies and interactions are substantially more difficult to identify and manage. This paper seeks to provide a basis for the improvement of software evolution of multi-language systems, by assessing the state of the art in cross-language program analysis, and discussing the implications for research and practice. We conduct a systematic review over the available literature in seven digital libraries, to find the relevant primary studies on cross-language program analysis, and identify additional studies with manual snowballing. We classify the studies based on several criteria, including their purpose (why), the adopted or suggested approach (how), the information leveraged in each programming language or artifact (what), and the conducted evaluation (quality). Our investigation identified 75 relevant papers, which were analysed in depth to answer eight research questions. The results include objective findings on the diversity of the applied techniques, application domains, programming languages, and reliability of the approaches. 
Building on these findings, several implications for research and practice are discussed, including potential breakthroughs based on use of historic data, and possible negative effects of having a shortage of community-driven research.",\n  annote =       "under revision",\n  author =       "Yazdanshenas, Amir Reza and Moonen, Leon",\n  journal =      "submitted to Journal of Software: Evolution and Process",\n  publisher =    "Wiley",\n  title =        "{Cross-language program analysis for the evolution of multi-language software systems: a systematic literature review}",\n  year =         "2015",\n}\n\n
\n
\n\n\n
\n Contemporary software systems are rarely implemented uniformly in one programming language, and delivered in one type of development artifact. Instead, it is common practice to use the programming language that fits best to develop each part of the system. Moreover, in large-scale legacy software systems, the maintenance process often has to deal with several programming languages and artifacts simultaneously. This heterogeneity complicates most system-wide tasks in the evolution of such multi-language systems, as cross-language dependencies and interactions are substantially more difficult to identify and manage. This paper seeks to provide a basis for the improvement of software evolution of multi-language systems, by assessing the state of the art in cross-language program analysis, and discussing the implications for research and practice. We conduct a systematic review over the available literature in seven digital libraries, to find the relevant primary studies on cross-language program analysis, and identify additional studies with manual snowballing. We classify the studies based on several criteria, including their purpose (why), the adopted or suggested approach (how), the information leveraged in each programming language or artifact (what), and the conducted evaluation (quality). Our investigation identified 75 relevant papers, which were analysed in depth to answer eight research questions. The results include objective findings on the diversity of the applied techniques, application domains, programming languages, and reliability of the approaches. Building on these findings, several implications for research and practice are discussed, including potential breakthroughs based on use of historic data, and possible negative effects of having a shortage of community-driven research.\n
\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 2014\n \n \n (8)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n \n Assembling Multiple-Case Studies: Potential, Principles and Practical Considerations.\n \n \n \n \n\n\n \n Yamashita, A.; and Moonen, L.\n\n\n \n\n\n\n In International Conference on Evaluation and Assessment in Software Engineering (EASE), pages 1–10, May 2014. ACM\n \n\n\n\n
\n\n\n\n \n \n \"AssemblingPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@InProceedings{yamashita:2014:assembling,\n  abstract =     "Case studies are a research method aimed at holistically analyzing a phenomenon in its context. Despite the fact that they cannot be used to answer the same precise research questions as, e.g., can be addressed by controlled experiments, case studies can cope much better with situations having several variables of interest, multiple sources of evidence, or rich contexts that cannot be controlled or isolated. As such, case studies are a promising instrument to study the complex phenomena at play in Software Engineering. However, the use of case studies as research methodology entails certain challenges. We argue that one of the biggest challenges is the case selection bias when conducting multiple-case studies. In practice, cases are frequently selected based on their availability, without appropriate control over moderator factors. This hinders the level of comparability across cases, leading to internal validity issues. In this paper, we discuss the notion of assembling cases as a plausible alternative to selecting cases to overcome the selection bias problem when conducting multiple-case studies. 
In addition, we present and discuss our experiences from applying this approach in a study designed to investigate the impact of software design on maintainability.",\n  author =       "Yamashita, Aiko and Moonen, Leon",\n  booktitle =    "International Conference on Evaluation and Assessment in Software Engineering (EASE)",\n  DOI =          "10.1145/2601248.2601286",\n  file =         ":Users/leon/Documents/Mendeley Desktop/Yamashita, Moonen - 2014 - Assembling Multiple-Case Studies Potential, Principles and Practical Considerations - International Conferenc.pdf:pdf",\n  ISBN =         "9781-450-3247-6-2",\n  keywords =     "case study,empirical studies,internal validity,methodology",\n  month =        may,\n  pages =        "1--10",\n  publisher =    "ACM",\n  title =        "{Assembling Multiple-Case Studies: Potential, Principles and Practical Considerations}",\n  URL =          "http://dl.acm.org/citation.cfm?doid=2601248.2601286",\n  year =         "2014",\n}\n\n
\n
\n\n\n
\n Case studies are a research method aimed at holistically analyzing a phenomenon in its context. Despite the fact that they cannot be used to answer the same precise research questions as, e.g., can be addressed by controlled experiments, case studies can cope much better with situations having several variables of interest, multiple sources of evidence, or rich contexts that cannot be controlled or isolated. As such, case studies are a promising instrument to study the complex phenomena at play in Software Engineering. However, the use of case studies as research methodology entails certain challenges. We argue that one of the biggest challenges is the case selection bias when conducting multiple-case studies. In practice, cases are frequently selected based on their availability, without appropriate control over moderator factors. This hinders the level of comparability across cases, leading to internal validity issues. In this paper, we discuss the notion of assembling cases as a plausible alternative to selecting cases to overcome the selection bias problem when conducting multiple-case studies. In addition, we present and discuss our experiences from applying this approach in a study designed to investigate the impact of software design on maintainability.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Survey on Safety Evidence Change Impact Analysis in Practice : Detailed Description and Analysis (report).\n \n \n \n\n\n \n de la Vara, J. L.; Borg, M.; Wnuk, K.; and Moonen, L.\n\n\n \n\n\n\n Technical Report 2014-18, 2014.\n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@TechReport{dlvara:2014:secia_detail:report,\n  abstract =     "In many application domains, critical systems must comply with safety standards. This involves gathering safety evidence in the form of artefacts such as safety analyses, system specifications, and testing results. These artefacts can evolve during a system's lifecycle, creating a need for impact analysis to guarantee that system safety and compliance are not jeopardised. Although extensive research has been conducted on change impact analysis and on safety evidence management, the knowledge about how safety evidence change impact analysis is addressed in practice is limited. This paper reports on a survey targeted at filling this gap by analysing the circumstances under which safety evidence change impact analysis is addressed, the tool support used, and the challenges faced. We obtained 97 valid responses representing 16 application domains, 28 countries, and 47 safety standards. The results suggest that most practitioners deal with safety evidence change impact analysis during system development and mainly from system specifications. Furthermore, the level of automation in the process is low and insufficient tool support is the most frequent challenge. Other notable findings include that the different artefact types used as safety evidence seem to co-evolve, the evolution of safety case should probably be better managed, and no commercial impact analysis tool has been reported as used for all artefact types. Finally, we identified over 20 areas where the state of the practice in safety evidence change impact analysis can be improved. Keywords.",\n  author =       "de la Vara, Jose Luis and Borg, Markus and Wnuk, Krzysztof and Moonen, Leon",\n  booktitle =    "Technical Report",\n  file =         ":Users/leon/Documents/Mendeley Desktop/de la Vara et al. 
- 2014 - Survey on Safety Evidence Change Impact Analysis in Practice Detailed Description and Analysis (report) - Te.pdf:pdf",\n  keywords =     "change management,impact analysis,safety assurance,safety certification,safety evidence,safety-critical system",\n  number =       "2014-18",\n  pages =        "44",\n  publisher =    "Simula Research Laboratory",\n  title =        "{Survey on Safety Evidence Change Impact Analysis in Practice : Detailed Description and Analysis (report)}",\n  type =         "techreport",\n  year =         "2014",\n}\n\n
\n
\n\n\n
\n In many application domains, critical systems must comply with safety standards. This involves gathering safety evidence in the form of artefacts such as safety analyses, system specifications, and testing results. These artefacts can evolve during a system's lifecycle, creating a need for impact analysis to guarantee that system safety and compliance are not jeopardised. Although extensive research has been conducted on change impact analysis and on safety evidence management, the knowledge about how safety evidence change impact analysis is addressed in practice is limited. This paper reports on a survey targeted at filling this gap by analysing the circumstances under which safety evidence change impact analysis is addressed, the tool support used, and the challenges faced. We obtained 97 valid responses representing 16 application domains, 28 countries, and 47 safety standards. The results suggest that most practitioners deal with safety evidence change impact analysis during system development and mainly from system specifications. Furthermore, the level of automation in the process is low and insufficient tool support is the most frequent challenge. Other notable findings include that the different artefact types used as safety evidence seem to co-evolve, the evolution of safety case should probably be better managed, and no commercial impact analysis tool has been reported as used for all artefact types. Finally, we identified over 20 areas where the state of the practice in safety evidence change impact analysis can be improved. Keywords.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Survey on Safety Evidence Change Impact Analysis for Critical Systems: Summary of Results (report).\n \n \n \n\n\n \n de la Vara, J. L.; Borg, M.; Wnuk, K.; and Moonen, L.\n\n\n \n\n\n\n Technical Report 2014-03, 2014.\n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@TechReport{dlvara:2014:secia_results:report,\n  author =       "de la Vara, Jose Luis and Borg, Markus and Wnuk, Krzysztof and Moonen, Leon",\n  booktitle =    "Technical Report",\n  number =       "2014-03",\n  publisher =    "Simula Research Laboratory",\n  title =        "{Survey on Safety Evidence Change Impact Analysis for Critical Systems: Summary of Results (report)}",\n  year =         "2014",\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Supporting Certification and Evolution of Cyber-Physical Product Families (keynote).\n \n \n \n\n\n \n Moonen, L.\n\n\n \n\n\n\n In Software Evolution in Belgium, the Netherlands and neighbouring countries (BENEVOL), November 2014. \n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@InProceedings{moonen:2014:benevol,\n  abstract =     "The research presented in this talk is part of an ongoing industrial collaboration with Kongsberg Maritime (KM), one of the largest suppliers of maritime systems worldwide. The division that we work with specialises in computerised systems for safety monitoring and automatic corrective actions on unacceptable hazardous situations. In particular, we study a family of complex safety-critical embedded software systems that connect software control components to physical sensors and mechanical actuators. The overall goal of the collaboration is to provide our partner with software analysis tooling that provides source based evidence to support cost-effective certification and evolution of their systems. A frequently advocated approach to manage the development of complex software systems is to compose them from reusable components, instead of starting from scratch. Components may be implemented in different programming languages and are tied together using configuration files, or glue code, defining instantiation, initialisation and interconnections. Although analysis of programs within closed code boundaries has been studied for some decades and is well-established, there is surprisingly little support for incorporating information regarding the composition and configuration of components in the static verification and validation of these systems. Moreover, sharing components between software products introduces dependencies that complicate maintenance and evolution: changes made in a component to address an issue in one product may have undesirable effects on other products in which the same component is used. Therefore, developers not only need to understand how a proposed change will impact the component and product at hand; they also need to understand how it affects the whole product family, including systems that are already deployed. 
Given that these systems contain thousands of components, it is no surprise that it is hard to reason about the impact on a single product, let alone on a complete product family. Conventional impact analysis techniques do not suffice for large-scale software-intensive systems and highly populated product families, and engineers need better support to conduct these tasks. In the talk, we will discuss the techniques we developed to support analysis *across* the components of a heterogeneous component-based system. First, we reverse engineer fine-grained homogeneous models for systems composed of heterogeneous artifacts. Next, we track the information flow in these models using slicing, and apply several transformations that enable us to visualise the information flow at various levels of abstraction, trading off between scope and detail and aimed to serve both safety domain experts as well as developers. These techniques are implemented in a prototype tool-set that has been successfully used to answer software certification questions of our industrial partner. In addition, we discuss our ongoing research to build recommendation technology that supports engineers with the evolution of families of safety-critical, software-intensive systems. This technology builds on extensions of the previously discussed techniques to systematically reverse engineer abstract representations of software products to complete software product families, new algorithms to conduct scalable and precise change impact analysis (CIA) on such representations, and recommendation technology that uses the CIA results and constraint programming to find an evolution strategy that minimises re-certification efforts. 
Finally, if time permits, we will discuss some challenges, research opportunities and initial results around testing of such systems in an industrial setting.",\n  author =       "Moonen, Leon",\n  booktitle =    "Software Evolution in Belgium, the Netherlands and neighbouring countries (BENEVOL)",\n  month =        nov,\n  title =        "{Supporting Certification and Evolution of Cyber-Physical Product Families (keynote)}",\n  year =         "2014",\n}\n\n
\n
\n\n\n
\n The research presented in this talk is part of an ongoing industrial collaboration with Kongsberg Maritime (KM), one of the largest suppliers of maritime systems worldwide. The division that we work with specialises in computerised systems for safety monitoring and automatic corrective actions on unacceptable hazardous situations. In particular, we study a family of complex safety-critical embedded software systems that connect software control components to physical sensors and mechanical actuators. The overall goal of the collaboration is to provide our partner with software analysis tooling that provides source based evidence to support cost-effective certification and evolution of their systems. A frequently advocated approach to manage the development of complex software systems is to compose them from reusable components, instead of starting from scratch. Components may be implemented in different programming languages and are tied together using configuration files, or glue code, defining instantiation, initialisation and interconnections. Although analysis of programs within closed code boundaries has been studied for some decades and is well-established, there is surprisingly little support for incorporating information regarding the composition and configuration of components in the static verification and validation of these systems. Moreover, sharing components between software products introduces dependencies that complicate maintenance and evolution: changes made in a component to address an issue in one product may have undesirable effects on other products in which the same component is used. Therefore, developers not only need to understand how a proposed change will impact the component and product at hand; they also need to understand how it affects the whole product family, including systems that are already deployed. 
Given that these systems contain thousands of components, it is no surprise that it is hard to reason about the impact on a single product, let alone on a complete product family. Conventional impact analysis techniques do not suffice for large-scale software-intensive systems and highly populated product families, and engineers need better support to conduct these tasks. In the talk, we will discuss the techniques we developed to support analysis *across* the components of a heterogeneous component-based system. First, we reverse engineer fine-grained homogeneous models for systems composed of heterogeneous artifacts. Next, we track the information flow in these models using slicing, and apply several transformations that enable us to visualise the information flow at various levels of abstraction, trading off between scope and detail and aimed to serve both safety domain experts as well as developers. These techniques are implemented in a prototype tool-set that has been successfully used to answer software certification questions of our industrial partner. In addition, we discuss our ongoing research to build recommendation technology that supports engineers with the evolution of families of safety-critical, software-intensive systems. This technology builds on extensions of the previously discussed techniques to systematically reverse engineer abstract representations of software products to complete software product families, new algorithms to conduct scalable and precise change impact analysis (CIA) on such representations, and recommendation technology that uses the CIA results and constraint programming to find an evolution strategy that minimises re-certification efforts. Finally, if time permits, we will discuss some challenges, research opportunities and initial results around testing of such systems in an industrial setting.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Proceedings of the 30th IEEE International Conference on Software Maintenance and Evolution.\n \n \n \n \n\n\n \n Müller, H.; Moonen, L.; and Pollock, L.,\n editors.\n \n\n\n \n\n\n\n IEEE, October 2014.\n \n\n\n\n
\n\n\n\n \n \n \"ProceedingsPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@Book{ICSME2014,\n  abstract =     "ICSME 2014 is the premier international venue in software maintenance and evolution, where participants from academia, government, and industry share ideas and experiences in solving challenging software maintenance and evolution problems.",\n  DOI =          "10.1109/ICSME.2014.1",\n  editor =       "M{\\"{u}}ller, Hausi and Moonen, Leon and Pollock, Lori",\n  ISBN =         "978-0-7695-5303-0",\n  month =        oct,\n  publisher =    "IEEE",\n  title =        "{Proceedings of the 30th IEEE International Conference on Software Maintenance and Evolution}",\n  URL =          "http://ieeexplore.ieee.org/xpl/mostRecentIssue.jsp?punumber=6969845",\n  year =         "2014",\n}\n\n
\n
\n\n\n
\n ICSME 2014 is the premier international venue in software maintenance and evolution, where participants from academia, government, and industry share ideas and experiences in solving challenging software maintenance and evolution problems.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Assessment and Evolution of Safety-Critical Cyber-Physical Product Families (keynote).\n \n \n \n\n\n \n Moonen, L.\n\n\n \n\n\n\n In Summerschool on Advanced Techniques & Tools for Software Evolution (SATToSE), July 2014. \n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@InProceedings{moonen:2014:sattose,\n  abstract =     "The research presented in this talk is part of an ongoing industrial collaboration with Kongsberg Maritime (KM), one of the largest suppliers of maritime systems worldwide. The division that we work with specialises in computerised systems for safety monitoring and automatic corrective actions on unacceptable hazardous situations. The overall goal of the collaboration is to provide our partner with software analysis tooling that provides source based evidence to support cost-effective software certification of evolving systems. In particular, we study a family of complex safety-critical embedded software systems that connect software control components to physical sensors and mechanical actuators. A frequently advocated approach to manage the development of such complex software systems is to compose them from reusable components, instead of starting from scratch. Components may be implemented in different programming languages and are tied together using configuration files, or glue code, defining instantiation, initialisation and interconnections. Although correctly engineering the composition and configuration of components is crucial for the overall behaviour, there is surprisingly little support for incorporating this information in the static verification and validation of these systems. Analysing the properties of programs within closed code boundaries has been studied for some decades and is well-established. Moreover, sharing components between software products introduces dependencies that complicate maintenance and evolution: changes made in a component to address an issue in one product may have undesirable effects on other products in which the same component is used. Therefore, developers not only need to understand how a proposed change will impact the component and product at hand; they also need to understand how it affects the whole product family, including systems that are already deployed. 
Given that these systems contain thousands of components, it is no surprise that it is hard to reason about the impact on a single product, let alone on a complete product family. Conventional impact analysis techniques do not suffice for large-scale software-intensive systems and highly populated product families, and engineers need better support to conduct these tasks. In the talk, we will discuss the techniques we developed to support analysis *across* the components of a heterogeneous component-based system. We build upon OMG's Knowledge Discovery Metamodel to reverse engineer fine-grained homogeneous models for systems composed of heterogeneous artifacts. Next, we track the information flow in these models using slicing, and apply several transformations that enable us to visualise the information flow at various levels of abstraction, trading off between scope and detail and aimed to serve both safety domain experts as well as developers. These techniques are implemented in a prototype tool-set that has been successfully used to answer software certification questions of our industrial partner. In addition, we discuss our ongoing research to build recommendation technology that supports engineers with the evolution of families of safety-critical, software-intensive systems. 
This technology builds on extensions of the previously discussed techniques to systematically reverse engineer abstract representations of software products to complete software product families, new algorithms to conduct scalable and precise change impact analysis (CIA) on such representations, and recommendation technology that uses the CIA results and constraint programming to find an evolution strategy that minimises re-certification efforts.",\n  author =       "Moonen, Leon",\n  booktitle =    "Summerschool on Advanced Techniques {\\&} Tools for Software Evolution (SATToSE)",\n  month =        jul,\n  title =        "{Assessment and Evolution of Safety-Critical Cyber-Physical Product Families (keynote)}",\n  year =         "2014",\n}\n\n
\n
\n\n\n
\n The research presented in this talk is part of an ongoing industrial collaboration with Kongsberg Maritime (KM), one of the largest suppliers of maritime systems worldwide. The division that we work with specialises in computerised systems for safety monitoring and automatic corrective actions on unacceptable hazardous situations. The overall goal of the collaboration is to provide our partner with software analysis tooling that provides source based evidence to support cost-effective software certification of evolving systems. In particular, we study a family of complex safety-critical embedded software systems that connect software control components to physical sensors and mechanical actuators. A frequently advocated approach to manage the development of such complex software systems is to compose them from reusable components, instead of starting from scratch. Components may be implemented in different programming languages and are tied together using configuration files, or glue code, defining instantiation, initialisation and interconnections. Although correctly engineering the composition and configuration of components is crucial for the overall behaviour, there is surprisingly little support for incorporating this information in the static verification and validation of these systems. Analysing the properties of programs within closed code boundaries has been studied for some decades and is well-established. Moreover, sharing components between software products introduces dependencies that complicate maintenance and evolution: changes made in a component to address an issue in one product may have undesirable effects on other products in which the same component is used. Therefore, developers not only need to understand how a proposed change will impact the component and product at hand; they also need to understand how it affects the whole product family, including systems that are already deployed. 
Given that these systems contain thousands of components, it is no surprise that it is hard to reason about the impact on a single product, let alone on a complete product family. Conventional impact analysis techniques do not suffice for large-scale software-intensive systems and highly populated product families, and engineers need better support to conduct these tasks. In the talk, we will discuss the techniques we developed to support analysis *across* the components of a heterogeneous component-based system. We build upon OMG's Knowledge Discovery Metamodel to reverse engineer fine-grained homogeneous models for systems composed of heterogeneous artifacts. Next, we track the information flow in these models using slicing, and apply several transformations that enable us to visualise the information flow at various levels of abstraction, trading off between scope and detail and aimed to serve both safety domain experts as well as developers. These techniques are implemented in a prototype tool-set that has been successfully used to answer software certification questions of our industrial partner. In addition, we discuss our ongoing research to build recommendation technology that supports engineers with the evolution of families of safety-critical, software-intensive systems. This technology builds on extensions of the previously discussed techniques to systematically reverse engineer abstract representations of software products to complete software product families, new algorithms to conduct scalable and precise change impact analysis (CIA) on such representations, and recommendation technology that uses the CIA results and constraint programming to find an evolution strategy that minimises re-certification efforts.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Model-based information flow analysis to support software certification (keynote).\n \n \n \n\n\n \n Moonen, L.\n\n\n \n\n\n\n In Workshop on Validation Strategies for Software Evolution (VSSE) at ETAPS, April 2014. \n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@InProceedings{moonen:2014:vsse,\n  abstract =     "The research presented in this talk is part of an ongoing industrial collaboration with Kongsberg Maritime (KM), one of the largest suppliers of maritime systems worldwide. The division that we work with specializes in computerized systems for safety monitoring and automatic corrective actions on unacceptable hazardous situations. The overall goal of the collaboration is to provide our partner with software analysis tooling that provides source based evidence to support software certification. In particular, we study a family of complex safety-critical embedded software systems that connect software control components to physical sensors and mechanical actuators. A frequently advocated approach to manage the development of such complex software systems is to compose them from reusable components, instead of starting from scratch. Components may be implemented in different programming languages and are tied together using configuration files, or glue code, defining instantiation, initialization and interconnections. Although correctly engineering the composition and configuration of components is crucial for the overall behavior, there is surprisingly little support for incorporating this information in the static verification and validation of these systems. Analyzing the properties of programs within closed code boundaries has been studied for some decades and is well-established. The presentation will discuss the techniques we developed to support analysis across the components of a component-based system. We build upon OMG's Knowledge Discovery Metamodel to reverse engineer fine-grained homogeneous models for systems composed of heterogeneous artifacts. 
Next, we track the information flow in these models using slicing, and apply several transformations that enable us to visualize the information flow at various levels of abstraction, trading off between scope and detail and aimed to serve both safety domain experts as well as developers. Our techniques are implemented in a prototype toolset that has been successfully used to answer software certification questions of our industrial partner.",\n  author =       "Moonen, Leon",\n  booktitle =    "Workshop on Validation Strategies for Software Evolution (VSSE) at ETAPS",\n  month =        apr,\n  title =        "{Model-based information flow analysis to support software certification (keynote)}",\n  year =         "2014",\n}\n\n
\n
\n\n\n
\n The research presented in this talk is part of an ongoing industrial collaboration with Kongsberg Maritime (KM), one of the largest suppliers of maritime systems worldwide. The division that we work with specializes in computerized systems for safety monitoring and automatic corrective actions on unacceptable hazardous situations. The overall goal of the collaboration is to provide our partner with software analysis tooling that provides source based evidence to support software certification. In particular, we study a family of complex safety-critical embedded software systems that connect software control components to physical sensors and mechanical actuators. A frequently advocated approach to manage the development of such complex software systems is to compose them from reusable components, instead of starting from scratch. Components may be implemented in different programming languages and are tied together using configuration files, or glue code, defining instantiation, initialization and interconnections. Although correctly engineering the composition and configuration of components is crucial for the overall behavior, there is surprisingly little support for incorporating this information in the static verification and validation of these systems. Analyzing the properties of programs within closed code boundaries has been studied for some decades and is well-established. The presentation will discuss the techniques we developed to support analysis across the components of a component-based system. We build upon OMG's Knowledge Discovery Metamodel to reverse engineer fine-grained homogeneous models for systems composed of heterogeneous artifacts. Next, we track the information flow in these models using slicing, and apply several transformations that enable us to visualize the information flow at various levels of abstraction, trading off between scope and detail and aimed to serve both safety domain experts as well as developers. 
Our techniques are implemented in a prototype toolset that has been successfully used to answer software certification questions of our industrial partner.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Proceedings of the 22nd IEEE International Conference on Program Comprehension.\n \n \n \n\n\n \n Roy, C. K; Begel, A.; and Moonen, L.,\n editors.\n \n\n\n \n\n\n\n ACM, June 2014.\n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@Book{ICPC2014,\n  abstract =     "The International Conference on Program Comprehension (ICPC) is the principal venue for works in the area of program comprehension, which encompasses both the human activity of comprehending software and the technologies for supporting the program comprehension. ICPC 2014 provides a quality forum for researchers and practitioners from academia, industry, and government to present and to discuss state-of-the-art results and best practices in the field of program comprehension. ICPC 2014 takes place June 2-3, 2014, co-located with the 36th International Conference on Software Engineering (ICSE'14), at Hyderabad, India. A special issue based on a selection of best papers from ICPC'2014 will be published in Wiley's Journal of Software: Evolution and Process (JSEP).",\n  editor =       "Roy, Chanchal K and Begel, Andrew and Moonen, Leon",\n  ISBN =         "978-1-4503-2879-1",\n  month =        jun,\n  publisher =    "ACM",\n  title =        "{Proceedings of the 22nd IEEE International Conference on Program Comprehension}",\n  year =         "2014",\n}\n\n
\n
\n\n\n
\n The International Conference on Program Comprehension (ICPC) is the principal venue for works in the area of program comprehension, which encompasses both the human activity of comprehending software and the technologies for supporting the program comprehension. ICPC 2014 provides a quality forum for researchers and practitioners from academia, industry, and government to present and to discuss state-of-the-art results and best practices in the field of program comprehension. ICPC 2014 takes place June 2-3, 2014, co-located with the 36th International Conference on Software Engineering (ICSE'14), at Hyderabad, India. A special issue based on a selection of best papers from ICPC'2014 will be published in Wiley's Journal of Software: Evolution and Process (JSEP).\n
\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 2013\n \n \n (9)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n \n First International Workshop on Multi Product Line Engineering (MultiPLE 2013).\n \n \n \n \n\n\n \n Moonen, L.; Behjati, R.; Rabiser, R.; Acharya, M.; Tekinerdogan, B.; and Kang, K.\n\n\n \n\n\n\n In International Software Product Line Conference (SPLC) - Proceedings Volume 2, pages 95, ., 2013. ACM\n \n\n\n\n
\n\n\n\n \n \n \"FirstPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@InProceedings{moonen:2013:multiple:proceedings,\n  abstract =     "In an industrial context, software systems are rarely developed by a single organization. For software product lines, this means that various organizations collaborate to provide and integrate the assets used in a product line. It is not uncommon that these assets themselves are built as product lines, a practice which is referred to as multi product lines. This cross-organizational distribution of reusable assets leads to numerous challenges, such as inconsistent configuration, costly and time-consuming integration, diverging evolution speed and direction, and inadequate testing. The workshop is aimed at discussing the challenges involved with the development and evolution of multi product lines and the assets used for their production and these proceedings include the papers accepted for the workshop.",\n  address =      ".",\n  author =       "Moonen, Leon and Behjati, Razieh and Rabiser, Rick and Acharya, Mithun and Tekinerdogan, Bedir and Kang, Kyo-Chul",\n  booktitle =    "International Software Product Line Conference (SPLC) - Proceedings Volume 2",\n  chapter =      "5",\n  DOI =          "10.1145/2491627.2499882",\n  file =         ":Users/leon/Documents/Mendeley Desktop/Moonen et al. - 2013 - Proceedings of the First International Workshop on Multi Product Line Engineering (MultiPLE 2013) - International.pdf:pdf",\n  ISBN =         "978-1-4503-1968-3",\n  keywords =     "Conference,certification,cross-organizational product line engineering,design,evolution,multi product lines,software ecosystems",\n  pages =        "95",\n  publisher =    "ACM",\n  title =        "{First International Workshop on Multi Product Line Engineering (MultiPLE 2013)}",\n  type =         "Conference",\n  URL =          "http://dl.acm.org/citation.cfm?doid=2491627.2499882",\n  year =         "2013",\n}\n\n
\n
\n\n\n
\n In an industrial context, software systems are rarely developed by a single organization. For software product lines, this means that various organizations collaborate to provide and integrate the assets used in a product line. It is not uncommon that these assets themselves are built as product lines, a practice which is referred to as multi product lines. This cross-organizational distribution of reusable assets leads to numerous challenges, such as inconsistent configuration, costly and time-consuming integration, diverging evolution speed and direction, and inadequate testing. The workshop is aimed at discussing the challenges involved with the development and evolution of multi product lines and the assets used for their production and these proceedings include the papers accepted for the workshop.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Towards a Taxonomy of Programming-Related Difficulties during Maintenance.\n \n \n \n \n\n\n \n Yamashita, A.; and Moonen, L.\n\n\n \n\n\n\n In International Conference on Software Maintenance (ICSM), pages 424–427, September 2013. IEEE\n \n\n\n\n
\n\n\n\n \n \n \"TowardsPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@InProceedings{yamashita:2013:taxonomy,\n  abstract =     "Empirical studies investigating relationships between source code characteristics and maintenance outcomes rarely use causal models to explain the relations between the code characteristics and the outcomes. We conjecture that one of the reasons behind this is the lack of a comprehensive and detailed compendium of programming-related difficulties and their effects on different maintenance outcomes. This paper takes the first step in addressing this situation. Based on empirical data from a maintenance project that was observed in detail for 7 weeks, we propose a preliminary taxonomy to describe the programming-related difficulties that developers face during maintenance.",\n  author =       "Yamashita, Aiko and Moonen, Leon",\n  booktitle =    "International Conference on Software Maintenance (ICSM)",\n  DOI =          "10.1109/ICSM.2013.63",\n  file =         ":Users/leon/Documents/Mendeley Desktop/Yamashita, Moonen - 2013 - Towards a Taxonomy of Programming-Related Difficulties during Maintenance - International Conference on Softw.pdf:pdf",\n  ISBN =         "978-0-7695-4981-1",\n  keywords =     "empirical study,maintainability,maintenance difficulties,maintenance problems,program comprehension",\n  month =        sep,\n  pages =        "424--427",\n  publisher =    "IEEE",\n  title =        "{Towards a Taxonomy of Programming-Related Difficulties during Maintenance}",\n  URL =          "http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=6676923",\n  year =         "2013",\n}\n\n
\n
\n\n\n
\n Empirical studies investigating relationships between source code characteristics and maintenance outcomes rarely use causal models to explain the relations between the code characteristics and the outcomes. We conjecture that one of the reasons behind this is the lack of a comprehensive and detailed compendium of programming-related difficulties and their effects on different maintenance outcomes. This paper takes the first step in addressing this situation. Based on empirical data from a maintenance project that was observed in detail for 7 weeks, we propose a preliminary taxonomy to describe the programming-related difficulties that developers face during maintenance.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n ARCS: Aligning Research on Code Smells.\n \n \n \n\n\n \n Moonen, L.; Yamashita, A.; Hall, T.; and Counsell, S.\n\n\n \n\n\n\n In European Software Engineering Conference and ACM SIGSOFT Symposium on the Foundations of Software Engineering (ESEC/FSE), August 2013. ACM\n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n\n\n\n
\n
@InProceedings{moonen:2013:arcs,\n  abstract =     "Code smells are a metaphor to describe issues with source code that are associated with bad program design and bad programming practices which negatively affect software comprehensibility and maintainability. As such, code smells could be ideal indicators to monitor the ``health'' of a system throughout its evolution, and to guide refactoring efforts to sustain comprehensibility and maintainability. Much code smell related work by both researchers and tool vendors has focused on the formalization and detection of code smells. Recently, studies have started to deeper investigate the impact of smells on software maintenance and evolution. However, there is a long road to go before code smell analysis can cost-efficiently drive exactly those refactorings that will improve the comprehensibility and maintainability of a given system. We identify four challenges to be tackled before we can reach this goal: (1) Lack of a common vocabulary to build a consistent knowledge base, (2) Lack of an ontological framework to compare/synthesize results across studies, (3) Lack of an evaluation framework to compare/evaluate the quality of detection approaches, and (4) Lack of a research agenda to make the knowledge/tools accessible to industrial contexts. The ARCS workshop will bring the members of this community together to establish a common research agenda addressing each of these challenges.",\n  author =       "Moonen, Leon and Yamashita, Aiko and Hall, Tracy and Counsell, Steve",\n  booktitle =    "European Software Engineering Conference and ACM SIGSOFT Symposium on the Foundations of Software Engineering (ESEC/FSE)",\n  file =         ":Users/leon/Documents/Mendeley Desktop/Moonen et al. 
- 2013 - ARCS Aligning Research on Code Smells - European Software Engineering Conference and ACM SIGSOFT Symposium on the.pdf:pdf",\n  keywords =     "code smells,maintainability",\n  month =        aug,\n  publisher =    "ACM",\n  title =        "{ARCS: Aligning Research on Code Smells}",\n  year =         "2013",\n}\n\n
\n
\n\n\n
\n Code smells are a metaphor to describe issues with source code that are associated with bad program design and bad programming practices which negatively affect software comprehensibility and maintainability. As such, code smells could be ideal indicators to monitor the ``health'' of a system throughout its evolution, and to guide refactoring efforts to sustain comprehensibility and maintainability. Much code smell related work by both researchers and tool vendors has focused on the formalization and detection of code smells. Recently, studies have started to deeper investigate the impact of smells on software maintenance and evolution. However, there is a long road to go before code smell analysis can cost-efficiently drive exactly those refactorings that will improve the comprehensibility and maintainability of a given system. We identify four challenges to be tackled before we can reach this goal: (1) Lack of a common vocabulary to build a consistent knowledge base, (2) Lack of an ontological framework to compare/synthesize results across studies, (3) Lack of an evaluation framework to compare/evaluate the quality of detection approaches, and (4) Lack of a research agenda to make the knowledge/tools accessible to industrial contexts. The ARCS workshop will bring the members of this community together to establish a common research agenda addressing each of these challenges.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Surveying developer knowledge and interest in code smells through online freelance marketplaces.\n \n \n \n \n\n\n \n Yamashita, A.; and Moonen, L.\n\n\n \n\n\n\n In Sadowski, C.; and Begel, A., editor(s), International Workshop on User Evaluations for Software Engineering Researchers (USER), pages 5–8, May 2013. IEEE\n \n\n\n\n
\n\n\n\n \n \n \"SurveyingPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@InProceedings{yamashita:2013:surveying,\n  abstract =     "This paper discusses the use of freelance marketplaces to conduct a survey amongst professional developers about specific software engineering phenomena, in our case their knowledge and interest in code smells and their detection/removal. We first discuss the context and motivation of our research, and the idea of using freelance marketplaces for conducting studies involving software professionals. Then, we describe the design of the survey study and the specifics on the selected freelance marketplace (i.e., Freelancer.com). Finally, we discuss why the use of freelance markets constitutes a feasible and advantageous approach for conducting user evaluations that involve large numbers of software professionals, and what challenges such an approach may entail.",\n  author =       "Yamashita, Aiko and Moonen, Leon",\n  booktitle =    "International Workshop on User Evaluations for Software Engineering Researchers (USER)",\n  DOI =          "10.1109/USER.2013.6603077",\n  editor =       "Sadowski, Caitlin and Begel, Andrew",\n  file =         ":Users/leon/Documents/Mendeley Desktop/Yamashita, Moonen - 2013 - Surveying developer knowledge and interest in code smells through online freelance marketplaces - Internation.pdf:pdf",\n  ISBN =         "978-1-4673-6433-1",\n  keywords =     "code smells,developer knowledge,survey,user evaluation",\n  month =        may,\n  pages =        "5--8",\n  publisher =    "IEEE",\n  title =        "{Surveying developer knowledge and interest in code smells through online freelance marketplaces}",\n  URL =          "http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=6603077",\n  year =         "2013",\n}\n\n
\n
\n\n\n
\n This paper discusses the use of freelance marketplaces to conduct a survey amongst professional developers about specific software engineering phenomena, in our case their knowledge and interest in code smells and their detection/removal. We first discuss the context and motivation of our research, and the idea of using freelance marketplaces for conducting studies involving software professionals. Then, we describe the design of the survey study and the specifics on the selected freelance marketplace (i.e., Freelancer.com). Finally, we discuss why the use of freelance markets constitutes a feasible and advantageous approach for conducting user evaluations that involve large numbers of software professionals, and what challenges such an approach may entail.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n To what extent can maintenance problems be predicted by code smell detection? – An empirical study.\n \n \n \n \n\n\n \n Yamashita, A.; and Moonen, L.\n\n\n \n\n\n\n Information and Software Technology, 55(12): 2223–2242. December 2013.\n \n\n\n\n
\n\n\n\n \n \n \"ToPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@Article{yamashita:2013:extent,\n  abstract =     "Context: Code smells are indicators of poor coding and design choices that can cause problems during software maintenance and evolution. Objective: This study is aimed at a detailed investigation to which extent problems in maintenance projects can be predicted by the detection of currently known code smells. Method: A multiple case study was conducted, in which the problems faced by six developers working on four different Java systems were registered on a daily basis, for a period up to four weeks. Where applicable, the files associated to the problems were registered. Code smells were detected in the pre-maintenance version of the systems, using the tools Borland Together and InCode. In-depth examination of quantitative and qualitative data was conducted to determine if the observed problems could be explained by the detected smells. Results: From the total set of problems, roughly 30{\\%} percent were related to files containing code smells. In addition, interaction effects were observed amongst code smells, and between code smells and other code characteristics, and these effects led to severe problems during maintenance. Code smell interactions were observed between collocated smells (i.e., in the same file), and between coupled smells (i.e., spread over multiple files that were coupled). Conclusions: The role of code smells on the overall system maintainability is relatively minor, thus complementary approaches are needed to achieve more comprehensive assessments of maintainability. Moreover, to improve the explanatory power of code smells, interaction effects amongst collocated smells and coupled smells should be taken into account during analysis. 
Keywords: code smells, maintainability, empirical study",\n  author =       "Yamashita, Aiko and Moonen, Leon",\n  DOI =          "10.1016/j.infsof.2013.08.002",\n  file =         ":Users/leon/Documents/Mendeley Desktop/Yamashita, Moonen - 2013 - To what extent can maintenance problems be predicted by code smell detection – An empirical study - Informa.pdf:pdf",\n  ISSN =         "09505849",\n  journal =      "Information and Software Technology",\n  keywords =     "case study,code smells,empirical study,maintainability",\n  month =        dec,\n  number =       "12",\n  pages =        "2223--2242",\n  title =        "{To what extent can maintenance problems be predicted by code smell detection? – An empirical study}",\n  URL =          "http://linkinghub.elsevier.com/retrieve/pii/S0950584913001614",\n  volume =       "55",\n  year =         "2013",\n}\n\n
\n
\n\n\n
\n Context: Code smells are indicators of poor coding and design choices that can cause problems during software maintenance and evolution. Objective: This study is aimed at a detailed investigation to which extent problems in maintenance projects can be predicted by the detection of currently known code smells. Method: A multiple case study was conducted, in which the problems faced by six developers working on four different Java systems were registered on a daily basis, for a period up to four weeks. Where applicable, the files associated to the problems were registered. Code smells were detected in the pre-maintenance version of the systems, using the tools Borland Together and InCode. In-depth examination of quantitative and qualitative data was conducted to determine if the observed problems could be explained by the detected smells. Results: From the total set of problems, roughly 30% percent were related to files containing code smells. In addition, interaction effects were observed amongst code smells, and between code smells and other code characteristics, and these effects led to severe problems during maintenance. Code smell interactions were observed between collocated smells (i.e., in the same file), and between coupled smells (i.e., spread over multiple files that were coupled). Conclusions: The role of code smells on the overall system maintainability is relatively minor, thus complementary approaches are needed to achieve more comprehensive assessments of maintainability. Moreover, to improve the explanatory power of code smells, interaction effects amongst collocated smells and coupled smells should be taken into account during analysis. Keywords: code smells, maintainability, empirical study\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Do Developers Care About Code Smells? – An Exploratory Survey (report).\n \n \n \n\n\n \n Yamashita, A.; and Moonen, L.\n\n\n \n\n\n\n Technical Report 2013-01, 2013.\n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@TechReport{yamashita:2013:developers:report,\n  abstract =     "Code smells are a well-known metaphor to describe symptoms of code decay or other issues with code quality which can lead to a variety of maintenance problems. Even though code smell detection and removal has been well-researched over the last decade, it remains open to debate whether or not code smells should be considered meaningful conceptualizations of code quality issues from the developer's perspective. To some extend, this question applies as well to the results provided by current code smell detection tools. Are code smells really important for developers? If they are not, is this due to the lack of relevance of the underlying concepts, due to the lack of awareness about code smells on the developers' side, or due to the lack of appropriate tools for code smell analysis or removal? In order to align and direct research efforts to address actual needs and problems of professional developers, we need to better understand the knowledge about, and interest in code smells, together with their perceived criticality. This paper reports on the results obtained from an exploratory survey involving 85 professional software developers.",\n  author =       "Yamashita, Aiko and Moonen, Leon",\n  booktitle =    "Technical Report",\n  file =         ":Users/leon/Documents/Mendeley Desktop/Yamashita, Moonen - 2013 - Do Developers Care About Code Smells -- An Exploratory Survey (report) - Technical Report.pdf:pdf",\n  keywords =     "code analysis tools,code smell detection,code smells,maintainability,refactoring,survey,usability",\n  number =       "2013-01",\n  pages =        "11",\n  publisher =    "Simula Research Laboratory",\n  title =        "{Do Developers Care About Code Smells? -- An Exploratory Survey (report)}",\n  year =         "2013",\n}\n\n
\n
\n\n\n
\n Code smells are a well-known metaphor to describe symptoms of code decay or other issues with code quality which can lead to a variety of maintenance problems. Even though code smell detection and removal has been well-researched over the last decade, it remains open to debate whether or not code smells should be considered meaningful conceptualizations of code quality issues from the developer's perspective. To some extend, this question applies as well to the results provided by current code smell detection tools. Are code smells really important for developers? If they are not, is this due to the lack of relevance of the underlying concepts, due to the lack of awareness about code smells on the developers' side, or due to the lack of appropriate tools for code smell analysis or removal? In order to align and direct research efforts to address actual needs and problems of professional developers, we need to better understand the knowledge about, and interest in code smells, together with their perceived criticality. This paper reports on the results obtained from an exploratory survey involving 85 professional software developers.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Do developers care about code smells? An exploratory survey.\n \n \n \n \n\n\n \n Yamashita, A.; and Moonen, L.\n\n\n \n\n\n\n In Working Conference on Reverse Engineering (WCRE), pages 242–251, October 2013. IEEE\n \n\n\n\n
\n\n\n\n \n \n \"DoPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@InProceedings{yamashita:2013:developers,\n  abstract =     "Code smells are a well-known metaphor to describe symptoms of code decay or other issues with code quality which can lead to a variety of maintenance problems. Even though code smell detection and removal has been well-researched over the last decade, it remains open to debate whether or not code smells should be considered meaningful conceptualizations of code quality issues from the developer's perspective. To some extend, this question applies as well to the results provided by current code smell detection tools. Are code smells really important for developers? If they are not, is this due to the lack of relevance of the underlying concepts, due to the lack of awareness about code smells on the developers' side, or due to the lack of appropriate tools for code smell analysis or removal? In order to align and direct research efforts to address actual needs and problems of professional developers, we need to better understand the knowledge about, and interest in code smells, together with their perceived criticality. This paper reports on the results obtained from an exploratory survey involving 85 professional software developers.",\n  author =       "Yamashita, Aiko and Moonen, Leon",\n  booktitle =    "Working Conference on Reverse Engineering (WCRE)",\n  DOI =          "10.1109/WCRE.2013.6671299",\n  file =         ":Users/leon/Documents/Mendeley Desktop/Yamashita, Moonen - 2013 - Do developers care about code smells An exploratory survey - 2013 20th Working Conference on Reverse Engineer.pdf:pdf",\n  ISBN =         "9781-479-9293-1-3",\n  keywords =     "code analysis tools,code smell detection,code smells,maintainability,refactoring,survey,usability",\n  month =        oct,\n  pages =        "242--251",\n  publisher =    "IEEE",\n  title =        "{Do developers care about code smells? 
An exploratory survey}",\n  type =         "Conference",\n  URL =          "http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=6671299 http://ieeexplore.ieee.org/document/6671299/",\n  year =         "2013",\n}\n\n
\n
\n\n\n
\n Code smells are a well-known metaphor to describe symptoms of code decay or other issues with code quality which can lead to a variety of maintenance problems. Even though code smell detection and removal has been well-researched over the last decade, it remains open to debate whether or not code smells should be considered meaningful conceptualizations of code quality issues from the developer's perspective. To some extend, this question applies as well to the results provided by current code smell detection tools. Are code smells really important for developers? If they are not, is this due to the lack of relevance of the underlying concepts, due to the lack of awareness about code smells on the developers' side, or due to the lack of appropriate tools for code smell analysis or removal? In order to align and direct research efforts to address actual needs and problems of professional developers, we need to better understand the knowledge about, and interest in code smells, together with their perceived criticality. This paper reports on the results obtained from an exploratory survey involving 85 professional software developers.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Proceedings of the First International Workshop on Multi Product Line Engineering (MultiPLE 2013).\n \n \n \n\n\n \n Moonen, L.; Behjati, R.; Rabiser, R.; Acharya, M.; Tekinerdogan, B.; and Kang, K.\n\n\n \n\n\n\n In International Software Product Line Conference (SPLC) - Proceedings Volume 2, pages 95, 2013. ACM\n \n\n\n\n
\n\n\n\n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@InProceedings{moonen:2013:multiple:proceedings,\n  abstract =     "In an industrial context, software systems are rarely developed by a single organization. For software product lines, this means that various organizations collaborate to provide and integrate the assets used in a product line. It is not uncommon that these assets themselves are built as product lines, a practice which is referred to as multi product lines. This cross-organizational distribution of reusable assets leads to numerous challenges, such as inconsistent configuration, costly and time-consuming integration, diverging evolution speed and direction, and inadequate testing. The workshop is aimed at discussing the challenges involved with the development and evolution of multi product lines and the assets used for their production and these proceedings include the papers accepted for the workshop.",\n  author =       "Moonen, Leon and Behjati, Razieh and Rabiser, Rick and Acharya, Mithun and Tekinerdogan, Bedir and Kang, Kyo-Chul",\n  booktitle =    "International Software Product Line Conference (SPLC) - Proceedings Volume 2",\n  chapter =      "5",\n  DOI =          "10.1145/2499777.2500721",\n  file =         ":Users/leon/Documents/Mendeley Desktop//Moonen et al. - 2013 - Proceedings of the First International Workshop on Multi Product Line Engineering (MultiPLE 2013) - International.pdf:pdf",\n  ISBN =         "9781-450-3232-5-3",\n  keywords =     "certification,cross-organizational product line engineering,design,evolution,multi product lines,software ecosystems",\n  number =       "MultiPLE",\n  pages =        "95",\n  publisher =    "ACM",\n  title =        "{Proceedings of the First International Workshop on Multi Product Line Engineering (MultiPLE 2013)}",\n  year =         "2013",\n}\n\n
\n
\n\n\n
\n In an industrial context, software systems are rarely developed by a single organization. For software product lines, this means that various organizations collaborate to provide and integrate the assets used in a product line. It is not uncommon that these assets themselves are built as product lines, a practice which is referred to as multi product lines. This cross-organizational distribution of reusable assets leads to numerous challenges, such as inconsistent configuration, costly and time-consuming integration, diverging evolution speed and direction, and inadequate testing. The workshop is aimed at discussing the challenges involved with the development and evolution of multi product lines and the assets used for their production and these proceedings include the papers accepted for the workshop.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Exploring the Impact of Inter-Smell Relations on Software Maintainability: an Empirical Study.\n \n \n \n \n\n\n \n Yamashita, A.; and Moonen, L.\n\n\n \n\n\n\n In International Conference on Software Engineering (ICSE), pages 682–691, May 2013. ACM/IEEE\n \n\n\n\n
\n\n\n\n \n \n \"ExploringPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@InProceedings{yamashita:2013:exploring,\n  abstract =     "Code smells are indicators of issues with source code quality that may hinder evolution. While previous studies mainly focused on the effects of individual code smells on maintainability, we conjecture that not only the individual code smells but also the interactions between code smells affect maintenance. We empirically investigate the interactions amongst 12 code smells and analyze how those interactions relate to maintenance problems. Professional developers were hired for a period of four weeks to implement change requests on four medium-sized Java systems with known smells. On a daily basis, we recorded what specific problems they faced and which source files were associated with them. The collected data was analyzed using Principal Component Analysis (PCA) to identify patterns of collocated code smells. Analysis of the nature of the problems observed revealed how certain smells that were collocated in the same file interacted with each other, causing maintenance problems. Moreover, we found that code smell interactions occurred across coupled files, with comparable negative effects as same-file collocation. We argue that future studies into the effects of code smells on maintainability should integrate dependency analysis in their process so that they can obtain a more complete understanding by including these coupled interactions. 
Index Terms: code smells; inter-smells; maintenance; quality.",\n  author =       "Yamashita, Aiko and Moonen, Leon",\n  booktitle =    "International Conference on Software Engineering (ICSE)",\n  DOI =          "10.1109/ICSE.2013.6606614",\n  file =         ":Users/leon/Documents/Mendeley Desktop/Yamashita, Moonen - 2013 - Exploring the impact of inter-smell relations on software maintainability An empirical study - 35th Internati.pdf:pdf",\n  ISBN =         "9781-467-3307-6-3",\n  keywords =     "bad smells,code smell interaction,code smells,inter-smell relations,maintainability,software quality assurance",\n  month =        may,\n  pages =        "682--691",\n  publisher =    "ACM/IEEE",\n  title =        "{Exploring the Impact of Inter-Smell Relations on Software Maintainability: an Empirical Study}",\n  type =         "Conference",\n  URL =          "http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=6606614",\n  year =         "2013",\n}\n\n
\n
\n\n\n
\n Code smells are indicators of issues with source code quality that may hinder evolution. While previous studies mainly focused on the effects of individual code smells on maintainability, we conjecture that not only the individual code smells but also the interactions between code smells affect maintenance. We empirically investigate the interactions amongst 12 code smells and analyze how those interactions relate to maintenance problems. Professional developers were hired for a period of four weeks to implement change requests on four medium-sized Java systems with known smells. On a daily basis, we recorded what specific problems they faced and which source files were associated with them. The collected data was analyzed using Principal Component Analysis (PCA) to identify patterns of collocated code smells. Analysis of the nature of the problems observed revealed how certain smells that were collocated in the same file interacted with each other, causing maintenance problems. Moreover, we found that code smell interactions occurred across coupled files, with comparable negative effects as same-file collocation. We argue that future studies into the effects of code smells on maintainability should integrate dependency analysis in their process so that they can obtain a more complete understanding by including these coupled interactions. Index Terms: code smells; inter-smells; maintenance; quality.\n
\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 2012\n \n \n (9)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n Exploring the impact of inter-smell relations in the maintainability of a system: An empirical study (report).\n \n \n \n\n\n \n Yamashita, A.; and Moonen, L.\n\n\n \n\n\n\n Technical Report 2012-14, 2012.\n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@TechReport{yamashita:2012:exploring:report,\n  abstract =     "Code smells are indicators of deeper problems in the design that may cause difficulties in the evolution of a system. While previous studies have mainly focused on studying the effects of individual smells on maintainability, we believe that interactions tend to occur between code smells. The research in this paper investigates the potential interactions amongst twelve different code smells, and how those interactions can lead to maintenance problems. Four medium-sized systems with equivalent functionality but dissimilar design were examined for smells. The systems were the object of several change requests for a period of four weeks. During that period, we recorded on a daily basis problems faced by developers and their associated Java files. The first analysis is based on Principal Component Analysis (PCA), to identify components formed by collocated code smells (i.e., smells located in the same file). Analysis on the nature of the problems, as reported by the developers in daily interviews and think-aloud sessions, revealed how some of the collocated smells interacted with each other, causing maintenance problems. 
Finally, we could observe that some interactions occur across files, for which we suggest integrating dependency analysis when analyzing effects of code smells on maintainability.",\n  author =       "Yamashita, Aiko and Moonen, Leon",\n  booktitle =    "Technical Report",\n  file =         ":Users/leon/Documents/Mendeley Desktop/Yamashita, Moonen - 2012 - Exploring the impact of inter-smell relations in the maintainability of a system An empirical study (report).pdf:pdf",\n  keywords =     "bad smells,code smells,inter-smell relations,smell interaction,software maintenance,software quality",\n  number =       "2012-14",\n  publisher =    "Simula Research Laboratory",\n  title =        "{Exploring the impact of inter-smell relations in the maintainability of a system: An empirical study (report)}",\n  year =         "2012",\n}\n\n
\n
\n\n\n
\n Code smells are indicators of deeper problems in the design that may cause difficulties in the evolution of a system. While previous studies have mainly focused on studying the effects of individual smells on maintainability, we believe that interactions tend to occur between code smells. The research in this paper investigates the potential interactions amongst twelve different code smells, and how those interactions can lead to maintenance problems. Four medium-sized systems with equivalent functionality but dissimilar design were examined for smells. The systems were the object of several change requests for a period of four weeks. During that period, we recorded on a daily basis problems faced by developers and their associated Java files. The first analysis is based on Principal Component Analysis (PCA), to identify components formed by collocated code smells (i.e., smells located in the same file). Analysis on the nature of the problems, as reported by the developers in daily interviews and think-aloud sessions, revealed how some of the collocated smells interacted with each other, causing maintenance problems. Finally, we could observe that some interactions occur across files, for which we suggest integrating dependency analysis when analyzing effects of code smells on maintainability.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Assuring software quality by code smell detection.\n \n \n \n \n\n\n \n van Emden, E.; and Moonen, L.\n\n\n \n\n\n\n In Working Conference on Reverse Engineering (WCRE), pages xix, October 2012. IEEE\n \n\n\n\n
\n\n\n\n \n \n \"AssuringPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@InProceedings{emden:2012:assuring,\n  abstract =     "In this retrospective we will review the paper “Java Quality Assurance by Detecting Code Smells” that was published ten years ago at WCRE. The work presents an approach for the automatic detection and visualization of code smells and discusses how this approach could be used in the design of a software inspection tool. The feasibility of the proposed approach was illustrated with the development of jCOSMO, a prototype code smell browser that detects and visualizes code smells in JAVA source code. It was the first tool to automatically detect code smells in source code, and we demonstrated the application of this tool in an industrial quality assessment case study. In addition to reviewing the WCRE 2002 work, we will discuss subsequent developments in this area by looking at a selection of papers that were published in its wake. In particular, we will have a look at recent related work in which we empirically investigated the relation between code smells and software maintainability in a longitudinal study where professional developers were observed while maintaining four different software systems that exhibited known code smells. 
We conclude with a discussion of the lessons learned and opportunities for further research.",\n  author =       "van Emden, Eva and Moonen, Leon",\n  booktitle =    "Working Conference on Reverse Engineering (WCRE)",\n  DOI =          "10.1109/WCRE.2012.69",\n  file =         ":Users/leon/Documents/Mendeley Desktop/Emden, Moonen - 2012 - Assuring software quality by code smell detection - Working Conference on Reverse Engineering (WCRE).pdf:pdf",\n  ISBN =         "9780-7695-489-1-3",\n  keywords =     "Java,code smells,refactoring,software inspection,software quality assurance",\n  month =        oct,\n  pages =        "xix",\n  publisher =    "IEEE",\n  title =        "{Assuring software quality by code smell detection}",\n  URL =          "http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=6385092",\n  year =         "2012",\n}\n\n
\n
\n\n\n
\n In this retrospective we will review the paper “Java Quality Assurance by Detecting Code Smells” that was published ten years ago at WCRE. The work presents an approach for the automatic detection and visualization of code smells and discusses how this approach could be used in the design of a software inspection tool. The feasibility of the proposed approach was illustrated with the development of jCOSMO, a prototype code smell browser that detects and visualizes code smells in JAVA source code. It was the first tool to automatically detect code smells in source code, and we demonstrated the application of this tool in an industrial quality assessment case study. In addition to reviewing the WCRE 2002 work, we will discuss subsequent developments in this area by looking at a selection of papers that were published in its wake. In particular, we will have a look at recent related work in which we empirically investigated the relation between code smells and software maintainability in a longitudinal study where professional developers were observed while maintaining four different software systems that exhibited known code smells. We conclude with a discussion of the lessons learned and opportunities for further research.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Do code smells reflect important maintainability aspects?.\n \n \n \n \n\n\n \n Yamashita, A.; and Moonen, L.\n\n\n \n\n\n\n In International Conference on Software Maintenance (ICSM), pages 306–315, September 2012. IEEE\n \n\n\n\n
\n\n\n\n \n \n \"DoPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@InProceedings{yamashita:2012:code,\n  abstract =     "Code smells are manifestations of design flaws that can degrade code maintainability. As such, the existence of code smells seems an ideal indicator for maintainability assessments. However, to achieve comprehensive and accurate evaluations based on code smells, we need to know how well they reflect factors affecting maintainability. After identifying which maintainability factors are reflected by code smells and which not, we can use complementary means to assess the factors that are not addressed by smells. This paper reports on an empirical study that investigates the extent to which code smells reflect factors affecting maintainability that have been identified as important by programmers. We consider two sources for our analysis: (1) expert-based maintainability assessments of four Java systems before they entered a maintenance project, and (2) observations and interviews with professional developers who maintained these systems during 14 working days and implemented a number of change requests.",\n  author =       "Yamashita, Aiko and Moonen, Leon",\n  booktitle =    "International Conference on Software Maintenance (ICSM)",\n  DOI =          "10.1109/ICSM.2012.6405287",\n  file =         ":Users/leon/Documents/Mendeley Desktop/Yamashita, Moonen - 2012 - Do code smells reflect important maintainability aspects - International Conference on Software Maintenance (.pdf:pdf",\n  ISBN =         "9781-467-3231-2-3",\n  keywords =     "case study,code smells,empirical study,maintainability",\n  month =        sep,\n  pages =        "306--315",\n  publisher =    "IEEE",\n  title =        "{Do code smells reflect important maintainability aspects?}",\n  URL =          "http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=6405287",\n  year =         "2012",\n}\n\n
\n
\n\n\n
\n Code smells are manifestations of design flaws that can degrade code maintainability. As such, the existence of code smells seems an ideal indicator for maintainability assessments. However, to achieve comprehensive and accurate evaluations based on code smells, we need to know how well they reflect factors affecting maintainability. After identifying which maintainability factors are reflected by code smells and which not, we can use complementary means to assess the factors that are not addressed by smells. This paper reports on an empirical study that investigates the extent to which code smells reflect factors affecting maintainability that have been identified as important by programmers. We consider two sources for our analysis: (1) expert-based maintainability assessments of four Java systems before they entered a maintenance project, and (2) observations and interviews with professional developers who maintained these systems during 14 working days and implemented a number of change requests.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Fine-Grained Change Impact Analysis for Component-Based Product Families.\n \n \n \n \n\n\n \n Yazdanshenas, A. R.; and Moonen, L.\n\n\n \n\n\n\n In International Conference on Software Maintenance (ICSM), pages 119–128, September 2012. IEEE\n \n\n\n\n
\n\n\n\n \n \n \"Fine-GrainedPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@InProceedings{yazdanshenas:2012:finegrained,\n  abstract =     "Developing software product-lines based on a set of shared components is a proven tactic to enhance reuse, quality, and time to market in producing a portfolio of products. Large-scale product families face rapidly increasing maintenance challenges as their evolution can happen both as a result of collective domain engineering activities, and as a result of product-specific developments. To make informed decisions about prospective modifications, developers need to estimate what other sections of the system will be affected and need attention, which is known as change impact analysis. This paper contributes a method to carry out change impact analysis in a component-based product family, based on system-wide information flow analysis. We use static program slicing as the underlying analysis technique, and use model-driven engineering (MDE) techniques to propagate the ripple effects from a source code modification into all members of the product family. In addition, our approach ranks results based on an approximation of the scale of their impact. 
We have implemented our approach in a prototype tool, called Richter, which was evaluated on a real-world product family.",\n  author =       "Yazdanshenas, Amir Reza and Moonen, Leon",\n  booktitle =    "International Conference on Software Maintenance (ICSM)",\n  DOI =          "10.1109/ICSM.2012.6405262",\n  file =         ":Users/leon/Documents/Mendeley Desktop/Yazdanshenas, Moonen - 2012 - Fine-Grained Change Impact Analysis for Component-Based Product Families - IEEE International Conference o.pdf:pdf",\n  ISBN =         "9781-467-3231-2-3",\n  keywords =     "-component-based software development,change impact analysis,component-based software development,information flow,product-lines,software,software product-lines",\n  month =        sep,\n  number =       "5",\n  pages =        "119--128",\n  publisher =    "IEEE",\n  title =        "{Fine-Grained Change Impact Analysis for Component-Based Product Families}",\n  URL =          "http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=6405262",\n  year =         "2012",\n}\n\n
\n
\n\n\n
\n Developing software product-lines based on a set of shared components is a proven tactic to enhance reuse, quality, and time to market in producing a portfolio of products. Large-scale product families face rapidly increasing maintenance challenges as their evolution can happen both as a result of collective domain engineering activities, and as a result of product-specific developments. To make informed decisions about prospective modifications, developers need to estimate what other sections of the system will be affected and need attention, which is known as change impact analysis. This paper contributes a method to carry out change impact analysis in a component-based product family, based on system-wide information flow analysis. We use static program slicing as the underlying analysis technique, and use model-driven engineering (MDE) techniques to propagate the ripple effects from a source code modification into all members of the product family. In addition, our approach ranks results based on an approximation of the scale of their impact. We have implemented our approach in a prototype tool, called Richter, which was evaluated on a real-world product family.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n User evaluation of a domain specific program comprehension tool.\n \n \n \n\n\n \n Moonen, L.\n\n\n \n\n\n\n In Sadowski, C.; and Begel, A., editor(s), International Workshop on User Evaluation for Software Engineering Researchers (USER), pages 45–48, June 2012. IEEE\n \n\n\n\n
\n\n\n\n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@InProceedings{moonen:2012:user,\n  abstract =     "The subject of the user evaluation discussed in this paper is a tool that was developed to support the comprehension of large industrial component-based software systems in the maritime domain. We discuss the context and motivation of our research, and present the user-specific details of our tool, called FlowTracker. We include a walk-through of the system and an overview of the profiles of our prospective users. Next, we discuss the design of an initial qualitative evaluation that we have conducted. We conclude with a summary of lessons learned and challenges that we see for user evaluation of such domain-specific program comprehension tools. Keywords: user evaluation, domain specific tooling, program comprehension, software visualization.",\n  author =       "Moonen, Leon",\n  booktitle =    "International Workshop on User Evaluation for Software Engineering Researchers (USER)",\n  DOI =          "10.1109/USER.2012.6226583",\n  editor =       "Sadowski, Caitlin and Begel, Andrew",\n  file =         ":Users/leon/Documents/Mendeley Desktop/Moonen - 2012 - User evaluation of a domain specific program comprehension tool - International Workshop on User Evaluation for Software.pdf:pdf",\n  ISBN =         "9781-467-3185-9-4",\n  keywords =     "domain specific tooling,program comprehension,software visualization,user evaluation",\n  month =        jun,\n  pages =        "45--48",\n  publisher =    "IEEE",\n  title =        "{User evaluation of a domain specific program comprehension tool}",\n  year =         "2012",\n}\n\n
\n
\n\n\n
\n The subject of the user evaluation discussed in this paper is a tool that was developed to support the comprehension of large industrial component-based software systems in the maritime domain. We discuss the context and motivation of our research, and present the user-specific details of our tool, called FlowTracker. We include a walk-through of the system and an overview of the profiles of our prospective users. Next, we discuss the design of an initial qualitative evaluation that we have conducted. We conclude with a summary of lessons learned and challenges that we see for user evaluation of such domain-specific program comprehension tools. Keywords: user evaluation, domain specific tooling, program comprehension, software visualization.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Do code smells reflect important maintainability aspects? (report).\n \n \n \n\n\n \n Yamashita, A.; and Moonen, L.\n\n\n \n\n\n\n Technical Report 2012-10, 2012.\n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n\n\n\n
\n
@TechReport{yamashita:2012:code:report,\n  abstract =     "Code smells are manifestations of design flaws that can degrade code maintainability. As such, the existence of code smells seems an ideal indicator for maintainability assessments. However, to achieve comprehensive and accurate evaluations based on code smells, we need to know how well they reflect factors affecting maintainability. After identifying which maintainability factors are reflected by code smells and which not, we can use complementary means to assess the factors that are not addressed by smells. This paper reports on an empirical study that investigates the extent to which code smells reflect factors affecting maintainability that have been identified as important by programmers. We consider two sources for our analysis: (1) expert-based maintainability assessments of four Java systems before they entered a maintenance project, and (2) observations and interviews with professional developers who maintained these systems during 14 working days and implemented a number of change requests.",\n  author =       "Yamashita, Aiko and Moonen, Leon",\n  booktitle =    "Technical Report",\n  keywords =     "code smells,maintainability evaluation",\n  number =       "2012-10",\n  pages =        "12",\n  publisher =    "Simula Research Laboratory",\n  title =        "{Do code smells reflect important maintainability aspects? (report)}",\n  year =         "2012",\n}\n\n
\n
\n\n\n
\n Code smells are manifestations of design flaws that can degrade code maintainability. As such, the existence of code smells seems an ideal indicator for maintainability assessments. However, to achieve comprehensive and accurate evaluations based on code smells, we need to know how well they reflect factors affecting maintainability. After identifying which maintainability factors are reflected by code smells and which not, we can use complementary means to assess the factors that are not addressed by smells. This paper reports on an empirical study that investigates the extent to which code smells reflect factors affecting maintainability that have been identified as important by programmers. We consider two sources for our analysis: (1) expert-based maintainability assessments of four Java systems before they entered a maintenance project, and (2) observations and interviews with professional developers who maintained these systems during 14 working days and implemented a number of change requests.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Tracking and visualizing information flow in component-based systems (report).\n \n \n \n\n\n \n Yazdanshenas, A. R.; and Moonen, L.\n\n\n \n\n\n\n Technical Report 2012-03, 2012.\n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@TechReport{yazdanshenas:2012:tracking:report,\n  abstract =     "Component-based software engineering is aimed at managing the complexity of large-scale software development by composing systems from reusable parts. In order to understand or validate the behavior of a given system, one needs to acquire understanding of the components involved in combination with understanding how these components are instantiated, initialized and interconnected in the particular system. In practice, this task is often hindered by the heterogeneous nature of source and configuration artifacts and there is little to no tool support to help software engineers with such a system-wide analysis. This paper contributes a method to track and visualize information flow in a component-based system at various levels of abstraction. We propose a hierarchy of 5 interconnected views to support the comprehension needs of both safety domain experts and developers from our industrial partner. We discuss the implementation of our approach in a prototype tool, and present an initial qualitative evaluation of the effectiveness and usability of the proposed views for software development and software certification. The prototype was already found to be very useful and a number of directions for further improvement were suggested. We conclude by discussing these improvements and lessons learned.",\n  author =       "Yazdanshenas, Amir Reza and Moonen, Leon",\n  booktitle =    "Technical Report",\n  keywords =     "component-based software systems,information flow analysis,model reconstruction,software visualization",\n  number =       "2012-03",\n  pages =        "10",\n  publisher =    "Simula Research Laboratory",\n  title =        "{Tracking and visualizing information flow in component-based systems (report)}",\n  year =         "2012",\n}\n\n
\n
\n\n\n
\n Component-based software engineering is aimed at managing the complexity of large-scale software development by composing systems from reusable parts. In order to understand or validate the behavior of a given system, one needs to acquire understanding of the components involved in combination with understanding how these components are instantiated, initialized and interconnected in the particular system. In practice, this task is often hindered by the heterogeneous nature of source and configuration artifacts and there is little to no tool support to help software engineers with such a system-wide analysis. This paper contributes a method to track and visualize information flow in a component-based system at various levels of abstraction. We propose a hierarchy of 5 interconnected views to support the comprehension needs of both safety domain experts and developers from our industrial partner. We discuss the implementation of our approach in a prototype tool, and present an initial qualitative evaluation of the effectiveness and usability of the proposed views for software development and software certification. The prototype was already found to be very useful and a number of directions for further improvement were suggested. We conclude by discussing these improvements and lessons learned.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Tracking and Visualizing Information Flow in Component-Based Systems.\n \n \n \n \n\n\n \n Yazdanshenas, A. R.; and Moonen, L.\n\n\n \n\n\n\n In International Conference on Program Comprehension (ICPC), pages 143–152, June 2012. IEEE\n \n\n\n\n
\n\n\n\n \n \n \"TrackingPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@InProceedings{yazdanshenas:2012:tracking,\n  abstract =     "Component-based software engineering is aimed at managing the complexity of large-scale software development by composing systems from reusable parts. In order to understand or validate the behavior of a given system, one needs to acquire understanding of the components involved in combination with understanding how these components are instantiated, initialized and interconnected in the particular system. In practice, this task is often hindered by the heterogeneous nature of source and configuration artifacts and there is little to no tool support to help software engineers with such a system-wide analysis. This paper contributes a method to track and visualize information flow in a component-based system at various levels of abstraction. We propose a hierarchy of 5 interconnected views to support the comprehension needs of both safety domain experts and developers from our industrial partner. We discuss the implementation of our approach in a prototype tool, and present an initial qualitative evaluation of the effectiveness and usability of the proposed views for software development and software certification. The prototype was already found to be very useful and a number of directions for further improvement were suggested. 
We conclude by discussing these improvements and lessons learned.",\n  author =       "Yazdanshenas, Amir Reza and Moonen, Leon",\n  booktitle =    "International Conference on Program Comprehension (ICPC)",\n  DOI =          "10.1109/ICPC.2012.6240482",\n  ISBN =         "9781-467-3121-6-5",\n  keywords =     "component-based software systems,information flow analysis,model reconstruction,software visualization",\n  month =        jun,\n  pages =        "143--152",\n  publisher =    "IEEE",\n  title =        "{Tracking and Visualizing Information Flow in Component-Based Systems}",\n  URL =          "http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=6240482",\n  year =         "2012",\n}\n\n
\n
\n\n\n
\n Component-based software engineering is aimed at managing the complexity of large-scale software development by composing systems from reusable parts. In order to understand or validate the behavior of a given system, one needs to acquire understanding of the components involved in combination with understanding how these components are instantiated, initialized and interconnected in the particular system. In practice, this task is often hindered by the heterogeneous nature of source and configuration artifacts and there is little to no tool support to help software engineers with such a system-wide analysis. This paper contributes a method to track and visualize information flow in a component-based system at various levels of abstraction. We propose a hierarchy of 5 interconnected views to support the comprehension needs of both safety domain experts and developers from our industrial partner. We discuss the implementation of our approach in a prototype tool, and present an initial qualitative evaluation of the effectiveness and usability of the proposed views for software development and software certification. The prototype was already found to be very useful and a number of directions for further improvement were suggested. We conclude by discussing these improvements and lessons learned.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Assuring Software Quality by Code Smell Detection (keynote).\n \n \n \n \n\n\n \n Moonen, L.\n\n\n \n\n\n\n In Working Conference on Reverse Engineering (WCRE), pages xix, October 2012. \n \n\n\n\n
\n\n\n\n \n \n \"AssuringHttp://ieeexplore.ieee.org/xpl/login.jsp?tp\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@InProceedings{moonen:2012:assuring,\n  abstract =     "(Invited talk, Most Influential Paper Award) In this retrospective we will review the paper Java Quality Assurance by Detecting Code Smells that was published ten years ago at WCRE. The work presents an approach for the automatic detection and visualization of code smells and discusses how this approach could be used in the design of a software inspection tool. The feasibility of the proposed approach was illustrated with the development of jCOSMO, a prototype code smell browser that detects and visualizes code smells in JAVA source code. It was the first tool to automatically detect code smells in source code, and we demonstrated the application of this tool in an industrial quality assessment case study. In addition to reviewing the WCRE 2002 work, we will discuss subsequent developments in this area by looking at a selection of papers that were published in its wake. In particular, we will have a look at recent related work in which we empirically investigated the relation between code smells and software maintainability in a longitudinal study where professional developers were observed while maintaining four different software systems that exhibited known code smells. We conclude with a discussion of the lessons learned and opportunities for further research. 
Index Terms: software inspection, quality assurance, Java, refactoring, code smells.",\n  author =       "Moonen, Leon",\n  booktitle =    "Working Conference on Reverse Engineering (WCRE)",\n  DOI =          "10.1109/WCRE.2012.69",\n  ISBN =         "9781-467-3453-6-1",\n  keywords =     "Java,code smells,refactoring,software inspection,software quality assurance",\n  month =        oct,\n  pages =        "xix",\n  title =        "{Assuring Software Quality by Code Smell Detection (keynote)}",\n  URL =          "http://ieeexplore.ieee.org/xpl/login.jsp?tp={\\&}arnumber=6385092{\\&}url=http{\\%}3A{\\%}2F{\\%}2Fieeexplore.ieee.org{\\%}2Fxpls{\\%}2Fabs{\\_}all.jsp{\\%}3Farnumber{\\%}3D6385092",\n  year =         "2012",\n}\n\n
\n
\n\n\n
\n (Invited talk, Most Influential Paper Award) In this retrospective we will review the paper Java Quality Assurance by Detecting Code Smells that was published ten years ago at WCRE. The work presents an approach for the automatic detection and visualization of code smells and discusses how this approach could be used in the design of a software inspection tool. The feasibility of the proposed approach was illustrated with the development of jCOSMO, a prototype code smell browser that detects and visualizes code smells in JAVA source code. It was the first tool to automatically detect code smells in source code, and we demonstrated the application of this tool in an industrial quality assessment case study. In addition to reviewing the WCRE 2002 work, we will discuss subsequent developments in this area by looking at a selection of papers that were published in its wake. In particular, we will have a look at recent related work in which we empirically investigated the relation between code smells and software maintainability in a longitudinal study where professional developers were observed while maintaining four different software systems that exhibited known code smells. We conclude with a discussion of the lessons learned and opportunities for further research. Index Terms: software inspection, quality assurance, Java, refactoring, code smells.\n
\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 2011\n \n \n (6)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n \n Crossing the boundaries while analyzing heterogeneous component-based software systems.\n \n \n \n \n\n\n \n Yazdanshenas, A. R.; and Moonen, L.\n\n\n \n\n\n\n In International Conference on Software Maintenance (ICSM), pages 193–202, 2011. IEEE\n \n\n\n\n
\n\n\n\n \n \n \"CrossingPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@InProceedings{Yazdanshenas2011,\n  abstract =     "One way to manage the complexity of software systems is to compose them from reusable components, instead of starting from scratch. Components may be implemented in different programming languages and are tied together using configuration files, or glue code, defining instantiation, initialization and interconnections. Although correctly engineering the composition and configuration of components is crucial for the overall behavior, there is surprisingly little support for incorporating this information in the static verification and validation of these systems. Analyzing the properties of programs within closed code boundaries has been studied for some decades and is well-established. This paper contributes a method to support analysis across the components of a component-based system. We build upon the Knowledge Discovery Metamodel to reverse engineer homogeneous models for systems composed of heterogeneous artifacts. Our method is implemented in a prototype tool that has been successfully used to track information flow across the components of a component-based system using program slicing.",\n  author =       "Yazdanshenas, Amir Reza and Moonen, Leon",\n  booktitle =    "International Conference on Software Maintenance (ICSM)",\n  DOI =          "10.1109/ICSM.2011.6080786",\n  file =         ":Users/leon/Documents/Mendeley Desktop/Yazdanshenas, Moonen - 2011 - Crossing the boundaries while analyzing heterogeneous component-based software systems - Proceedings of th.pdf:pdf",\n  ISBN =         "9781-457-7066-4-6",\n  pages =        "193--202",\n  publisher =    "IEEE",\n  title =        "{Crossing the boundaries while analyzing heterogeneous component-based software systems}",\n  URL =          "http://dx.doi.org/10.1109/ICSM.2011.6080786 http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=6080786",\n  year =         "2011",\n}\n\n
\n
\n\n\n
\n One way to manage the complexity of software systems is to compose them from reusable components, instead of starting from scratch. Components may be implemented in different programming languages and are tied together using configuration files, or glue code, defining instantiation, initialization and interconnections. Although correctly engineering the composition and configuration of components is crucial for the overall behavior, there is surprisingly little support for incorporating this information in the static verification and validation of these systems. Analyzing the properties of programs within closed code boundaries has been studied for some decades and is well-established. This paper contributes a method to support analysis across the components of a component-based system. We build upon the Knowledge Discovery Metamodel to reverse engineer homogeneous models for systems composed of heterogeneous artifacts. Our method is implemented in a prototype tool that has been successfully used to track information flow across the components of a component-based system using program slicing.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Robust Parsing using Island Grammars Revisited.\n \n \n \n \n\n\n \n Moonen, L.\n\n\n \n\n\n\n In Working Conference on Reverse Engineering, of WCRE 2001 Most Influential Paper, October 2011. IEEE\n \n\n\n\n
\n\n\n\n \n \n \"RobustPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@InProceedings{moonen:2011:robust,\n  abstract =     "In this retrospective we will briefly review the paper “Generating Robust Parsers using Island Grammars” that was published ten years ago at WCRE. The work addressed a common challenge in source model extraction and proposed the use of island grammars to generate robust parsers that combine the detail and accuracy of syntactical analysis with the flexibility and development speed of lexical approaches. In addition to reviewing the WCRE 2001 work, we will discuss subsequent developments in this area by looking at a selection of papers that were published in its wake. We conclude with a discussion of the lessons learned and an overview of opportunities for further research in this field.",\n  author =       "Moonen, Leon",\n  booktitle =    "Working Conference on Reverse Engineering",\n  DOI =          "10.1109/WCRE.2011.75",\n  file =         ":Users/leon/Documents/Mendeley Desktop/Moonen - 2011 - Robust Parsing using Island Grammars Revisited (keynote) - Working Conference on Reverse Engineering (WCRE).pdf:pdf",\n  ISBN =         "9781-457-7194-8-6",\n  keywords =     "Island grammars,agile parsing,extraction,fact extraction,fuzzy parsing,parser generation,partial parsing,program analysis.,reverse engineering,source model",\n  month =        oct,\n  publisher =    "IEEE",\n  series =       "WCRE 2001 Most Influential Paper",\n  title =        "{Robust Parsing using Island Grammars Revisited}",\n  URL =          "http://ieeexplore.ieee.org/document/6079880/",\n  year =         "2011",\n}\n\n
\n
\n\n\n
\n In this retrospective we will briefly review the paper “Generating Robust Parsers using Island Grammars” that was published ten years ago at WCRE. The work addressed a common challenge in source model extraction and proposed the use of island grammars to generate robust parsers that combine the detail and accuracy of syntactical analysis with the flexibility and development speed of lexical approaches. In addition to reviewing the WCRE 2001 work, we will discuss subsequent developments in this area by looking at a selection of papers that were published in its wake. We conclude with a discussion of the lessons learned and an overview of opportunities for further research in this field.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Robust Parsing using Island Grammars Revisited (keynote).\n \n \n \n \n\n\n \n Moonen, L.\n\n\n \n\n\n\n In Working Conference on Reverse Engineering (WCRE), October 2011. IEEE\n \n\n\n\n
\n\n\n\n \n \n \"RobustPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@InProceedings{moonen:2011:robust,\n  abstract =     "In this retrospective we will briefly review the paper “Generating Robust Parsers using Island Grammars” that was published ten years ago at WCRE. The work addressed a common challenge in source model extraction and proposed the use of island grammars to generate robust parsers that combine the detail and accuracy of syntactical analysis with the flexibility and development speed of lexical approaches. In addition to reviewing the WCRE 2001 work, we will discuss subsequent developments in this area by looking at a selection of papers that were published in its wake. We conclude with a discussion of the lessons learned and an overview of opportunities for further research in this field.",\n  author =       "Moonen, Leon",\n  booktitle =    "Working Conference on Reverse Engineering (WCRE)",\n  DOI =          "10.1109/WCRE.2011.75",\n  file =         ":Users/leon/Documents/Mendeley Desktop//Moonen - 2011 - Robust Parsing using Island Grammars Revisited (keynote) - Working Conference on Reverse Engineering (WCRE).pdf:pdf",\n  ISBN =         "9781-457-7194-8-6",\n  keywords =     "agile parsing,fact extraction,fuzzy parsing,island grammars,parser generation,partial parsing,program analysis,reverse engineering,source model extraction",\n  month =        oct,\n  publisher =    "IEEE",\n  title =        "{Robust Parsing using Island Grammars Revisited (keynote)}",\n  URL =          "http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=6079880",\n  year =         "2011",\n}\n\n
\n
\n\n\n
\n In this retrospective we will briefly review the paper “Generating Robust Parsers using Island Grammars” that was published ten years ago at WCRE. The work addressed a common challenge in source model extraction and proposed the use of island grammars to generate robust parsers that combine the detail and accuracy of syntactical analysis with the flexibility and development speed of lexical approaches. In addition to reviewing the WCRE 2001 work, we will discuss subsequent developments in this area by looking at a selection of papers that were published in its wake. We conclude with a discussion of the lessons learned and an overview of opportunities for further research in this field.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Evaluating and Guiding the Use of Coding Standards to Reduce Software Faults (keynote).\n \n \n \n\n\n \n Moonen, L.\n\n\n \n\n\n\n In Embedded Source Code Quality Control, Antwerp, Belgium, 2011. \n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@InProceedings{moonen:2011:evaluating,\n  abstract =     "In spite of the widespread use of coding standards and tools enforcing their rules, there is little empirical evidence supporting the intuition that they prevent the introduction of faults in software. Not only can enforcing compliance with rules that have little impact on the number of faults be considered a wasted effort, it can actually result in an increase in faults, as any modification has a non-zero probability of introducing a fault or triggering a previously concealed one. Therefore, it is important to build a body of empirical knowledge that helps us understand which rules are worthwhile enforcing, and which ones can be ignored in the context of fault reduction. In this talk, we reflect on our efforts to quantify and characterize the relation between rule violations and actual faults for the MISRA C 2004 standard on three industrial case studies. We found that 10 rules in the standard were significant predictors of fault location in these projects. We discuss how software and issue archives can be used to link standard violations to known bugs, and how this information can be used to address two practical issues in introducing or using a coding standard: which rules should be adhered to as they would have caught earlier issues, and how to prioritize violations. Our studies showed that such a history-based approach can help to select a subset of rules that reduce the number of locations to inspect by 64{\\%} to 95{\\%}, while still containing 64{\\%} to 86{\\%} of the real issues covered by all rules.",\n  address =      "Antwerp, Belgium",\n  author =       "Moonen, Leon",\n  booktitle =    "Embedded Source Code Quality Control",\n  title =        "{Evaluating and Guiding the Use of Coding Standards to Reduce Software Faults (keynote)}",\n  year =         "2011",\n}\n\n
\n
\n\n\n
\n In spite of the widespread use of coding standards and tools enforcing their rules, there is little empirical evidence supporting the intuition that they prevent the introduction of faults in software. Not only can enforcing compliance with rules that have little impact on the number of faults be considered a wasted effort, it can actually result in an increase in faults, as any modification has a non-zero probability of introducing a fault or triggering a previously concealed one. Therefore, it is important to build a body of empirical knowledge that helps us understand which rules are worthwhile enforcing, and which ones can be ignored in the context of fault reduction. In this talk, we reflect on our efforts to quantify and characterize the relation between rule violations and actual faults for the MISRA C 2004 standard on three industrial case studies. We found that 10 rules in the standard were significant predictors of fault location in these projects. We discuss how software and issue archives can be used to link standard violations to known bugs, and how this information can be used to address two practical issues in introducing or using a coding standard: which rules should be adhered to as they would have caught earlier issues, and how to prioritize violations. Our studies showed that such a history-based approach can help to select a subset of rules that reduce the number of locations to inspect by 64% to 95%, while still containing 64% to 86% of the real issues covered by all rules.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Building a Better Map: Wayfinding in Software Systems (keynote).\n \n \n \n\n\n \n Moonen, L.\n\n\n \n\n\n\n In International Conference on Program Comprehension (ICPC), pages xvi, June 2011. \n \n\n\n\n
\n\n\n\n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@InProceedings{moonen:2011:wayfinding,\n  abstract =     "Program understanding is one of the core activities in software engineering, and one of the main challenges in getting a grip on large industrial systems is finding appropriate representations that support the comprehension process. In this talk, we will investigate the benefits and challenges of using a map metaphor to help software engineers explore and understand software systems. We will analyze what factors influence the legibility of a software map, i.e. what makes the information contained in a map easy to understand, interpret and remember. In addition, we will look at what has been done in city planning and architecture to make it easier for people to find their way in unknown terrain, and reflect on opportunities for using these results in program comprehension research.",\n  author =       "Moonen, Leon",\n  booktitle =    "International Conference on Program Comprehension (ICPC)",\n  DOI =          "10.1109/ICPC.2011.58",\n  keywords =     "legibility,map metaphor,program comprehension,reverse engineering,software exploration,software visualisation",\n  month =        jun,\n  pages =        "xvi",\n  title =        "{Building a Better Map: Wayfinding in Software Systems (keynote)}",\n  year =         "2011",\n}\n\n
\n
\n\n\n
\n Program understanding is one of the core activities in software engineering, and one of the main challenges in getting a grip on large industrial systems is finding appropriate representations that support the comprehension process. In this talk, we will investigate the benefits and challenges of using a map metaphor to help software engineers explore and understand software systems. We will analyze what factors influence the legibility of a software map, i.e. what makes the information contained in a map easy to understand, interpret and remember. In addition, we will look at what has been done in city planning and architecture to make it easier for people to find their way in unknown terrain, and reflect on opportunities for using these results in program comprehension research.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Crossing the Boundaries of Program Analysis in Heterogeneous Software Systems (report).\n \n \n \n\n\n \n Yazdanshenas, A. R.; and Moonen, L.\n\n\n \n\n\n\n Technical Report 2011-11, 2011.\n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@TechReport{yazdanshenas:2011:boundaries:report,\n  annote =       "This technical report is an earlier version of a published conference paper. The published version can be found here: https://www.simula.no/publications/crossing-boundaries-while-analyzing-heterogeneous-component-based-software-systems",\n  author =       "Yazdanshenas, Amir Reza and Moonen, Leon",\n  booktitle =    "Technical Report",\n  institution =  "Simula Research Laboratory",\n  number =       "2011-11",\n  title =        "{Crossing the Boundaries of Program Analysis in Heterogeneous Software Systems (report)}",\n  year =         "2011",\n}\n\n
\n
\n\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 2010\n \n \n (1)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n \n Software Entropy in Agile Product Evolution.\n \n \n \n \n\n\n \n Hanssen, G. K.; Yamashita, A. F.; Conradi, R.; and Moonen, L.\n\n\n \n\n\n\n In Sprague, R. H, editor(s), Hawaii International Conference on System Sciences (HICSS), pages 1–10, January 2010. IEEE, IEEE\n \n\n\n\n
\n\n\n\n \n \n \"SoftwarePaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n\n\n\n
\n
@InProceedings{hanssen:2010:entropy,\n  abstract =     "As agile software development principles and methods are being adopted by large software product organizations it is important to understand the role of software entropy. That is, how the maintainability of a system may degrade over time due to continuous change. This may on one side affect the ability to act agile in planning and development. On the other side, an agile process may affect growth of entropy. We report from a case study of a successful software product line organization that has adopted the agile development method Evo, showing how agility and entropy are negatively related. We conclude this study by suggesting a two-step approach to manage entropy while maintaining process agility. First, the system needs to be restructured to establish a level of manageable entropy, and then, that the agile process must be complemented with continuous semi-automated quality monitoring and refactoring support.",\n  author =       "Hanssen, Geir Kjetil and Yamashita, Aiko Fallas and Conradi, Reidar and Moonen, Leon",\n  booktitle =    "Hawaii International Conference on System Sciences (HICSS)",\n  DOI =          "10.1109/HICSS.2010.344",\n  editor =       "Sprague, Ralph H",\n  file =         ":Users/leon/Documents/Mendeley Desktop/Hanssen et al. - 2010 - Software Entropy in Agile Product Evolution - Hawaii International Conference on System Sciences (HICSS).pdf:pdf",\n  ISBN =         "9781-424-4550-9-6",\n  keywords =     "agile,software maintenance,software quality assurance",\n  month =        jan,\n  organization = "IEEE",\n  pages =        "1--10",\n  publisher =    "IEEE",\n  title =        "{Software Entropy in Agile Product Evolution}",\n  URL =          "http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=5428534",\n  year =         "2010",\n}\n\n
\n
\n\n\n
\n As agile software development principles and methods are being adopted by large software product organizations it is important to understand the role of software entropy. That is, how the maintainability of a system may degrade over time due to continuous change. This may on one side affect the ability to act agile in planning and development. On the other side, an agile process may affect growth of entropy. We report from a case study of a successful software product line organization that has adopted the agile development method Evo, showing how agility and entropy are negatively related. We conclude this study by suggesting a two-step approach to manage entropy while maintaining process agility. First, the system needs to be restructured to establish a level of manageable entropy, and then, that the agile process must be complemented with continuous semi-automated quality monitoring and refactoring support.\n
\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 2009\n \n \n (6)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n \n An Integrated Crosscutting Concern Migration Strategy and Its Semi-Automated Application to JHotDraw.\n \n \n \n \n\n\n \n Marin, M.; van Deursen, A.; Moonen, L.; and van der Rijst, R.\n\n\n \n\n\n\n Automated Software Engineering, 16(2): 323–356. June 2009.\n \n\n\n\n
\n\n\n\n \n \n \"AnPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@Article{marin:2009:integrated,\n  abstract =     "In this paper we propose a systematic strategy for migrating crosscutting concerns in existing object-oriented systems to aspect-oriented programming solutions. The proposed strategy consists of four steps: mining, exploration, documentation and refactoring of crosscutting concerns. We discuss in detail a new approach to refactoring to aspect-oriented programming that is fully integrated with our strategy, and apply the whole strategy to an object-oriented system, namely the JHotDraw framework. Moreover, we present a method to semi-automatically perform the aspect-introducing refactorings based on identified crosscutting concern sorts which is supported by a prototype tool called SAIR. We perform an exploratory case study in which we apply this tool on the same object-oriented system and compare its results with the results of manual migration in order to assess the feasibility of automated aspect refactoring. Both the refactoring tool SAIR and the results of the manual migration are made available as open-source, the latter providing the largest aspect-introducing refactoring available to date. We report on our experiences with conducting both case studies and reflect on the success and challenges of the migration process.",\n  annote =       "From Duplicate 2 (An Integrated Crosscutting Concern Migration Strategy and Its Semi-Automated Application to JHotDraw - Marin, Marius; van Deursen, Arie; Moonen, Leon; van der Rijst, Robin) DOI: 10.1007/s10515-009-0051-2",\n  author =       "Marin, Marius and van Deursen, Arie and Moonen, Leon and van der Rijst, Robin",\n  DOI =          "10.1007/s10515-009-0051-2",\n  file =         ":Users/leon/Documents/Mendeley Desktop/Marin et al. 
- 2009 - An Integrated Crosscutting Concern Migration Strategy and Its Semi-Automated Application to JHotDraw - Automated S.pdf:pdf",\n  ISSN =         "0928-8910",\n  journal =      "Automated Software Engineering",\n  keywords =     "aspect-oriented programming,code refactoring,concern modeling,cross-cutting concerns,program analysis,reverse engineering",\n  month =        jun,\n  number =       "2",\n  pages =        "323--356",\n  publisher =    "Springer",\n  title =        "{An Integrated Crosscutting Concern Migration Strategy and Its Semi-Automated Application to JHotDraw}",\n  URL =          "http://link.springer.com/10.1007/s10515-009-0051-2",\n  volume =       "16",\n  year =         "2009",\n}\n\n
\n
\n\n\n
\n In this paper we propose a systematic strategy for migrating crosscutting concerns in existing object-oriented systems to aspect-oriented programming solutions. The proposed strategy consists of four steps: mining, exploration, documentation and refactoring of crosscutting concerns. We discuss in detail a new approach to refactoring to aspect-oriented programming that is fully integrated with our strategy, and apply the whole strategy to an object-oriented system, namely the JHotDraw framework. Moreover, we present a method to semi-automatically perform the aspect-introducing refactorings based on identified crosscutting concern sorts which is supported by a prototype tool called SAIR. We perform an exploratory case study in which we apply this tool on the same object-oriented system and compare its results with the results of manual migration in order to assess the feasibility of automated aspect refactoring. Both the refactoring tool SAIR and the results of the manual migration are made available as open-source, the latter providing the largest aspect-introducing refactoring available to date. We report on our experiences with conducting both case studies and reflect on the success and challenges of the migration process.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Evaluating the Relation Between Coding Standard Violations and Faults Within and Across Software Versions.\n \n \n \n \n\n\n \n Boogerd, C.; and Moonen, L.\n\n\n \n\n\n\n In International Working Conference on Mining Software Repositories (MSR), pages 41–50, May 2009. IEEE\n \n\n\n\n
\n\n\n\n \n \n \"EvaluatingPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@InProceedings{boogerd:2009:evaluating,\n  abstract =     "In spite of the widespread use of coding standards and tools enforcing their rules, there is little empirical evidence supporting the intuition that they prevent the introduction of faults in software. In previous work, we performed a pilot study to assess the relation between rule violations and actual faults, using the MISRA C 2004 standard on an industrial case. In this paper, we investigate three different aspects of the relation between violations and faults on a larger case study, and compare the results across the two projects. We find that 10 rules in the standard are significant predictors of fault location.",\n  author =       "Boogerd, Cathal and Moonen, Leon",\n  booktitle =    "International Working Conference on Mining Software Repositories (MSR)",\n  DOI =          "10.1109/MSR.2009.5069479",\n  file =         ":Users/leon/Documents/Mendeley Desktop/Boogerd, Moonen - 2009 - Evaluating the Relation Between Coding Standard Violations and Faults Within and Across Software Versions - Int.pdf:pdf",\n  ISBN =         "9781-424-4349-3-0",\n  keywords =     "coding standards,software faults,software inspection,software quality assurance",\n  month =        may,\n  pages =        "41--50",\n  publisher =    "IEEE",\n  title =        "{Evaluating the Relation Between Coding Standard Violations and Faults Within and Across Software Versions}",\n  URL =          "http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=5069479",\n  year =         "2009",\n}\n\n
\n
\n\n\n
\n In spite of the widespread use of coding standards and tools enforcing their rules, there is little empirical evidence supporting the intuition that they prevent the introduction of faults in software. In previous work, we performed a pilot study to assess the relation between rule violations and actual faults, using the MISRA C 2004 standard on an industrial case. In this paper, we investigate three different aspects of the relation between violations and faults on a larger case study, and compare the results across the two projects. We find that 10 rules in the standard are significant predictors of fault location.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Maintenance and agile development: challenges, opportunities and future directions.\n \n \n \n\n\n \n Hanssen, G. K.; Yamashita, A.; Conradi, R.; and Moonen, L.\n\n\n \n\n\n\n In Beyer, D., editor(s), International Conference on Software Maintenance (ICSM), March 2009. IEEE\n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@InProceedings{Simula.SE.629,\n  abstract =     "Software entropy is a phenomenon where repeated changes gradually degrade the structure of the system, making it hard to understand and maintain. This phenomenon imposes challenges for organizations that have moved to agile methods from other processes, despite agile's focus on adaptability and responsiveness to change. We have investigated this issue through an industrial case study, and reviewed the literature on addressing software entropy, focussing on the detection of ``code smells'' and their treatment by refactoring. We found that in order to remain agile despite of software entropy, developers need better support for understanding, planning and testing the impact of changes. However, it is exactly work on refactoring decision support and task complexity analysis that is lacking in literature. Based on our findings, we discuss strategies for dealing with entropy in this context and present avenues for future research.",\n  author =       "Hanssen, Geir Kjetil and Yamashita, Aiko and Conradi, Reidar and Moonen, Leon",\n  booktitle =    "International Conference on Software Maintenance (ICSM)",\n  editor =       "Beyer, Dirk",\n  ISBN =         "9781-424-4489-7-5",\n  month =        mar,\n  publisher =    "IEEE",\n  title =        "{Maintenance and agile development: challenges, opportunities and future directions}",\n  year =         "2009",\n}\n\n
\n
\n\n\n
\n Software entropy is a phenomenon where repeated changes gradually degrade the structure of the system, making it hard to understand and maintain. This phenomenon imposes challenges for organizations that have moved to agile methods from other processes, despite agile's focus on adaptability and responsiveness to change. We have investigated this issue through an industrial case study, and reviewed the literature on addressing software entropy, focussing on the detection of ``code smells'' and their treatment by refactoring. We found that in order to remain agile despite of software entropy, developers need better support for understanding, planning and testing the impact of changes. However, it is exactly work on refactoring decision support and task complexity analysis that is lacking in literature. Based on our findings, we discuss strategies for dealing with entropy in this context and present avenues for future research.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n A Systematic Survey of Program Comprehension through Dynamic Analysis.\n \n \n \n\n\n \n Cornelissen, B.; Zaidman, A.; van Deursen, A.; Moonen, L.; and Koschke, R.\n\n\n \n\n\n\n IEEE Transactions on Software Engineering, 35(5): 684–702. 2009.\n \n\n\n\n
\n\n\n\n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@Article{cornelissen:2009:slr,\n  abstract =     "Program comprehension is an important activity in software maintenance, as software must be sufficiently understood before it can be properly modified. The study of a program's execution, known as dynamic analysis, has become a common technique in this respect and has received substantial attention from the research community, particularly over the last decade. These efforts have resulted in a large research body of which currently there exists no comprehensive overview. This paper reports on a systematic literature survey aimed at the identification and structuring of research on program comprehension through dynamic analysis. From a research body consisting of 4,795 articles published in 14 relevant venues between July 1999 and June 2008 and the references therein, we have systematically selected 172 articles and characterized them in terms of four main facets: activity, target, method, and evaluation. The resulting overview offers insight in what constitutes the main contributions of the field, supports the task of identifying gaps and opportunities, and has motivated our discussion of several important research directions that merit additional consideration in the near future.",\n  author =       "Cornelissen, Bas and Zaidman, Andy and van Deursen, Arie and Moonen, Leon and Koschke, Rainer",\n  DOI =          "10.1109/TSE.2009.28",\n  journal =      "IEEE Transactions on Software Engineering",\n  number =       "5",\n  pages =        "684--702",\n  title =        "{A Systematic Survey of Program Comprehension through Dynamic Analysis}",\n  volume =       "35",\n  year =         "2009",\n}\n\n
\n
\n\n\n
\n Program comprehension is an important activity in software maintenance, as software must be sufficiently understood before it can be properly modified. The study of a program's execution, known as dynamic analysis, has become a common technique in this respect and has received substantial attention from the research community, particularly over the last decade. These efforts have resulted in a large research body of which currently there exists no comprehensive overview. This paper reports on a systematic literature survey aimed at the identification and structuring of research on program comprehension through dynamic analysis. From a research body consisting of 4,795 articles published in 14 relevant venues between July 1999 and June 2008 and the references therein, we have systematically selected 172 articles and characterized them in terms of four main facets: activity, target, method, and evaluation. The resulting overview offers insight in what constitutes the main contributions of the field, supports the task of identifying gaps and opportunities, and has motivated our discussion of several important research directions that merit additional consideration in the near future.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Using Concept Mapping for Maintainability Assessments.\n \n \n \n\n\n \n Yamashita, A.; Anda, B. C. D.; Sjøberg, D.; Benestad, H. C.; Arnstad, P. E.; and Moonen, L.\n\n\n \n\n\n\n In Miller, J.; and Selby, R., editor(s), International Symposium on Empirical Software Engineering and Measurement (ESEM), October 2009. IEEE\n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@InProceedings{Simula.SE.617,\n  abstract =     "Many important phenomena within software engineering are difficult to define and measure. A good example is software maintainability, which has been the subject of considerable research and is believed to be a critical determinant of total software costs. Yet, there is no common agreement on how to describe and measure software maintainability in a concrete setting. We propose using concept mapping, a well-grounded method used in social research, to operationalize this concept according to a given goal and perspective. We apply this method to describe four systems that were developed as part of an industrial multiple-case study. The outcome is a conceptual map that displays an arrangement of maintainability constructs, their interrelations and corresponding measures. Our experience is that concept mapping (1) provides a structured way of combining static code analysis and expert judgment; (2) helps tailoring the choice of measures to a particular system context; and (3) supports the mapping between software measures and aspects of software maintainability. As such, it represents a strong addition to existing frameworks for evaluating quality such as ISO/IEC 9126 and GQM, and tools for static measurement of software code. 
Overall, we find that concept mapping provides a systematic, structured and repeatable method for developing constructs and measures of the phenomenon of interest, and we deem it useful for defining constructs and measures of other aspects of software engineering, in addition to maintainability.",\n  author =       "Yamashita, Aiko and Anda, Bente Cecilie Dahlum and Sj{\\o}berg, Dag and Benestad, Hans Christian and Arnstad, Per Einar and Moonen, Leon",\n  booktitle =    "International Symposium on Empirical Software Engineering and Measurement (ESEM)",\n  DOI =          "10.1109/ESEM.2009.5316044",\n  editor =       "Miller, James and Selby, Rick",\n  month =        oct,\n  publisher =    "IEEE",\n  title =        "{Using Concept Mapping for Maintainability Assessments}",\n  year =         "2009",\n}\n\n
\n
\n\n\n
\n Many important phenomena within software engineering are difficult to define and measure. A good example is software maintainability, which has been the subject of considerable research and is believed to be a critical determinant of total software costs. Yet, there is no common agreement on how to describe and measure software maintainability in a concrete setting. We propose using concept mapping, a well-grounded method used in social research, to operationalize this concept according to a given goal and perspective. We apply this method to describe four systems that were developed as part of an industrial multiple-case study. The outcome is a conceptual map that displays an arrangement of maintainability constructs, their interrelations and corresponding measures. Our experience is that concept mapping (1) provides a structured way of combining static code analysis and expert judgment; (2) helps tailoring the choice of measures to a particular system context; and (3) supports the mapping between software measures and aspects of software maintainability. As such, it represents a strong addition to existing frameworks for evaluating quality such as ISO/IEC 9126 and GQM, and tools for static measurement of software code. Overall, we find that concept mapping provides a systematic, structured and repeatable method for developing constructs and measures of the phenomenon of interest, and we deem it useful for defining constructs and measures of other aspects of software engineering, in addition to maintainability.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Using software history to guide deployment of coding standards.\n \n \n \n\n\n \n Boogerd, C.; and Moonen, L.\n\n\n \n\n\n\n In Mathijssen, R, editor(s), Trader: Reliability of high-volume consumer products, 4, pages 39–52. Embedded Systems Institute, Eindhoven, the Netherlands, 2009.\n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@InCollection{Simula.SE.683,\n  abstract =     "In spite of the widespread use of coding standards and tools enforcing their rules, there is little empirical evidence supporting the intuition that they prevent the introduction of faults in software. Therefore, we propose to use information from software and issue archives to link standard violations to known bugs. In this chapter we introduce such an approach and apply it to three industrial case studies. Furthermore, we discuss how to use the historical data to address two practical issues in using a coding standard: which rules to adhere to, and how to rank violations of those rules.",\n  address =      "Eindhoven, the Netherlands",\n  author =       "Boogerd, Cathal and Moonen, Leon",\n  booktitle =    "Trader: Reliability of high-volume consumer products",\n  chapter =      "4",\n  editor =       "Mathijssen, R",\n  ISBN =         "978-907-867-9-04-2",\n  pages =        "39--52",\n  publisher =    "Embedded Systems Institute",\n  title =        "{Using software history to guide deployment of coding standards}",\n  year =         "2009",\n}\n\n
\n
\n\n\n
\n In spite of the widespread use of coding standards and tools enforcing their rules, there is little empirical evidence supporting the intuition that they prevent the introduction of faults in software. Therefore, we propose to use information from software and issue archives to link standard violations to known bugs. In this chapter we introduce such an approach and apply it to three industrial case studies. Furthermore, we discuss how to use the historical data to address two practical issues in using a coding standard: which rules to adhere to, and how to rank violations of those rules.\n
\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 2008\n \n \n (8)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n \n On the Use of Data Flow Analysis in Static Profiling.\n \n \n \n \n\n\n \n Boogerd, C.; and Moonen, L.\n\n\n \n\n\n\n In Cordy, J.; and Zhang, L., editor(s), International Working Conference on Source Code Analysis and Manipulation (SCAM), pages 79–88, September 2008. IEEE\n \n\n\n\n
\n\n\n\n \n \n \"OnPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@InProceedings{boogerd:2008:dfa,\n  abstract =     "Static profiling is a technique that produces estimates of execution likelihoods or frequencies based on source code analysis only. It is frequently used in determining cost/benefit ratios for certain compiler optimizations. In previous work,we introduced a simple algorithm to compute execution likelihoods,based on a control flow graph and heuristic branch prediction. In this paper we examine the benefits of using more involved analysis techniques in such a static profiler. In particular, we explore the use of value range propagation to improve the accuracy of the estimates, and we investigate the differences in estimating execution likelihoods and frequencies.",\n  author =       "Boogerd, Cathal and Moonen, Leon",\n  booktitle =    "International Working Conference on Source Code Analysis and Manipulation (SCAM)",\n  DOI =          "10.1109/SCAM.2008.18",\n  editor =       "Cordy, James and Zhang, Lu",\n  file =         ":Users/leon/Documents/Mendeley Desktop/Boogerd, Moonen - 2008 - On the Use of Data Flow Analysis in Static Profiling - International Working Conference on Source Code Analysis.pdf:pdf",\n  ISBN =         "9780-7695-335-3-7",\n  keywords =     "cost-benefit analysis,data flow analysis,static analysis,static profiling,value range propagation",\n  month =        sep,\n  pages =        "79--88",\n  publisher =    "IEEE",\n  title =        "{On the Use of Data Flow Analysis in Static Profiling}",\n  type =         "Conference",\n  URL =          "http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=4637541",\n  year =         "2008",\n}\n\n
\n
\n\n\n
\n Static profiling is a technique that produces estimates of execution likelihoods or frequencies based on source code analysis only. It is frequently used in determining cost/benefit ratios for certain compiler optimizations. In previous work,we introduced a simple algorithm to compute execution likelihoods,based on a control flow graph and heuristic branch prediction. In this paper we examine the benefits of using more involved analysis techniques in such a static profiler. In particular, we explore the use of value range propagation to improve the accuracy of the estimates, and we investigate the differences in estimating execution likelihoods and frequencies.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n An assessment methodology for trace reduction techniques.\n \n \n \n\n\n \n Cornelissen, B.; Moonen, L.; and Zaidman, A.\n\n\n \n\n\n\n In Mei, H.; and Wong, K., editor(s), International Conference on Software Maintenance (ICSM), pages 107–116, October 2008. IEEE\n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@InProceedings{cornelissen:2008:assessment,\n  abstract =     "Program comprehension is an important concern in software maintenance because these tasks generally require a degree of knowledge of the system at hand. While the use of dynamic analysis in this process has become increasingly popular, the literature indicates that dealing with the huge amounts of dynamic information remains a formidable challenge. Although various trace reduction techniques have been proposed to address these scalability concerns, their applicability in different contexts often remains unclear because extensive comparisons are lacking. This makes it difficult for end-users to determine which reduction types are best suited for a certain analysis task. In this paper, we propose an assessment methodology for the evaluation and comparison of trace reduction techniques. We illustrate the methodology using a selection of four types of reduction methods found in literature, which we evaluate and compare using a test set of seven large execution traces. Our approach enables a systematic assessment of trace reduction techniques, which eases the selection of suitable reductions in different settings, and allows for a more effective use of dynamic analysis tools in software maintenance.",\n  author =       "Cornelissen, Bas and Moonen, Leon and Zaidman, Andy",\n  booktitle =    "International Conference on Software Maintenance (ICSM)",\n  editor =       "Mei, Hong and Wong, Kenny",\n  ISBN =         "9781-424-4261-3-3",\n  month =        oct,\n  pages =        "107--116",\n  publisher =    "IEEE",\n  title =        "{An assessment methodology for trace reduction techniques}",\n  year =         "2008",\n}\n\n
\n
\n\n\n
\n Program comprehension is an important concern in software maintenance because these tasks generally require a degree of knowledge of the system at hand. While the use of dynamic analysis in this process has become increasingly popular, the literature indicates that dealing with the huge amounts of dynamic information remains a formidable challenge. Although various trace reduction techniques have been proposed to address these scalability concerns, their applicability in different contexts often remains unclear because extensive comparisons are lacking. This makes it difficult for end-users to determine which reduction types are best suited for a certain analysis task. In this paper, we propose an assessment methodology for the evaluation and comparison of trace reduction techniques. We illustrate the methodology using a selection of four types of reduction methods found in literature, which we evaluate and compare using a test set of seven large execution traces. Our approach enables a systematic assessment of trace reduction techniques, which eases the selection of suitable reductions in different settings, and allows for a more effective use of dynamic analysis tools in software maintenance.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n On the Interplay Between Software Testing and Evolution – and its Effect on Program Comprehension.\n \n \n \n\n\n \n Moonen, L.; van Deursen, A.; Zaidman, A.; and Bruntink, M.\n\n\n \n\n\n\n In Mens, T.; and Demeyer, S., editor(s), Software Evolution, 8, pages 173–202. Springer, 2008.\n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@InCollection{Simula.SE.437,\n  author =       "Moonen, Leon and van Deursen, Arie and Zaidman, Andy and Bruntink, Machiel",\n  booktitle =    "Software Evolution",\n  chapter =      "8",\n  editor =       "Mens, Tom and Demeyer, Serge",\n  ISBN =         "9783-540-7643-9-7",\n  pages =        "173--202",\n  publisher =    "Springer",\n  title =        "{On the Interplay Between Software Testing and Evolution -- and its Effect on Program Comprehension}",\n  year =         "2008",\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Tools for software maintenance, visualization, and reverse engineering - 2nd International Workshop on Advanced Software Development Tools and Techniques (WASDeTT).\n \n \n \n\n\n \n Kienle, H. M; Moonen, L.; Godfrey, M. W; and Müller, H. A\n\n\n \n\n\n\n In Mei, H.; and Wong, K., editor(s), International Conference on Software Maintenance (ICSM), pages 408–409, September 2008. IEEE\n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@InProceedings{Simula.SE.335,\n  abstract =     "The objective of the 2nd International Workshop on Advanced Software Development Tools and Techniques (WASDeTT) is to provide interested researchers with a forum to share their tool building experiences and to explore how tools can be built more effectively and efficiently. This workshop specifically focuses on tools for software maintenance and comprehension and addresses issues such as tool-building in an industrial context, component-based tool building, and tool building in teams.",\n  author =       "Kienle, Holger M and Moonen, Leon and Godfrey, Michael W and M{\\"{u}}ller, Hausi A",\n  booktitle =    "International Conference on Software Maintenance (ICSM)",\n  editor =       "Mei, Hong and Wong, Kenny",\n  ISBN =         "9781-424-4261-3-3",\n  month =        sep,\n  pages =        "408--409",\n  publisher =    "IEEE",\n  title =        "{Tools for software maintenance, visualization, and reverse engineering - 2nd International Workshop on Advanced Software Development Tools and Techniques (WASDeTT)}",\n  year =         "2008",\n}\n\n
\n
\n\n\n
\n The objective of the 2nd International Workshop on Advanced Software Development Tools and Techniques (WASDeTT) is to provide interested researchers with a forum to share their tool building experiences and to explore how tools can be built more effectively and efficiently. This workshop specifically focuses on tools for software maintenance and comprehension and addresses issues such as tool-building in an industrial context, component-based tool building, and tool building in teams.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Assessing the value of coding standards: An empirical study.\n \n \n \n\n\n \n Boogerd, C.; and Moonen, L.\n\n\n \n\n\n\n In Mei, H.; and Wong, K., editor(s), International Conference on Software Maintenance (ICSM), pages 277–286, October 2008. IEEE\n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@InProceedings{boogerd:2008:assessing,\n  abstract =     "In spite of the widespread use of coding standards and tools enforcing their rules, there is little empirical evidence supporting the intuition that they prevent the introduction of faults in software. Not only can compliance with a set of rules having little impact on the number of faults be considered wasted effort, but it can actually result in an increase in faults, as any modification has a non-zero probability of introducing a fault or triggering a previously concealed one. Therefore, it is important to build a body of empirical knowledge, helping us understand which rules are worthwhile enforcing, and which ones should be ignored in the context of fault reduction. In this paper, we describe two approaches to quantify the relation between rule violations and actual faults, and present empirical data on this relation for the MISRA C 2004 standard on an industrial case study.",\n  author =       "Boogerd, Cathal and Moonen, Leon",\n  booktitle =    "International Conference on Software Maintenance (ICSM)",\n  editor =       "Mei, Hong and Wong, Kenny",\n  ISBN =         "9781-424-4261-3-3",\n  month =        oct,\n  pages =        "277--286",\n  publisher =    "IEEE",\n  title =        "{Assessing the value of coding standards: An empirical study}",\n  year =         "2008",\n}\n\n
\n
\n\n\n
\n In spite of the widespread use of coding standards and tools enforcing their rules, there is little empirical evidence supporting the intuition that they prevent the introduction of faults in software. Not only can compliance with a set of rules having little impact on the number of faults be considered wasted effort, but it can actually result in an increase in faults, as any modification has a non-zero probability of introducing a fault or triggering a previously concealed one. Therefore, it is important to build a body of empirical knowledge, helping us understand which rules are worthwhile enforcing, and which ones should be ignored in the context of fault reduction. In this paper, we describe two approaches to quantify the relation between rule violations and actual faults, and present empirical data on this relation for the MISRA C 2004 standard on an industrial case study.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Execution Trace Analysis Through Massive Sequence and Circular Bundle Views to Support Program Comprehension.\n \n \n \n\n\n \n Cornelissen, B.; Holten, D.; Zaidman, A.; Moonen, L.; van Wijk, J J; and van Deursen, A.\n\n\n \n\n\n\n Journal of Systems and Software, 81(12). December 2008.\n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@Article{Cornelissen2008,\n  abstract =     "An important part of many software maintenance tasks is to gain a sufficient level of understanding of the system at hand. The use of dynamic information to aid in this software understanding process is a common practice nowadays. A major issue in this context is scalability: due to the vast amounts of information, it is a very difficult task to successfully navigate through the dynamic data contained in execution traces without getting lost. In this paper, we propose the use of two novel trace visualization techniques based on the massive sequence and circular bundle view, which both reflect a strong emphasis on scalability. These techniques have been implemented in a tool called Extravis. By means of distinct usage scenarios that were conducted on three different software systems, we show how our approach is applicable in three typical program comprehension tasks: trace exploration, feature location, and top-down analysis with domain knowledge.",\n  author =       "Cornelissen, Bas and Holten, Danny and Zaidman, Andy and Moonen, Leon and van Wijk, J J and van Deursen, Arie",\n  journal =      "Journal of Systems and Software",\n  month =        dec,\n  number =       "12",\n  title =        "{Execution Trace Analysis Through Massive Sequence and Circular Bundle Views to Support Program Comprehension}",\n  volume =       "81",\n  year =         "2008",\n}\n\n
\n
\n\n\n
\n An important part of many software maintenance tasks is to gain a sufficient level of understanding of the system at hand. The use of dynamic information to aid in this software understanding process is a common practice nowadays. A major issue in this context is scalability: due to the vast amounts of information, it is a very difficult task to successfully navigate through the dynamic data contained in execution traces without getting lost. In this paper, we propose the use of two novel trace visualization techniques based on the massive sequence and circular bundle view, which both reflect a strong emphasis on scalability. These techniques have been implemented in a tool called Extravis. By means of distinct usage scenarios that were conducted on three different software systems, we show how our approach is applicable in three typical program comprehension tasks: trace exploration, feature location, and top-down analysis with domain knowledge.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Dealing with Crosscutting Concerns in Existing Software (keynote).\n \n \n \n\n\n \n Moonen, L.\n\n\n \n\n\n\n In Hausi Müller, S. T.; and Wong, K., editor(s), International Conference on Software Maintenance - Frontiers of Software Maintenance (ICSM/FoSM), pages 68–77, 2008. IEEE\n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@InProceedings{Simula.SE.337,\n  abstract =     "This paper provides a roadmap for dealing with crosscutting concerns while trying to understand, maintain, and evolve existing software systems. We describe an integrated, systematic, approach that helps a software engineer with identifying, documenting and migrating crosscutting concerns in the source code of a software system, and discuss the integration considerations. We conclude with a number of lessons learned and directions for future research.",\n  author =       "Moonen, Leon",\n  booktitle =    "International Conference on Software Maintenance - Frontiers of Software Maintenance (ICSM/FoSM)",\n  editor =       "{Hausi M{\\"{u}}ller}, Scott Tilley and Wong, Kenny",\n  ISBN =         "9781-424-4265-4-6",\n  pages =        "68--77",\n  publisher =    "IEEE",\n  title =        "{Dealing with Crosscutting Concerns in Existing Software (keynote)}",\n  year =         "2008",\n}\n\n
\n
\n\n\n
\n This paper provides a roadmap for dealing with crosscutting concerns while trying to understand, maintain, and evolve existing software systems. We describe an integrated, systematic, approach that helps a software engineer with identifying, documenting and migrating crosscutting concerns in the source code of a software system, and discuss the integration considerations. We conclude with a number of lessons learned and directions for future research.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Identifying crosscutting concerns using fan-in analysis.\n \n \n \n\n\n \n Marin, M.; van Deursen, A.; and Moonen, L.\n\n\n \n\n\n\n ACM Transactions on Software Engineering and Methodology, 17(1): 1–37. January 2008.\n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@Article{mdm08.tosem,\n  abstract =     "Aspect mining is a reverse engineering process that aims at finding crosscutting concerns in existing systems. This paper proposes an aspect mining approach based on determining methods that are called from many different places, and hence have a high fan-in, which can be seen as a symptom of crosscutting functionality. The approach is semi-automatic, and consists of three steps: metric calculation, method filtering, and call site analysis. Carrying out these steps is an interactive process supported by an Eclipse plug-in called FINT. Fan-in analysis has been applied to three open source Java systems, totaling around 200,000 lines of code. The most interesting concerns identified are discussed in detail, which includes several concerns not previously discussed in the aspect-oriented literature. The results show that a significant number of crosscutting concerns can be recognized using fan-in analysis, and each of the three steps can be supported by tools.",\n  author =       "Marin, Marius and van Deursen, Arie and Moonen, Leon",\n  journal =      "ACM Transactions on Software Engineering and Methodology",\n  month =        jan,\n  number =       "1",\n  pages =        "1--37",\n  title =        "{Identifying crosscutting concerns using fan-in analysis}",\n  volume =       "17",\n  year =         "2008",\n}\n\n
\n
\n\n\n
\n Aspect mining is a reverse engineering process that aims at finding crosscutting concerns in existing systems. This paper proposes an aspect mining approach based on determining methods that are called from many different places, and hence have a high fan-in, which can be seen as a symptom of crosscutting functionality. The approach is semi-automatic, and consists of three steps: metric calculation, method filtering, and call site analysis. Carrying out these steps is an interactive process supported by an Eclipse plug-in called FINT. Fan-in analysis has been applied to three open source Java systems, totaling around 200,000 lines of code. The most interesting concerns identified are discussed in detail, which includes several concerns not previously discussed in the aspect-oriented literature. The results show that a significant number of crosscutting concerns can be recognized using fan-in analysis, and each of the three steps can be supported by tools.\n
\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 2007\n \n \n (10)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n Special Issue on Source Code Analysis and Manipulation.\n \n \n \n\n\n \n Di Penta, M.; and Moonen, L.\n\n\n \n\n\n\n Journal on Software Maintenance and Evolution: Research and Practice, 19(-): 203–204. July 2007.\n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@Article{scam06specialissue,\n  author =       "{Di Penta}, Massimiliano and Moonen, Leon",\n  journal =      "Journal on Software Maintenance and Evolution: Research and Practice",\n  month =        jul,\n  number =       "-",\n  pages =        "203--204",\n  title =        "{Special Issue on Source Code Analysis and Manipulation}",\n  volume =       "19",\n  year =         "2007",\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n SoQueT: Query-Based Documentation of Crosscutting Concerns.\n \n \n \n\n\n \n Marin, M.; Moonen, L.; and van Deursen, A.\n\n\n \n\n\n\n In International Conference on Software Engineering (ICSE), pages 758–761, 2007. IEEE\n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@InProceedings{Simula.SE.461,\n  abstract =     "Understanding crosscutting concerns is difficult because their underlying relations remain hidden in a class-based decomposition of a system. Based on an extensive investigation of crosscutting concerns in existing systems and literature, we identified a number of typical implementation idioms and relations that allow us to group such concerns around so-called ``sorts''. In this paper, we present SoQuet, a tool that uses sorts to support the consistent description and documentation of crosscutting relations using pre-defined, sort-specific query templates.",\n  author =       "Marin, Marius and Moonen, Leon and van Deursen, Arie",\n  booktitle =    "International Conference on Software Engineering (ICSE)",\n  pages =        "758--761",\n  publisher =    "IEEE",\n  title =        "{SoQueT: Query-Based Documentation of Crosscutting Concerns}",\n  year =         "2007",\n}\n\n
\n
\n\n\n
\n Understanding crosscutting concerns is difficult because their underlying relations remain hidden in a class-based decomposition of a system. Based on an extensive investigation of crosscutting concerns in existing systems and literature, we identified a number of typical implementation idioms and relations that allow us to group such concerns around so-called ``sorts''. In this paper, we present SoQuet, a tool that uses sorts to support the consistent description and documentation of crosscutting relations using pre-defined, sort-specific query templates.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Documenting Typical Crosscutting Concerns.\n \n \n \n\n\n \n Marin, M.; Moonen, L.; and van Deursen, A.\n\n\n \n\n\n\n In Penta, M. D.; and Maletic, J. I, editor(s), Working Conference on Reverse Engineering (WCRE), pages 31–40, 2007. IEEE\n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@InProceedings{Simula.SE.453,\n  abstract =     "Our analysis of crosscutting concerns in real-life software systems (totaling over 500,000 LOC) and in reports from literature indicated a number of properties that allow for their decomposition in primitive building blocks which are atomic crosscutting concerns. We classify these blocks in crosscutting concern sorts, and we use them to describe the crosscutting structure of many (well-known) designs and common mechanisms in software systems. In this paper, we formalize the notion of crosscutting concern sorts by means of relational queries over (object-oriented) source models. Based on these queries, we present a concern management tool called SOQUET, which can be used to document the occurrences of crosscutting concerns in object-oriented systems. We assess the sorts-based approach by using the tool to cover various crosscutting concerns in two open-source systems: JHOTDRAW and Java PETSTORE.",\n  author =       "Marin, Marius and Moonen, Leon and van Deursen, Arie",\n  booktitle =    "Working Conference on Reverse Engineering (WCRE)",\n  editor =       "Penta, Masasimiliano Di and Maletic, Jonathan I",\n  pages =        "31--40",\n  publisher =    "IEEE",\n  title =        "{Documenting Typical Crosscutting Concerns}",\n  year =         "2007",\n}\n\n
\n
\n\n\n
\n Our analysis of crosscutting concerns in real-life software systems (totaling over 500,000 LOC) and in reports from literature indicated a number of properties that allow for their decomposition in primitive building blocks which are atomic crosscutting concerns. We classify these blocks in crosscutting concern sorts, and we use them to describe the crosscutting structure of many (well-known) designs and common mechanisms in software systems. In this paper, we formalize the notion of crosscutting concern sorts by means of relational queries over (object-oriented) source models. Based on these queries, we present a concern management tool called SOQUET, which can be used to document the occurrences of crosscutting concerns in object-oriented systems. We assess the sorts-based approach by using the tool to cover various crosscutting concerns in two open-source systems: JHOTDRAW and Java PETSTORE.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Overview of the 3rd International Workshop on Code Based Software Security Assessments (CoBaSSA).\n \n \n \n\n\n \n Moonen, L.; and Mancoridis, S.\n\n\n \n\n\n\n In Penta, M. D.; and Maletic, J. I, editor(s), Working Conference on Reverse Engineering (WCRE), pages 300–302, 2007. IEEE\n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@InProceedings{cobassa07b,\n  author =       "Moonen, Leon and Mancoridis, Spiros",\n  booktitle =    "Working Conference on Reverse Engineering (WCRE)",\n  editor =       "Penta, Masasimiliano Di and Maletic, Jonathan I",\n  pages =        "300--302",\n  publisher =    "IEEE",\n  title =        "{Overview of the 3rd International Workshop on Code Based Software Security Assessments (CoBaSSA)}",\n  year =         "2007",\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Visualizing Testsuites to Aid in Software Understanding.\n \n \n \n\n\n \n Cornelissen, B.; van Deursen, A.; Moonen, L.; and Zaidman, A.\n\n\n \n\n\n\n In Conference on Software Maintenance and Reengineering (CSMR), 2007. IEEE\n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@InProceedings{Simula.SE.465,\n  author =       "Cornelissen, Bas and van Deursen, Arie and Moonen, Leon and Zaidman, Andy",\n  booktitle =    "Conference on Software Maintenance and Reengineering (CSMR)",\n  publisher =    "IEEE",\n  title =        "{Visualizing Testsuites to Aid in Software Understanding}",\n  year =         "2007",\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n An Integrated Crosscutting Concern Migration Strategy and its Application to JHotDraw.\n \n \n \n\n\n \n Marin, M.; Moonen, L.; and van Deursen, A.\n\n\n \n\n\n\n In Korel, B.; and Godfrey, M. W, editor(s), International Working Conference on Source Code Analysis and Manipulation (SCAM), pages 101–110, 2007. IEEE\n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@InProceedings{marin:2007:integrated,\n  author =       "Marin, Marius and Moonen, Leon and van Deursen, Arie",\n  booktitle =    "International Working Conference on Source Code Analysis and Manipulation (SCAM)",\n  editor =       "Korel, Bogdan and Godfrey, Michael W",\n  ISBN =         "0-7695-3034-6",\n  pages =        "101--110",\n  publisher =    "IEEE",\n  title =        "{An Integrated Crosscutting Concern Migration Strategy and its Application to JHotDraw}",\n  year =         "2007",\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Exploring Similarities in Execution Traces.\n \n \n \n\n\n \n Cornelissen, B.; and Moonen, L.\n\n\n \n\n\n\n In Greevy, O.; Hamou-Lhadj, A.; and Zaidman, A., editor(s), Workshop on Program Comprehension through Dynamic Analysis (PCODA), 2007. Delft University of Technology\n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@InProceedings{Simula.SE.451,\n  author =       "Cornelissen, Bas and Moonen, Leon",\n  booktitle =    "Workshop on Program Comprehension through Dynamic Analysis (PCODA)",\n  editor =       "Greevy, Orla and Hamou-Lhadj, Abdelwahab and Zaidman, Andy",\n  publisher =    "Delft University of Technology",\n  title =        "{Exploring Similarities in Execution Traces}",\n  year =         "2007",\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Proceedings 3rd International Workshop on Code Based Software Security Assessments (CoBaSSA).\n \n \n \n\n\n \n Moonen, L.; and Mancoridis, S.,\n editors.\n \n\n\n \n\n\n\n Delft University of Technology, Software Engineering Research Group, 2007.\n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@Book{cobassa07a,\n  editor =       "Moonen, Leon and Mancoridis, Spiros",\n  ISBN =         "1872-5392",\n  publisher =    "Delft University of Technology, Software Engineering Research Group",\n  title =        "{Proceedings 3rd International Workshop on Code Based Software Security Assessments (CoBaSSA)}",\n  year =         "2007",\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n SoQueT: Query-Based Documentation of Crosscutting Concerns.\n \n \n \n\n\n \n Marin, M.; Moonen, L.; and van Deursen, A.\n\n\n \n\n\n\n In International Conference on Aspect-Oriented Software Development (AOSD), 2007. ACM\n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@InProceedings{Simula.SE.463,\n  author =       "Marin, Marius and Moonen, Leon and van Deursen, Arie",\n  booktitle =    "International Conference on Aspect-Oriented Software Development (AOSD)",\n  publisher =    "ACM",\n  title =        "{SoQueT: Query-Based Documentation of Crosscutting Concerns}",\n  year =         "2007",\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Understanding Execution Traces Using Massive Sequence and Circular Bundle Views.\n \n \n \n\n\n \n Cornelissen, B.; Holten, D.; Zaidman, A.; Moonen, L.; van Wijk, J J; and van Deursen, A.\n\n\n \n\n\n\n In International Conference on Program Comprehension (ICPC), pages 49–58, 2007. \n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@InProceedings{Simula.SE.459,\n  abstract =     "The use of dynamic information to aid in software understanding is a common practice nowadays. One of the many approaches concerns the comprehension of execution traces. A major issue in this context is scalability: due to the vast amounts of information, it is a very difficult task to successfully find your way through such traces without getting lost. In this paper, we propose the use of a novel trace visu- alization method based on a massive sequence and circular bundle view, constructed with scalability in mind. By means of three usage scenarios that were conducted on three dif- ferent software systems, we show how our approach, imple- mented in a tool called EXTRAVIS, is applicable to the areas of trace exploration, feature location, and feature comprehension.",\n  author =       "Cornelissen, Bas and Holten, Danny and Zaidman, Andy and Moonen, Leon and van Wijk, J J and van Deursen, Arie",\n  booktitle =    "International Conference on Program Comprehension (ICPC)",\n  ISBN =         "0-7695-2860-0",\n  pages =        "49--58",\n  title =        "{Understanding Execution Traces Using Massive Sequence and Circular Bundle Views}",\n  year =         "2007",\n}\n\n
\n
\n\n\n
\n The use of dynamic information to aid in software understanding is a common practice nowadays. One of the many approaches concerns the comprehension of execution traces. A major issue in this context is scalability: due to the vast amounts of information, it is a very difficult task to successfully find your way through such traces without getting lost. In this paper, we propose the use of a novel trace visu- alization method based on a massive sequence and circular bundle view, constructed with scalability in mind. By means of three usage scenarios that were conducted on three dif- ferent software systems, we show how our approach, imple- mented in a tool called EXTRAVIS, is applicable to the areas of trace exploration, feature location, and feature comprehension.\n
\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 2006\n \n \n (14)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n Overview of the Second International Workshop on Code Based Software Security Assessments (CoBaSSA 2006).\n \n \n \n\n\n \n Moonen, L.; and Mancoridis, S.\n\n\n \n\n\n\n In Proceedings 13th IEEE Working Conference on Reverse Engineering (WCRE), 2006. IEEE\n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@InProceedings{Simula.SE.471,\n  author =       "Moonen, Leon and Mancoridis, Spiros",\n  booktitle =    "Proceedings 13th IEEE Working Conference on Reverse Engineering (WCRE)",\n  publisher =    "IEEE",\n  title =        "{Overview of the Second International Workshop on Code Based Software Security Assessments (CoBaSSA 2006)}",\n  type =         "Conference",\n  year =         "2006",\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Proceedings Sixth IEEE International Conference on Source Code Analysis and Manipulation (SCAM).\n \n \n \n\n\n \n Di Penta, M.; and Moonen, L.,\n editors.\n \n\n\n \n\n\n\n IEEE, 2006.\n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@Book{scam06a,\n  editor =       "{Di Penta}, Massimiliano and Moonen, Leon",\n  ISBN =         "0-7695-2353-6",\n  publisher =    "IEEE",\n  title =        "{Proceedings Sixth IEEE International Conference on Source Code Analysis and Manipulation (SCAM)}",\n  year =         "2006",\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Proceedings 2nd International Workshop on Code Based Software Security Assessments (CoBaSSA).\n \n \n \n\n\n \n Moonen, L.; and Mancoridis, S.,\n editors.\n \n\n\n \n\n\n\n Delft University of Technology, Software Engineering Research Group, November 2006.\n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@Book{cobassa06a,\n  editor =       "Moonen, Leon and Mancoridis, Spiros",\n  month =        nov,\n  publisher =    "Delft University of Technology, Software Engineering Research Group",\n  title =        "{Proceedings 2nd International Workshop on Code Based Software Security Assessments (CoBaSSA)}",\n  year =         "2006",\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Aiding in the Comprehension of Testsuites.\n \n \n \n\n\n \n Cornelissen, B.; van Deursen, A.; and Moonen, L.\n\n\n \n\n\n\n In Greevy, O.; Hamou-Lhadj, A.; and Zaidman, A., editor(s), Workshop on Program Comprehension through Dynamic Analysis (PCODA), pages 17–20, 2006. Universiteit Antwerpen\n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@InProceedings{cornelissen:2006:aiding,\n  author =       "Cornelissen, Bas and van Deursen, Arie and Moonen, Leon",\n  booktitle =    "Workshop on Program Comprehension through Dynamic Analysis (PCODA)",\n  editor =       "Greevy, Orla and Hamou-Lhadj, Abdelwahab and Zaidman, Andy",\n  pages =        "17--20",\n  publisher =    "Universiteit Antwerpen",\n  title =        "{Aiding in the Comprehension of Testsuites}",\n  year =         "2006",\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Proceedings Dagstuhl Seminar on Aspects for Legacy Applications (#06302).\n \n \n \n\n\n \n Moonen, L.; Ramalingam, G.; and Clarke, S.,\n editors.\n \n\n\n \n\n\n\n Dagstuhl, 2006.\n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@Book{afla06a,\n  editor =       "Moonen, Leon and Ramalingam, Ganesan and Clarke, Siobhan",\n  ISBN =         "1862 - 4405 (ISSN)",\n  publisher =    "Dagstuhl",\n  title =        "{Proceedings Dagstuhl Seminar on Aspects for Legacy Applications ({\\#}06302)}",\n  year =         "2006",\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Aspects for Legacy Applications.\n \n \n \n\n\n \n Moonen, L.; Ramalingam, G.; and Clarke, S.\n\n\n \n\n\n\n In Dagstuhl Seminar on Aspects for Legacy Applications (#06302), July 2006. Schloss Dagstuhl, Germany\n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@InProceedings{moonen:2006:aspects,\n  author =       "Moonen, Leon and Ramalingam, Ganesan and Clarke, Siobhan",\n  booktitle =    "Dagstuhl Seminar on Aspects for Legacy Applications ({\\#}06302)",\n  month =        jul,\n  publisher =    "Schloss Dagstuhl, Germany",\n  title =        "{Aspects for Legacy Applications}",\n  year =         "2006",\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Ranking Software Inspection Results using Execution Likelihood.\n \n \n \n\n\n \n Boogerd, C.; and Moonen, L.\n\n\n \n\n\n\n In Philips Software Conference (PSC), November 2006. Philips\n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@InProceedings{Simula.SE.467,\n  author =       "Boogerd, Cathal and Moonen, Leon",\n  booktitle =    "Philips Software Conference (PSC)",\n  month =        nov,\n  publisher =    "Philips",\n  title =        "{Ranking Software Inspection Results using Execution Likelihood}",\n  year =         "2006",\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Applying and Combining Three Different Aspect Mining Techniques.\n \n \n \n\n\n \n Ceccato, M.; Marin, M.; Mens, K.; Moonen, L.; Tonella, P.; and Tourwé, T.\n\n\n \n\n\n\n Software Quality Journal, 14(3): 209–231. September 2006.\n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@Article{ceccato:2006:applying,\n  abstract =     "Understanding a software system at source-code level requires understanding the different concerns that it addresses, which in turn requires a way to identify these concerns in the source code. Whereas some concerns are explicitly represented by program entities (like classes, methods and variables) and thus are easy to identify, crosscutting concerns are not captured by a single program entity but are scattered over many program entities and are tangled with the other concerns. Because of their crosscutting nature, such crosscutting concerns are difficult to identify, and reduce the understandability of the system as a whole. In this paper, we report on a combined experiment in which we try to identify crosscutting concerns in the JHotDraw framework automatically. We first apply three independently developed aspect mining techniques to JHotDraw and evaluate and compare their results. Based on this analysis, we present three interesting combinations of these three techniques, and show how these combinations provide a more complete coverage of the detected concerns as compared to the original techniques individually. Our results are a first step towards improving the understandability of a system that contains crosscutting concerns, and can be used as a basis for refactoring the identified crosscutting concerns into aspects.",\n  author =       "Ceccato, Mariano and Marin, Marius and Mens, Kim and Moonen, Leon and Tonella, Paolo and Tourw{\\'{e}}, Tom",\n  journal =      "Software Quality Journal",\n  month =        sep,\n  number =       "3",\n  pages =        "209--231",\n  title =        "{Applying and Combining Three Different Aspect Mining Techniques}",\n  volume =       "14",\n  year =         "2006",\n}\n\n
\n
\n\n\n
\n Understanding a software system at source-code level requires understanding the different concerns that it addresses, which in turn requires a way to identify these concerns in the source code. Whereas some concerns are explicitly represented by program entities (like classes, methods and variables) and thus are easy to identify, crosscutting concerns are not captured by a single program entity but are scattered over many program entities and are tangled with the other concerns. Because of their crosscutting nature, such crosscutting concerns are difficult to identify, and reduce the understandability of the system as a whole. In this paper, we report on a combined experiment in which we try to identify crosscutting concerns in the JHotDraw framework automatically. We first apply three independently developed aspect mining techniques to JHotDraw and evaluate and compare their results. Based on this analysis, we present three interesting combinations of these three techniques, and show how these combinations provide a more complete coverage of the detected concerns as compared to the original techniques individually. Our results are a first step towards improving the understandability of a system that contains crosscutting concerns, and can be used as a basis for refactoring the identified crosscutting concerns into aspects.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Proceedings First International Workshop Towards Evaluation of Aspect Mining (TEAM).\n \n \n \n\n\n \n Breu, S.; Moonen, L.; Bruntink, M.; and Krinke, J.,\n editors.\n \n\n\n \n\n\n\n Deft University of Technology, Software Engineering Research Group, 2006.\n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@Book{team06a,\n  editor =       "Breu, Silvia and Moonen, Leon and Bruntink, Magiel and Krinke, Jens",\n  ISBN =         "1872-5392 (ISSN)",\n  publisher =    "Deft University of Technology, Software Engineering Research Group",\n  title =        "{Proceedings First International Workshop Towards Evaluation of Aspect Mining (TEAM)}",\n  year =         "2006",\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Prioritizing Software Inspection Results using Static Profiling.\n \n \n \n\n\n \n Boogerd, C.; and Moonen, L.\n\n\n \n\n\n\n In International Workshop on Source Code Analysis and Manipulation (SCAM), September 2006. IEEE\n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@InProceedings{Simula.SE.479,\n  abstract =     "Static software checking tools are useful as an additional automated software inspection step that can easily be integrated in the development cycle and assist in creating secure, reliable and high quality code. However, an often quoted disadvantage of these tools is that they generate an overly large number of warnings, including many false positives due to the approximate analysis techniques. This information overload effectively limits their usefulness. In this paper we present ELAN, a technique that helps the user prioritize the information generated by a software inspection tool, based on a demand-driven computation of the likelihood that execution reaches the locations for which warnings are reported. This analysis is orthogonal to other prioritization techniques known from literature, such as severity levels and statistical analysis to reduce false positives. We evaluate feasibility of our technique using a number of case studies and assess the quality of our predictions by comparing them to actual values obtained by dynamic profiling.",\n  author =       "Boogerd, Cathal and Moonen, Leon",\n  booktitle =    "International Workshop on Source Code Analysis and Manipulation (SCAM)",\n  month =        sep,\n  publisher =    "IEEE",\n  title =        "{Prioritizing Software Inspection Results using Static Profiling}",\n  year =         "2006",\n}\n\n
\n
\n\n\n
\n Static software checking tools are useful as an additional automated software inspection step that can easily be integrated in the development cycle and assist in creating secure, reliable and high quality code. However, an often quoted disadvantage of these tools is that they generate an overly large number of warnings, including many false positives due to the approximate analysis techniques. This information overload effectively limits their usefulness. In this paper we present ELAN, a technique that helps the user prioritize the information generated by a software inspection tool, based on a demand-driven computation of the likelihood that execution reaches the locations for which warnings are reported. This analysis is orthogonal to other prioritization techniques known from literature, such as severity levels and statistical analysis to reduce false positives. We evaluate feasibility of our technique using a number of case studies and assess the quality of our predictions by comparing them to actual values obtained by dynamic profiling.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Overview of the 2nd International Workshop on Code Based Software Security Assessments (CoBaSSA).\n \n \n \n\n\n \n Moonen, L.; and Mancoridis, S.\n\n\n \n\n\n\n In Working Conference on Reverse Engineering (WCRE), 2006. IEEE\n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@InProceedings{cobassa06b,\n  author =       "Moonen, Leon and Mancoridis, Spiros",\n  booktitle =    "Working Conference on Reverse Engineering (WCRE)",\n  publisher =    "IEEE",\n  title =        "{Overview of the 2nd International Workshop on Code Based Software Security Assessments (CoBaSSA)}",\n  year =         "2006",\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n FINT: Tool Support for Aspect Mining.\n \n \n \n\n\n \n Marin, M.; Moonen, L.; and van Deursen, A.\n\n\n \n\n\n\n In Working Conference on Reverse Engineering (WCRE), October 2006. IEEE\n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@InProceedings{Marin:2006:fint,\n  abstract =     "Aspect mining requires adequate tool support to locate source code elements implementing crosscutting concerns (aka seeds), to explore and understand relations describing these elements, and to manage concerns and seeds during the project's life cycle. FINT is a tool implemented as an Eclipse plug-in that presently supports a number of techniques for the automatic identification of crosscutting concern seeds in source code. Furthermore, FINT allows for combination of mining techniques (results), facilitates code navigation and comprehension to reason and decide about candidate-seeds, and supports seeds management and persistence.",\n  author =       "Marin, Marius and Moonen, Leon and van Deursen, Arie",\n  booktitle =    "Working Conference on Reverse Engineering (WCRE)",\n  month =        oct,\n  publisher =    "IEEE",\n  title =        "{FINT: Tool Support for Aspect Mining}",\n  year =         "2006",\n}\n\n
\n
\n\n\n
\n Aspect mining requires adequate tool support to locate source code elements implementing crosscutting concerns (aka seeds), to explore and understand relations describing these elements, and to manage concerns and seeds during the project's life cycle. FINT is a tool implemented as an Eclipse plug-in that presently supports a number of techniques for the automatic identification of crosscutting concern seeds in source code. Furthermore, FINT allows for combination of mining techniques (results), facilitates code navigation and comprehension to reason and decide about candidate-seeds, and supports seeds management and persistence.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Documenting Software Systems Using Types.\n \n \n \n\n\n \n van Deursen, A.; and Moonen, L.\n\n\n \n\n\n\n Science of Computer Programming, 60(2): 205–220. April 2006.\n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@Article{dm06.te,\n  abstract =     "We show how hypertext-based program understanding tools can achieve new levels of abstraction by using inferred type information for cases where the subject software system is written in a weakly typed language. We propose TypeExplorer, a tool for browsing COBOL legacy systems based on these types. The paper addresses (1) how types, an invented abstraction, can be presented meaningfully to software re-engineers; (2) the implementation techniques used to construct TypeExplorer; and (3) the use of TypeExplorer for understanding legacy systems, at the level of individual statements as well as at the level of the software architecture - which is illustrated by using TypeExplorer to browse an industrial COBOL system of 100,000 lines of code.",\n  author =       "van Deursen, Arie and Moonen, Leon",\n  journal =      "Science of Computer Programming",\n  month =        apr,\n  number =       "2",\n  pages =        "205--220",\n  title =        "{Documenting Software Systems Using Types}",\n  volume =       "60",\n  year =         "2006",\n}\n\n
\n
\n\n\n
\n We show how hypertext-based program understanding tools can achieve new levels of abstraction by using inferred type information for cases where the subject software system is written in a weakly typed language. We propose TypeExplorer, a tool for browsing COBOL legacy systems based on these types. The paper addresses (1) how types, an invented abstraction, can be presented meaningfully to software re-engineers; (2) the implementation techniques used to construct TypeExplorer; and (3) the use of TypeExplorer for understanding legacy systems, at the level of individual statements as well as at the level of the software architecture - which is illustrated by using TypeExplorer to browse an industrial COBOL system of 100,000 lines of code.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n A common framework for aspect mining based on crosscutting concern sorts.\n \n \n \n\n\n \n Marin, M.; Moonen, L.; and van Deursen, A.\n\n\n \n\n\n\n In Working Conference on Reverse Engineering (WCRE), October 2006. IEEE\n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@InProceedings{marin:2006:common,\n  abstract =     "The increasing number of aspect mining techniques proposed in literature calls for a methodological way of comparing and combining them in order to assess, and improve on, their quality. This paper addresses this situation by proposing a common framework based on crosscutting concern sorts which allows for consistent assessment, comparison and combination of aspect mining techniques. The framework identifies a set of requirements that ensure homogeneity in formulating the mining goals, presenting the results and assessing their quality. We demonstrate feasibility of the approach by retrofitting an existing aspect mining technique to the framework, and by using it to design and implement two new mining techniques. We apply the three techniques to a known aspect mining benchmark and show how they can be consistently assessed and combined to increase the quality of the results. The techniques and combinations are implemented in FINT, our publicly available free aspect mining tool.",\n  author =       "Marin, Marius and Moonen, Leon and van Deursen, Arie",\n  booktitle =    "Working Conference on Reverse Engineering (WCRE)",\n  month =        oct,\n  publisher =    "IEEE",\n  title =        "{A common framework for aspect mining based on crosscutting concern sorts}",\n  year =         "2006",\n}\n\n
\n
\n\n\n
\n The increasing number of aspect mining techniques proposed in literature calls for a methodological way of comparing and combining them in order to assess, and improve on, their quality. This paper addresses this situation by proposing a common framework based on crosscutting concern sorts which allows for consistent assessment, comparison and combination of aspect mining techniques. The framework identifies a set of requirements that ensure homogeneity in formulating the mining goals, presenting the results and assessing their quality. We demonstrate feasibility of the approach by retrofitting an existing aspect mining technique to the framework, and by using it to design and implement two new mining techniques. We apply the three techniques to a known aspect mining benchmark and show how they can be consistently assessed and combined to increase the quality of the results. The techniques and combinations are implemented in FINT, our publicly available free aspect mining tool.\n
\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 2005\n \n \n (8)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n \n A classification of crosscutting concerns.\n \n \n \n \n\n\n \n Marin, M.; Moonen, L.; and van Deursen, A.\n\n\n \n\n\n\n In International Conference on Software Maintenance (ICSM), pages 673–676, September 2005. IEEE\n \n\n\n\n
\n\n\n\n \n \n \"APaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@InProceedings{marin:2005:classification,\n  abstract =     "Refactoring software to apply aspect oriented solutions requires a clear understanding of what are the potential crosscutting concerns and which aspect solutions to replace them with. This process can benefit from the recognition of recurring generic concerns and their reusable aspect solutions. In this paper, we propose a classification of crosscutting concerns in sorts based on the analysis of various refactoring efforts. We discuss how sorts help concern understanding and refactoring, how they support the identification of crosscutting concerns, and how they can contribute to the evolution of aspect languages.",\n  author =       "Marin, Marius and Moonen, Leon and van Deursen, Arie",\n  booktitle =    "International Conference on Software Maintenance (ICSM)",\n  DOI =          "10.1109/ICSM.2005.7",\n  file =         ":Users/leon/Documents/Mendeley Desktop/Marin, Moonen, Deursen - 2005 - A classification of crosscutting concerns - International Conference on Software Maintenance (ICSM).pdf:pdf",\n  ISBN =         "0-7695-2368-4",\n  keywords =     "aspect mining,aspect-oriented programming,object-oriented languages,object-oriented programming,program comprehension,refactoring,reverse engineering",\n  month =        sep,\n  pages =        "673--676",\n  publisher =    "IEEE",\n  title =        "{A classification of crosscutting concerns}",\n  URL =          "http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=1510171",\n  year =         "2005",\n}\n\n
\n
\n\n\n
\n Refactoring software to apply aspect oriented solutions requires a clear understanding of what are the potential crosscutting concerns and which aspect solutions to replace them with. This process can benefit from the recognition of recurring generic concerns and their reusable aspect solutions. In this paper, we propose a classification of crosscutting concerns in sorts based on the analysis of various refactoring efforts. We discuss how sorts help concern understanding and refactoring, how they support the identification of crosscutting concerns, and how they can contribute to the evolution of aspect languages.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n AJHotDraw: a Showcase for Refactoring to Aspects.\n \n \n \n\n\n \n van Deursen, A.; Marin, M.; and Moonen, L.\n\n\n \n\n\n\n In AOSD Workshop on Linking Aspect Technology and Evolution (LATE), 2005. CWI, the Netherlands\n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@InProceedings{Simula.SE.489,\n  author =       "van Deursen, Arie and Marin, Marius and Moonen, Leon",\n  booktitle =    "AOSD Workshop on Linking Aspect Technology and Evolution (LATE)",\n  publisher =    "CWI, the Netherlands",\n  title =        "{AJHotDraw: a Showcase for Refactoring to Aspects}",\n  type =         "Workshop",\n  year =         "2005",\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n A Systematic Aspect-Oriented Refactoring and Testing Strategy, and Its Application to JHotDraw (report).\n \n \n \n\n\n \n van Deursen, A.; Marin, M.; and Moonen, L.\n\n\n \n\n\n\n Technical Report SEN-R0507, 2005.\n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@TechReport{deursen:2005:refactoring:report,\n  author =       "van Deursen, Arie and Marin, Marius and Moonen, Leon",\n  booktitle =    "Technical Report",\n  number =       "SEN-R0507",\n  publisher =    "Centrum voor Wiskunde en Informatica (CWI)",\n  title =        "{A Systematic Aspect-Oriented Refactoring and Testing Strategy, and Its Application to JHotDraw (report)}",\n  year =         "2005",\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n A Qualitative Comparison of Three Aspect Mining Techniques.\n \n \n \n\n\n \n Ceccato, M.; Marin, M.; Mens, K.; Moonen, L.; Tonella, P.; and Tourwé, T.\n\n\n \n\n\n\n In International Conference on Program Comprehension (ICPC), 2005. IEEE\n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@InProceedings{ceccato:2005:qualitative,\n  author =       "Ceccato, Mariano and Marin, Marius and Mens, Kim and Moonen, Leon and Tonella, Paolo and Tourw{\\'{e}}, Tom",\n  booktitle =    "International Conference on Program Comprehension (ICPC)",\n  publisher =    "IEEE",\n  title =        "{A Qualitative Comparison of Three Aspect Mining Techniques}",\n  year =         "2005",\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Proceedings First International Workshop on Code Based Software Security Assessments (CoBaSSA).\n \n \n \n\n\n \n Moonen, L.; and Mancoridis, S.\n\n\n \n\n\n\n Delft University of Technology, 2005.\n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@Book{cobassa05a,\n  author =       "Moonen, Leon and Mancoridis, Spiros",\n  publisher =    "Delft University of Technology",\n  title =        "{Proceedings First International Workshop on Code Based Software Security Assessments (CoBaSSA)}",\n  year =         "2005",\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Identification of Variation Points Using Dynamic Analysis.\n \n \n \n\n\n \n Cornelissen, B.; Graaf, B.; and Moonen, L.\n\n\n \n\n\n\n In International Workshop on Reengineering Towards Product Lines (R2PL), pages 9–13, 2005. Software Engineering Institute, USA\n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@InProceedings{Simula.SE.485,\n  author =       "Cornelissen, Bas and Graaf, Bas and Moonen, Leon",\n  booktitle =    "International Workshop on Reengineering Towards Product Lines (R2PL)",\n  pages =        "9--13",\n  publisher =    "Software Engineering Institute, USA",\n  title =        "{Identification of Variation Points Using Dynamic Analysis}",\n  year =         "2005",\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n An Approach to Aspect Refactoring Based on Crosscutting Concern Types.\n \n \n \n\n\n \n Marin, M.; Moonen, L.; and van Deursen, A.\n\n\n \n\n\n\n In International Workshop on the Modeling and Analysis of Concerns in Software (MACS) at ICSE, 2005. \n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@InProceedings{marin:2005:approach,\n  author =       "Marin, Marius and Moonen, Leon and van Deursen, Arie",\n  booktitle =    "International Workshop on the Modeling and Analysis of Concerns in Software (MACS) at ICSE",\n  title =        "{An Approach to Aspect Refactoring Based on Crosscutting Concern Types}",\n  year =         "2005",\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Overview of the First International Workshop on Code Based Software Security Assessments (CoBaSSA).\n \n \n \n\n\n \n Moonen, L.; and Mancoridis, S.\n\n\n \n\n\n\n In Working Conference on Reverse Engineering (WCRE), 2005. IEEE\n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@InProceedings{cobassa05b,\n  author =       "Moonen, Leon and Mancoridis, Spiros",\n  booktitle =    "Working Conference on Reverse Engineering (WCRE)",\n  publisher =    "IEEE",\n  title =        "{Overview of the First International Workshop on Code Based Software Security Assessments (CoBaSSA)}",\n  year =         "2005",\n}\n
\n
\n\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 2004\n \n \n (3)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n Identifying aspects using fan-in analysis.\n \n \n \n\n\n \n Marin, M.; van Deursen, A.; and Moonen, L.\n\n\n \n\n\n\n In Working Conference on Reverse Engineering (WCRE), pages 132–141, 2004. IEEE\n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@InProceedings{Simula.SE.495,\n  author =       "Marin, Marius and van Deursen, Arie and Moonen, Leon",\n  booktitle =    "Working Conference on Reverse Engineering (WCRE)",\n  pages =        "132--141",\n  publisher =    "IEEE",\n  title =        "{Identifying aspects using fan-in analysis}",\n  year =         "2004",\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Viewpoints in Software Architecture Reconstruction.\n \n \n \n\n\n \n van Deursen, A.; Hofmeister, C.; Koschke, R.; Moonen, L.; and Riva, C.\n\n\n \n\n\n\n In Proceedings 6th Workshop on Software Reengineering (WSR), 2004. Bad Honnef\n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@InProceedings{Simula.SE.527,\n  author =       "van Deursen, Arie and Hofmeister, Christine and Koschke, Rainer and Moonen, Leon and Riva, Claudio",\n  booktitle =    "Proceedings 6th Workshop on Software Reengineering (WSR)",\n  publisher =    "Bad Honnef",\n  title =        "{Viewpoints in Software Architecture Reconstruction}",\n  type =         "Workshop",\n  year =         "2004",\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Symphony: View-Driven Software Architecture Reconstruction.\n \n \n \n\n\n \n van Deursen, A.; Hofmeister, C.; Koschke, R.; Moonen, L.; and Riva, C.\n\n\n \n\n\n\n In Proceedings IEEE/IFIP Working Conference on Software Architecture (WICSA), 2004. IEEE\n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@InProceedings{Simula.SE.497,\n  abstract =     "Authentic descriptions of a software architecture are required as a reliable foundation for any but trivial changes to a system. Far too often, architecture descriptions of exist- ing systems are out of sync with the implementation. If they are, they must be reconstructed. There are many existing techniques for reconstructing individual architecture views, but no information about how to select views for reconstruction, or about process aspects of architecture reconstruction in general. In this paper we describe view-driven process for reconstructing software architecture that fills this gap. To describe Symphony, we present and compare different case studies, thus serving a secondary goal of sharing real-life reconstruction experience. The Symphony process incorporates the state of the practice, where reconstruction is problem-driven and uses a rich set of architecture views. Symphony provides a common framework for reporting reconstruction experiences and for comparing reconstruction approaches. Finally, it is a vehicle for exposing and demarcating research problems in software architecture reconstruction.",\n  author =       "van Deursen, Arie and Hofmeister, Christine and Koschke, Rainer and Moonen, Leon and Riva, Claudio",\n  booktitle =    "Proceedings IEEE/IFIP Working Conference on Software Architecture (WICSA)",\n  publisher =    "IEEE",\n  title =        "{Symphony: View-Driven Software Architecture Reconstruction}",\n  type =         "Conference",\n  year =         "2004",\n}\n\n
\n
\n\n\n
\n Authentic descriptions of a software architecture are required as a reliable foundation for any but trivial changes to a system. Far too often, architecture descriptions of exist- ing systems are out of sync with the implementation. If they are, they must be reconstructed. There are many existing techniques for reconstructing individual architecture views, but no information about how to select views for reconstruction, or about process aspects of architecture reconstruction in general. In this paper we describe view-driven process for reconstructing software architecture that fills this gap. To describe Symphony, we present and compare different case studies, thus serving a secondary goal of sharing real-life reconstruction experience. The Symphony process incorporates the state of the practice, where reconstruction is problem-driven and uses a rich set of architecture views. Symphony provides a common framework for reporting reconstruction experiences and for comparing reconstruction approaches. Finally, it is a vehicle for exposing and demarcating research problems in software architecture reconstruction.\n
\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 2003\n \n \n (4)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n Generalized Parsing and Term Rewriting - Semantics Directed Disambiguation.\n \n \n \n\n\n \n van den Brand, M. G. J.; Klusener, S.; Moonen, L.; and Vinju, J.\n\n\n \n\n\n\n Electronic Notes in Theoretical Computer Science, 82(3): 575–591. 2003.\n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@Article{bmv03-entcs,\n  abstract =     "Generalized parsing technology provides the power and flexibility to attack real-world parsing applications. However, many programming languages have syntactical ambiguities that can only be solved using semantical analysis. In this paper we propose to apply the paradigm of term rewriting to filter ambiguities based on semantical information. We start with the definition of a representation of ambiguous derivations. Then we extend term rewriting with means to handle such derivations. Finally, we apply these tools to some real world examples, namely C and COBOL. The resulting architecture is simple and efficient as compared to semantic directed parsing.",\n  author =       "van den Brand, M. G. J. and Klusener, Steven and Moonen, Leon and Vinju, Jurgen",\n  journal =      "Electronic Notes in Theoretical Computer Science",\n  number =       "3",\n  pages =        "575--591",\n  title =        "{Generalized Parsing and Term Rewriting - Semantics Directed Disambiguation}",\n  volume =       "82",\n  year =         "2003",\n}\n\n
\n
\n\n\n
\n Generalized parsing technology provides the power and flexibility to attack real-world parsing applications. However, many programming languages have syntactical ambiguities that can only be solved using semantical analysis. In this paper we propose to apply the paradigm of term rewriting to filter ambiguities based on semantical information. We start with the definition of a representation of ambiguous derivations. Then we extend term rewriting with means to handle such derivations. Finally, we apply these tools to some real world examples, namely C and COBOL. The resulting architecture is simple and efficient as compared to semantic directed parsing.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Exploring Software Systems.\n \n \n \n\n\n \n Moonen, L.\n\n\n \n\n\n\n In International Conference on Software Maintenance (ICSM), September 2003. IEEE\n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@InProceedings{Simula.SE.501,\n  author =       "Moonen, Leon",\n  booktitle =    "International Conference on Software Maintenance (ICSM)",\n  month =        sep,\n  publisher =    "IEEE",\n  title =        "{Exploring Software Systems}",\n  year =         "2003",\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Generalized Parsing and Term Rewriting - Semantics Directed Disambiguation.\n \n \n \n\n\n \n van den Brand, M. G. J.; Klusener, S.; Moonen, L.; and Vinju, J.\n\n\n \n\n\n\n In Language Descriptions Tools and Applications (LDTA), 2003. \n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@InProceedings{Simula.SE.499,\n  author =       "van den Brand, M. G. J. and Klusener, Steven and Moonen, Leon and Vinju, Jurgen",\n  booktitle =    "Language Descriptions Tools and Applications (LDTA)",\n  title =        "{Generalized Parsing and Term Rewriting - Semantics Directed Disambiguation}",\n  type =         "Workshop",\n  year =         "2003",\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Aspect Mining and Refactoring.\n \n \n \n\n\n \n van Deursen, A.; Marin, M.; and Moonen, L.\n\n\n \n\n\n\n In International Workshop on REFactoring: Achievements, Challenges, Effects (REFACE), November 2003. University of Waterloo, Canada\n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@InProceedings{Simula.SE.529,\n  author =       "van Deursen, Arie and Marin, Marius and Moonen, Leon",\n  booktitle =    "International Workshop on REFactoring: Achievements, Challenges, Effects (REFACE)",\n  month =        nov,\n  publisher =    "University of Waterloo, Canada",\n  title =        "{Aspect Mining and Refactoring}",\n  type =         "Workshop",\n  year =         "2003",\n}\n\n
\n
\n\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 2002\n \n \n (6)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n \n Lightweight impact analysis using island grammars.\n \n \n \n \n\n\n \n Moonen, L.\n\n\n \n\n\n\n In International Workshop on Program Comprehension (IWPC), pages 219–228, June 2002. IEEE\n \n\n\n\n
\n\n\n\n \n \n \"LightweightPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@InProceedings{moonen:2002:impact,\n  abstract =     "Impact analysis is needed for the planning and estimation of software maintenance projects. Traditional impact analysis techniques tend to be too expensive for this phase, so there is need for more lightweight approaches. We present a technique for the generation of lightweight impact analyzers from island grammars. We demonstrate this technique using a real-world case study in which we describe how island grammars can be used to find account numbers in the software portfolio of a large bank. We show how we have implemented this analysis and achieved lightweightness using a reusable generative framework for impact analyzers.",\n  author =       "Moonen, Leon",\n  booktitle =    "International Workshop on Program Comprehension (IWPC)",\n  DOI =          "10.1109/WPC.2002.1021343",\n  file =         ":Users/leon/Documents/Mendeley Desktop/Moonen - 2002 - Lightweight impact analysis using island grammars - International Workshop on Program Comprehension (IWPC).pdf:pdf",\n  ISBN =         "0-7695-1495-2",\n  ISSN =         "1092-8138",\n  keywords =     "impact analysis,island grammars,parser generation,program comprehension,software exploration",\n  month =        jun,\n  pages =        "219--228",\n  publisher =    "IEEE",\n  title =        "{Lightweight impact analysis using island grammars}",\n  URL =          "http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=1021343",\n  year =         "2002",\n}\n\n
\n
\n\n\n
\n Impact analysis is needed for the planning and estimation of software maintenance projects. Traditional impact analysis techniques tend to be too expensive for this phase, so there is need for more lightweight approaches. We present a technique for the generation of lightweight impact analyzers from island grammars. We demonstrate this technique using a real-world case study in which we describe how island grammars can be used to find account numbers in the software portfolio of a large bank. We show how we have implemented this analysis and achieved lightweightness using a reusable generative framework for impact analyzers.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Refactoring Test Code.\n \n \n \n\n\n \n van Deursen, A.; Moonen, L.; van den Bergh, A.; and Kok, G.\n\n\n \n\n\n\n In Marchesi, M; Succi, G; Wells, D; and Williams, L, editor(s), eXtreme Programming Perspectives, 14, pages 141–152. Addison-Wesley Longman, 2002.\n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@InCollection{Simula.SE.439,\n  author =       "van Deursen, Arie and Moonen, Leon and van den Bergh, Alex and Kok, Gerard",\n  booktitle =    "eXtreme Programming Perspectives",\n  chapter =      "14",\n  editor =       "Marchesi, M and Succi, G and Wells, D and Williams, L",\n  ISBN =         "978-0-201-77005-6",\n  organization = "Addison-Wesley Longman",\n  pages =        "141--152",\n  publisher =    "Addison-Wesley Longman",\n  title =        "{Refactoring Test Code}",\n  year =         "2002",\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Proceedings First International Workshop on Testing in XP (WTiXP).\n \n \n \n\n\n \n Moonen, L.,\n editor.\n \n\n\n \n\n\n\n Centrum voor Wiskunde en Informatica (CWI), May 2002.\n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@Book{moonen:2002:wtixp,\n  editor =       "Moonen, Leon",\n  month =        may,\n  publisher =    "Centrum voor Wiskunde en Informatica (CWI)",\n  title =        "{Proceedings First International Workshop on Testing in XP (WTiXP)}",\n  year =         "2002",\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Java Quality Assurance by Detecting Code Smells.\n \n \n \n\n\n \n van Emden, E.; and Moonen, L.\n\n\n \n\n\n\n In Working Conference on Reverse Engineering (WCRE), pages 97–106, October 2002. IEEE\n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@InProceedings{emden:2002:java,\n  abstract =     "Software inspection is a known technique for improving software quality. It involves carefully examining the code, the design, and the documentation of software and checking these for aspects that are known to be potentially problematic based on past experience. Code smells are a metaphor to describe patterns that are generally associated with bad design and bad programming practices. Originally, code smells are used to find the places in software that could benefit from refactoring. In this paper, we investigate how the quality of code can be automatically assessed by checking for the presence of code smells and how this approach can contribute to automatic code inspection. We present an approach for the automatic detection and visualization of code smells and discuss how this approach can be used in the design of a software inspection tool. We illustrate the feasibility of our approach with the development of jCOSMO, a prototype code smell browser that detects and visualizes code smells in JAVA source code. Finally, we show how this tool was applied in a case study. Keywords: software inspection, quality assurance, Java, refactoring, code smells.",\n  author =       "van Emden, Eva and Moonen, Leon",\n  booktitle =    "Working Conference on Reverse Engineering (WCRE)",\n  month =        oct,\n  pages =        "97--106",\n  publisher =    "IEEE",\n  title =        "{Java Quality Assurance by Detecting Code Smells}",\n  year =         "2002",\n}\n\n
\n
\n\n\n
\n Software inspection is a known technique for improving software quality. It involves carefully examining the code, the design, and the documentation of software and checking these for aspects that are known to be potentially problematic based on past experience. Code smells are a metaphor to describe patterns that are generally associated with bad design and bad programming practices. Originally, code smells are used to find the places in software that could benefit from refactoring. In this paper, we investigate how the quality of code can be automatically assessed by checking for the presence of code smells and how this approach can contribute to automatic code inspection. We present an approach for the automatic detection and visualization of code smells and discuss how this approach can be used in the design of a software inspection tool. We illustrate the feasibility of our approach with the development of jCOSMO, a prototype code smell browser that detects and visualizes code smells in JAVA source code. Finally, we show how this tool was applied in a case study. Keywords: software inspection, quality assurance, Java, refactoring, code smells.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Exploring Software Systems.\n \n \n \n\n\n \n Moonen, L.\n\n\n \n\n\n\n Ph.D. Thesis, Faculty of Natural Sciences, Mathematics, and Computer Science, University of Amsterdam, December 2002.\n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n\n\n\n
\n
@PhdThesis{moonen-2002-ess,\n  author =       "Moonen, Leon",\n  booktitle =    "PHD Dissertation",\n  ISBN =         "90-5776-094-0",\n  keywords =     "program comprehension,software exploration",\n  month =        dec,\n  school =       "Faculty of Natural Sciences, Mathematics, and Computer Science, University of Amsterdam",\n  title =        "{Exploring Software Systems}",\n  year =         "2002",\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n The Video Store Revisited: Thoughts on Refactoring and Testing.\n \n \n \n\n\n \n van Deursen, A.; and Moonen, L.\n\n\n \n\n\n\n In Marchesi, M; and Succi, G, editor(s), Proceedings 3nd International Conference on Extreme Programming and Agile Processes in Software Engineering (XP2002), May 2002. \n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@InProceedings{Simula.SE.507,\n  author =       "van Deursen, Arie and Moonen, Leon",\n  booktitle =    "Proceedings 3nd International Conference on Extreme Programming and Agile Processes in Software Engineering (XP2002)",\n  editor =       "Marchesi, M and Succi, G",\n  month =        may,\n  title =        "{The Video Store Revisited: Thoughts on Refactoring and Testing}",\n  type =         "Conference",\n  year =         "2002",\n}\n\n
\n
\n\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 2001\n \n \n (7)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n \n The ASF+SDF Meta-Environment.\n \n \n \n \n\n\n \n van den Brand, M. G. J.; van Deursen, A.; Heering, J.; de Jong, H. A.; de Jonge, M.; Kuipers, T.; Klint, P.; Moonen, L.; Olivier, P. A.; Scheerder, J.; Vinju, J.; Visser, E.; and Visser, J.\n\n\n \n\n\n\n Electronic Notes in Theoretical Computer Science, 44(2): 3–8. June 2001.\n \n\n\n\n
\n\n\n\n \n \n \"ThePaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@Article{brand:2001:ASF:journal,\n  abstract =     "The ASF+SDF Meta-Environment is an interactive development environment for the automatic generation of interactive systems for constructing language definitions and generating tools for them. Over the years, this system has been used in a variety of academic and commercial projects ranging from formal program manipulation to conversion of COBOL systems. Since the existing implementation of the Meta-Environment started exhibiting more and more characteristics of a legacy system, we decided to build a completely new, component-based, version. We demonstrate this new system and stress its open architecture.",\n  author =       "van den Brand, M. G. J. and van Deursen, Arie and Heering, Jan and de Jong, H. A. and de Jonge, Merijn and Kuipers, Tobias and Klint, Paul and Moonen, Leon and Olivier, P. A. and Scheerder, Jeroen and Vinju, Jurgen and Visser, Eelco and Visser, Joost",\n  DOI =          "10.1016/S1571-0661(04)80917-4",\n  file =         ":Users/leon/Documents/Mendeley Desktop/Brand et al. - 2001 - The ASFSDF Meta-Environment - Electronic Notes in Theoretical Computer Science.pdf:pdf",\n  ISBN =         "3-540-41861-X",\n  ISSN =         "15710661",\n  journal =      "Electronic Notes in Theoretical Computer Science",\n  month =        jun,\n  number =       "2",\n  pages =        "3--8",\n  series =       "LNCS",\n  title =        "{The ASF+SDF Meta-Environment}",\n  URL =          "http://linkinghub.elsevier.com/retrieve/pii/S1571066104809174",\n  volume =       "44",\n  year =         "2001",\n}\n\n
\n
\n\n\n
\n The ASF+SDF Meta-Environment is an interactive development environment for the automatic generation of interactive systems for constructing language definitions and generating tools for them. Over the years, this system has been used in a variety of academic and commercial projects ranging from formal program manipulation to conversion of COBOL systems. Since the existing implementation of the Meta-Environment started exhibiting more and more characteristics of a legacy system, we decided to build a completely new, component-based, version. We demonstrate this new system and stress its open architecture.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n The ASF+SDF Meta-environment: A Component-Based Language Development Environment.\n \n \n \n \n\n\n \n van den Brand, M. G. J.; van Deursen, A.; Heering, J.; de Jong, H. A.; de Jonge, M.; Kuipers, T.; Klint, P.; Moonen, L.; Olivier, P. A.; Scheerder, J.; Vinju, J.; Visser, E.; and Visser, J.\n\n\n \n\n\n\n In Compiler Construction (CC), volume 2027, of Lecture Notes in Computer Science, pages 365–370. Springer-Verlag, LNCS 2027 edition, 2001.\n \n\n\n\n
\n\n\n\n \n \n \"ThePaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@InCollection{brand:2001:asf:cc,\n  abstract =     "The ASF+SDF Meta-environment is an interactive development environment for the automatic generation of interactive systems for constructing language definitions and generating tools for them. Over the years, this system has been used in a variety of academic and commercial projects ranging from formal program manipulation to conversion of COBOL systems. Since the existing implementation of the Meta-environment started exhibiting more and more characteristics of a legacy system, we decided to build a completely new, component-based, version. We demonstrate this new system and stress its open architecture.",\n  author =       "van den Brand, M. G. J. and van Deursen, Arie and Heering, Jan and de Jong, H. A. and de Jonge, Merijn and Kuipers, Tobias and Klint, Paul and Moonen, Leon and Olivier, P. A. and Scheerder, Jeroen and Vinju, Jurgen and Visser, Eelco and Visser, Joost",\n  booktitle =    "Compiler Construction (CC)",\n  DOI =          "10.1007/3-540-45306-7_26",\n  edition =      "LNCS 2027",\n  file =         ":Users/leon/Documents/Mendeley Desktop/Brand et al. - 2001 - The ASFSDF Meta-environment A Component-Based Language Development Environment - Compiler Construction (CC).pdf:pdf",\n  pages =        "365--370",\n  publisher =    "Springer-Verlag",\n  series =       "Lecture Notes in Computer Science",\n  title =        "{The ASF+SDF Meta-environment: A Component-Based Language Development Environment}",\n  URL =          "http://link.springer.com/10.1007/3-540-45306-7{\\_}26",\n  volume =       "2027",\n  year =         "2001",\n}\n\n
\n
\n\n\n
\n The ASF+SDF Meta-environment is an interactive development environment for the automatic generation of interactive systems for constructing language definitions and generating tools for them. Over the years, this system has been used in a variety of academic and commercial projects ranging from formal program manipulation to conversion of COBOL systems. Since the existing implementation of the Meta-environment started exhibiting more and more characteristics of a legacy system, we decided to build a completely new, component-based, version. We demonstrate this new system and stress its open architecture.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Generating Robust Parsers Using Island Grammars.\n \n \n \n \n\n\n \n Moonen, L.\n\n\n \n\n\n\n In Proceedings 8th Working Conference on Reverse Engineering, pages 13–22, October 2001. IEEE\n \n\n\n\n
\n\n\n\n \n \n \"GeneratingPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n  \n \n 1 download\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@InProceedings{moonen:2001:robust,\n  abstract =     "Source model extraction, the automated extraction of information from system artifacts, is a common phase in reverse engineering tools. One of the major challenges of this phase is creating extractors that can deal with irregularities in the artifacts that are typical for the reverse engineering domain (for example, syntactic errors, incomplete source code, language dialects and embedded languages). The paper proposes a solution in the form of island grammars, a special kind of grammar that combines the detailed specification possibilities of grammars with the liberal behavior of lexical approaches. We show how island grammars can be used to generate robust parsers that combine the accuracy of syntactical analysis with the speed, flexibility and tolerance usually only found in lexical analysis. We conclude with a discussion of the development of MANGROVE, a generator for source model extractors based on island grammars and describe its application to a number of case studies",\n  author =       "Moonen, Leon",\n  booktitle =    "Proceedings 8th Working Conference on Reverse Engineering",\n  DOI =          "10.1109/WCRE.2001.957806",\n  file =         ":Users/leon/Documents/Mendeley Desktop/Moonen - 2001 - Generating Robust Parsers Using Island Grammars - Proceedings 8th Working Conference on Reverse Engineering.pdf:pdf",\n  ISBN =         "0-7695-1303-4",\n  ISSN =         "1095-1350",\n  keywords =     "fuzzy parsing,island grammars,parser generation,partial parsing,program analysis,reverse engineering,source model extraction",\n  month =        oct,\n  pages =        "13--22",\n  publisher =    "IEEE",\n  title =        "{Generating Robust Parsers Using Island Grammars}",\n  type =         "Conference",\n  URL =          "http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=957806",\n  year =         "2001",\n}\n\n
\n
\n\n\n
\n Source model extraction, the automated extraction of information from system artifacts, is a common phase in reverse engineering tools. One of the major challenges of this phase is creating extractors that can deal with irregularities in the artifacts that are typical for the reverse engineering domain (for example, syntactic errors, incomplete source code, language dialects and embedded languages). The paper proposes a solution in the form of island grammars, a special kind of grammar that combines the detailed specification possibilities of grammars with the liberal behavior of lexical approaches. We show how island grammars can be used to generate robust parsers that combine the accuracy of syntactical analysis with the speed, flexibility and tolerance usually only found in lexical analysis. We conclude with a discussion of the development of MANGROVE, a generator for source model extractors based on island grammars and describe its application to a number of case studies\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n An Empirical Study Into Cobol Type Inferencing.\n \n \n \n\n\n \n van Deursen, A.; Moonen, L.; van Deursen, A.; and Moonen, L.\n\n\n \n\n\n\n Science of Computer Programming, 40(2-3): 189–211. July 2001.\n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@Article{deursen:2001:empirical,\n  abstract =     "In a typical COBOL program, the data division consists of 50{\\%} of the lines of code. Automatic type inference can help to understand the large collections of variable declarations contained therein, showing how variables are related based on their actual usage. The most problematic aspect of type inference is pollution, the phenomenon that types become too large, and contain variables that intuitively should not belong to the same type. The aim of the paper is to provide empirical evidence for the hypothesis that the use of subtyping is an effective way for dealing with pollution. The main results include a tool set to carry out type inference experiments, a suite of metrics characterizing type inference outcomes, and the experimental observation that only one instance of pollution occurs in the case study conducted.",\n  author =       "van Deursen, Arie and Moonen, Leon and van Deursen, Arie and Moonen, Leon",\n  journal =      "Science of Computer Programming",\n  month =        jul,\n  number =       "2-3",\n  pages =        "189--211",\n  title =        "{An Empirical Study Into Cobol Type Inferencing}",\n  volume =       "40",\n  year =         "2001",\n}\n\n
\n
\n\n\n
\n In a typical COBOL program, the data division consists of 50% of the lines of code. Automatic type inference can help to understand the large collections of variable declarations contained therein, showing how variables are related based on their actual usage. The most problematic aspect of type inference is pollution, the phenomenon that types become too large, and contain variables that intuitively should not belong to the same type. The aim of the paper is to provide empirical evidence for the hypothesis that the use of subtyping is an effective way for dealing with pollution. The main results include a tool set to carry out type inference experiments, a suite of metrics characterizing type inference outcomes, and the experimental observation that only one instance of pollution occurs in the case study conducted.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n From Research to Startup: Experiences in Interoperability.\n \n \n \n\n\n \n van Deursen, A.; and Moonen, L.\n\n\n \n\n\n\n In Ebert, J; Kontogiannis, K; and Winter, A, editor(s), Interoperability in Reengineering Tools, of Dagstuhl Seminar Report 296, 23, pages 19. Dagstuhl, 2001.\n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@InCollection{Simula.SE.443,\n  author =       "van Deursen, Arie and Moonen, Leon",\n  booktitle =    "Interoperability in Reengineering Tools",\n  chapter =      "23",\n  editor =       "Ebert, J and Kontogiannis, K and Winter, A",\n  ISBN =         "1862 - 4405 (ISSN)",\n  organization = "Dagstuhl",\n  pages =        "19",\n  publisher =    "Dagstuhl",\n  series =       "Dagstuhl Seminar Report 296",\n  title =        "{From Research to Startup: Experiences in Interoperability}",\n  year =         "2001",\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Legacy to the Extreme.\n \n \n \n\n\n \n van Deursen, A.; Kuipers, T.; and Moonen, L.\n\n\n \n\n\n\n In Marchesi, M; and Succi, G, editor(s), eXtreme Programming Examined, 29, pages 501–514. Addison-Wesley Longman, 2001.\n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@InCollection{Simula.SE.441,\n  author =       "van Deursen, Arie and Kuipers, Tobias and Moonen, Leon",\n  booktitle =    "eXtreme Programming Examined",\n  chapter =      "29",\n  editor =       "Marchesi, M and Succi, G",\n  ISBN =         "9780-201-71040-3",\n  organization = "Addison-Wesley Longman",\n  pages =        "501--514",\n  publisher =    "Addison-Wesley Longman",\n  title =        "{Legacy to the Extreme}",\n  year =         "2001",\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Refactoring Test Code.\n \n \n \n\n\n \n van Deursen, A.; Moonen, L.; van den Bergh, A.; and Kok, G.\n\n\n \n\n\n\n In Marchesi, M; and Succi, G, editor(s), Proceedings 2nd International Conference on Extreme Programming and Flexible Processes in Software Engineering (XP2001), May 2001. \n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@InProceedings{Simula.SE.525,\n  abstract =     "Two key aspects of extreme programming (XP) are unit testing and merciless refactoring. Given the fact that the ideal test code / production code ratio approaches 1:1, it is not surprising that unit tests are being refactored. We found that refactoring test code is different from refactoring production code in two ways: (1) there is a distinct set of bad smells involved, and (2) improving test code involves additional test-specific refactorings. To share our experiences with other XP practitioners, we describe a set of bad smells that indicate trouble in test code, and a collection of test refactorings to remove these smells.",\n  author =       "van Deursen, Arie and Moonen, Leon and van den Bergh, Alex and Kok, Gerard",\n  booktitle =    "Proceedings 2nd International Conference on Extreme Programming and Flexible Processes in Software Engineering (XP2001)",\n  editor =       "Marchesi, M and Succi, G",\n  month =        may,\n  title =        "{Refactoring Test Code}",\n  type =         "Conference",\n  year =         "2001",\n}\n\n
\n
\n\n\n
\n Two key aspects of extreme programming (XP) are unit testing and merciless refactoring. Given the fact that the ideal test code / production code ratio approaches 1:1, it is not surprising that unit tests are being refactored. We found that refactoring test code is different from refactoring production code in two ways: (1) there is a distinct set of bad smells involved, and (2) improving test code involves additional test-specific refactorings. To share our experiences with other XP practitioners, we describe a set of bad smells that indicate trouble in test code, and a collection of test refactorings to remove these smells.\n
\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 2000\n \n \n (6)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n Exploring Legacy Systems Using Types.\n \n \n \n\n\n \n van Deursen, A.; and Moonen, L.\n\n\n \n\n\n\n In Proceedings 7th Working Conference on Reverse Engineering, pages 32–41, 2000. IEEE\n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@InProceedings{Simula.SE.511,\n  abstract =     "We show how hypertext-based program understanding tools can achieve new levels of abstraction by using inferred type information for cases where the subject software system is written in a weakly typed language. We propose TypeExplorer, a tool for browsing COBOL legacy systems based on these types. The paper addresses (1) how types, an invented abstraction, can be presented meaningfully to software re-engineers; (2) the implementation techniques used to construct TypeExplorer and (3) the use of TypeExplorer for understanding legacy systems, at the level of individual statements as well as at the level of the software architecture - which is illustrated by using TypeExplorer to browse an industrial COBOL system of 100,000 lines of code.",\n  author =       "van Deursen, Arie and Moonen, Leon",\n  booktitle =    "Proceedings 7th Working Conference on Reverse Engineering",\n  pages =        "32--41",\n  publisher =    "IEEE",\n  title =        "{Exploring Legacy Systems Using Types}",\n  type =         "Conference",\n  year =         "2000",\n}\n\n
\n
\n\n\n
\n We show how hypertext-based program understanding tools can achieve new levels of abstraction by using inferred type information for cases where the subject software system is written in a weakly typed language. We propose TypeExplorer, a tool for browsing COBOL legacy systems based on these types. The paper addresses (1) how types, an invented abstraction, can be presented meaningfully to software re-engineers; (2) the implementation techniques used to construct TypeExplorer and (3) the use of TypeExplorer for understanding legacy systems, at the level of individual statements as well as at the level of the software architecture - which is illustrated by using TypeExplorer to browse an industrial COBOL system of 100,000 lines of code.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Arrangement and Method for a Documentation Generation System.\n \n \n \n\n\n \n van Deursen, A.; Kuipers, T.; and Moonen, L.\n\n\n \n\n\n\n U.S. Patent application, August 2000.\n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@Misc{deursen:2000:docgen,\n  author =       "van Deursen, Arie and Kuipers, Tobias and Moonen, Leon",\n  howpublished = "U.S. Patent application",\n  month =        aug,\n  title =        "{Arrangement and Method for a Documentation Generation System}",\n  year =         "2000",\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Legacy to the Extreme.\n \n \n \n\n\n \n van Deursen, A.; Kuipers, T.; and Moonen, L.\n\n\n \n\n\n\n In Marchesi, M; and Succi, G, editor(s), International Conference on eXtreme Programming and Flexible Processes in Software Engineering (XP), May 2000. \n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@InProceedings{deursen:2000:legacy,\n  author =       "van Deursen, Arie and Kuipers, Tobias and Moonen, Leon",\n  booktitle =    "International Conference on eXtreme Programming and Flexible Processes in Software Engineering (XP)",\n  editor =       "Marchesi, M and Succi, G",\n  month =        may,\n  title =        "{Legacy to the Extreme}",\n  year =         "2000",\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Arrangement and Method for Exploring Software Systems Using Types.\n \n \n \n\n\n \n van Deursen, A.; and Moonen, L.\n\n\n \n\n\n\n International Patent application, November 2000.\n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@Misc{deursen:2000:type-explorer,\n  author =       "van Deursen, Arie and Moonen, Leon",\n  howpublished = "International Patent application",\n  month =        nov,\n  title =        "{Arrangement and Method for Exploring Software Systems Using Types}",\n  year =         "2000",\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Types and Concept Analysis for Legacy Systems.\n \n \n \n\n\n \n Kuipers, T.; and Moonen, L.\n\n\n \n\n\n\n In International Workshop on Program Comprehension (ICPC), June 2000. IEEE\n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@InProceedings{kuipers:2000:types,\n  author =       "Kuipers, Tobias and Moonen, Leon",\n  booktitle =    "International Workshop on Program Comprehension (ICPC)",\n  month =        jun,\n  publisher =    "IEEE",\n  title =        "{Types and Concept Analysis for Legacy Systems}",\n  year =         "2000",\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Legacy to the Extreme.\n \n \n \n\n\n \n van Deursen, A.; Kuipers, T.; and Moonen, L.\n\n\n \n\n\n\n In Marchesi, M; and Succi, G, editor(s), Proceedings 1st International Conference on eXtreme Programming and Flexible Processes in Software Engineering - XP2000, May 2000. \n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@InProceedings{Simula.SE.517,\n  author =       "van Deursen, Arie and Kuipers, Tobias and Moonen, Leon",\n  booktitle =    "Proceedings 1st International Conference on eXtreme Programming and Flexible Processes in Software Engineering - XP2000",\n  editor =       "Marchesi, M and Succi, G",\n  month =        may,\n  title =        "{Legacy to the Extreme}",\n  type =         "Conference",\n  year =         "2000",\n}\n\n
\n
\n\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 1999\n \n \n (2)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n Understanding Cobol Systems Using Inferred Types.\n \n \n \n\n\n \n van Deursen, A.; and Moonen, L.\n\n\n \n\n\n\n In Nielson, H R; and Sag, i., editor(s), Program Analysis, of Dagstuhl Seminar Report 236, 1.7, pages 15. Dagstuhl, 1999.\n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@InCollection{Simula.SE.445,\n  author =       "van Deursen, Arie and Moonen, Leon",\n  booktitle =    "Program Analysis",\n  chapter =      "1.7",\n  editor =       "Nielson, H R and Sag, iv, M",\n  ISBN =         "1862 - 4405 (ISSN)",\n  organization = "Dagstuhl",\n  pages =        "15",\n  publisher =    "Dagstuhl",\n  series =       "Dagstuhl Seminar Report 236",\n  title =        "{Understanding Cobol Systems Using Inferred Types}",\n  year =         "1999",\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Understanding Cobol Systems Using Inferred Types.\n \n \n \n\n\n \n van Deursen, A.; and Moonen, L.\n\n\n \n\n\n\n In Woods, S, editor(s), Proceedings 7th International Workshop on Program Comprehension, 1999. IEEE\n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@InProceedings{Simula.SE.515,\n  abstract =     "In a typical COBOL program, the data division consists of 50{\\%} of the lines of code. Automatic type inference can help to understand the large collections of variable declarations contained therein, showing how variables are related based on their actual usage. The most problematic aspect of type inference is pollution, the phenomenon that types become too large, and contain variables that intuitively should not belong to the same type. The aim of the paper is to provide empirical evidence for the hypothesis that the use of subtyping is an effective way for dealing with pollution. The main results include a tool set to carry out type inference experiments, a suite of metrics characterizing type inference outcomes, and the conclusion that only one instance of pollution was found in the case study conducted.",\n  author =       "van Deursen, Arie and Moonen, Leon",\n  booktitle =    "Proceedings 7th International Workshop on Program Comprehension",\n  editor =       "Woods, S",\n  publisher =    "IEEE",\n  title =        "{Understanding Cobol Systems Using Inferred Types}",\n  type =         "Conference",\n  year =         "1999",\n}\n\n
\n
\n\n\n
\n In a typical COBOL program, the data division consists of 50% of the lines of code. Automatic type inference can help to understand the large collections of variable declarations contained therein, showing how variables are related based on their actual usage. The most problematic aspect of type inference is pollution, the phenomenon that types become too large, and contain variables that intuitively should not belong to the same type. The aim of the paper is to provide empirical evidence for the hypothesis that the use of subtyping is an effective way for dealing with pollution. The main results include a tool set to carry out type inference experiments, a suite of metrics characterizing type inference outcomes, and the conclusion that only one instance of pollution was found in the case study conducted.\n
\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 1998\n \n \n (1)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n \n Type inference for COBOL systems.\n \n \n \n \n\n\n \n van Deursen, A.; and Moonen, L.\n\n\n \n\n\n\n In Blaha, M; Quilici, A; and Verhoef, C, editor(s), Working Conference on Reverse Engineering (WCRE), pages 220–230, October 1998. IEEE\n \n\n\n\n
\n\n\n\n \n \n \"TypePaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@InProceedings{deursen:1998:type,\n  abstract =     "Types are a good starting point for various software reengineering tasks. Unfortunately, programs requiring reengineering most desperately are written in languages without an adequate type system (such as COBOL). To solve this problem, we propose a method of automated type inference for these languages. The main ingredients are that if variables are compared using some relational operator their types must be the same; likewise if an expression is assigned to a variable, the type of the expression must be a subtype of that of the variable. We present the formal type system and inference rules for this approach, show their effect on various real life COBOL fragments, describe the implementation of our ideas in a prototype type inference tool for COBOL, and discuss a number of applications",\n  author =       "van Deursen, Arie and Moonen, Leon",\n  booktitle =    "Working Conference on Reverse Engineering (WCRE)",\n  DOI =          "10.1109/WCRE.1998.723192",\n  editor =       "Blaha, M and Quilici, A and Verhoef, C",\n  file =         ":Users/leon/Documents/Mendeley Desktop/Deursen, Moonen - 1998 - Type inference for COBOL systems - Working Conference on Reverse Engineering (WCRE).pdf:pdf",\n  ISBN =         "0-8186-8967-6",\n  keywords =     "cobol,reverse engineering,systems re-engineering,type inference",\n  month =        oct,\n  pages =        "220--230",\n  publisher =    "IEEE",\n  title =        "{Type inference for COBOL systems}",\n  URL =          "http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=723192",\n  year =         "1998",\n}\n\n
\n
\n\n\n
\n Types are a good starting point for various software reengineering tasks. Unfortunately, programs requiring reengineering most desperately are written in languages without an adequate type system (such as COBOL). To solve this problem, we propose a method of automated type inference for these languages. The main ingredients are that if variables are compared using some relational operator their types must be the same; likewise if an expression is assigned to a variable, the type of the expression must be a subtype of that of the variable. We present the formal type system and inference rules for this approach, show their effect on various real life COBOL fragments, describe the implementation of our ideas in a prototype type inference tool for COBOL, and discuss a number of applications\n
\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 1997\n \n \n (6)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n \n A Generic Architecture for Data Flow Analysis to Support Reverse Engineering.\n \n \n \n \n\n\n \n Moonen, L.\n\n\n \n\n\n\n In Sellink, M P A, editor(s), International Workshop on the Theory and Practice of Algebraic Specifications (ASF+SDF), of Electronic Workshops in Computing, pages 1–14, 1997. Springer\n \n\n\n\n
\n\n\n\n \n \n \"APaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@InProceedings{moonen:1997:generic,\n  abstract =     "Data flow analysis is a process for collecting run-time information about data in programs without actually executing them. In this paper, we focus at the use of data flow analysis to support program understanding and reverse engineering. Data flow analysis is beneficial for these applications since the information obtained can be used to compute relationships between data objects in programs. These relations play a key role, for example, in the determination of the logical components of a system and their interaction. The general support of program understanding and reverse engineering requires the ability to analyse a variety of source languages and the ability to combine the results of analysing multiple languages. We present a flexible and generic software architecture for describing and performing language-independent data flow analysis which allows such transparent multi-language analysis. All components of this architecture were formally specified.",\n  author =       "Moonen, Leon",\n  booktitle =    "International Workshop on the Theory and Practice of Algebraic Specifications (ASF+SDF)",\n  editor =       "Sellink, M P A",\n  file =         ":Users/leon/Documents/Mendeley Desktop/Moonen - 1997 - A Generic Architecture for Data Flow Analysis to Support Reverse Engineering - International Workshop on the Theory and.pdf:pdf",\n  ISBN =         "3-540-76228-0",\n  keywords =     "data flow analysis,data flow representation language,program comprehension,reverse engineering",\n  pages =        "1--14",\n  publisher =    "Springer",\n  series =       "Electronic Workshops in Computing",\n  title =        "{A Generic Architecture for Data Flow Analysis to Support Reverse Engineering}",\n  URL =          "http://ewic.bcs.org/content/ConWebDoc/4464 http://dl.acm.org/citation.cfm?id=2227706.2227716",\n  year =         "1997",\n}\n\n
\n
\n\n\n
\n Data flow analysis is a process for collecting run-time information about data in programs without actually executing them. In this paper, we focus at the use of data flow analysis to support program understanding and reverse engineering. Data flow analysis is beneficial for these applications since the information obtained can be used to compute relationships between data objects in programs. These relations play a key role, for example, in the determination of the logical components of a system and their interaction. The general support of program understanding and reverse engineering requires the ability to analyse a variety of source languages and the ability to combine the results of analysing multiple languages. We present a flexible and generic software architecture for describing and performing language-independent data flow analysis which allows such transparent multi-language analysis. All components of this architecture were formally specified.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n A Generic Architecture for Data Flow Analysis to Support Reverse Engineering (report).\n \n \n \n\n\n \n Moonen, L.\n\n\n \n\n\n\n Technical Report P9711, University of Amsterdam, Programming Research Group, 1997.\n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@TechReport{moonen:1997:generic:report,\n  abstract =     "Data flow analysis is a process for collecting run-time information about data in programs without actually executing them. In this paper, we focus at the use of data flow analysis to support program understanding and reverse engineering. Data flow analysis is beneficial for these applications since the information obtained can be used to compute relationships between data objects in programs. These relations play a key role, for example, in the determination of the logical components of a system and their interaction. The general support of program understanding and reverse engineering requires the ability to analyse a variety of source languages and the ability to combine the results of analysing multiple languages. We present a flexible and generic software architecture for describing and performing language-independent data flow analysis which allows such transparent multi-language analysis. All components of this architecture were formally specified.",\n  author =       "Moonen, Leon",\n  booktitle =    "Technical Report",\n  file =         ":Users/leon/Documents/Mendeley Desktop/Moonen - 1997 - A generic architecture for data flow analysis to support reverse engineering - Proceedings of the 2nd international conf.pdf:pdf",\n  institution =  "University of Amsterdam, Programming Research Group",\n  keywords =     "data flow analysis,data flow representation language,program comprehension,reverse engineering",\n  number =       "P9711",\n  pages =        "22",\n  title =        "{A Generic Architecture for Data Flow Analysis to Support Reverse Engineering (report)}",\n  year =         "1997",\n}\n\n
\n
\n\n\n
\n Data flow analysis is a process for collecting run-time information about data in programs without actually executing them. In this paper, we focus at the use of data flow analysis to support program understanding and reverse engineering. Data flow analysis is beneficial for these applications since the information obtained can be used to compute relationships between data objects in programs. These relations play a key role, for example, in the determination of the logical components of a system and their interaction. The general support of program understanding and reverse engineering requires the ability to analyse a variety of source languages and the ability to combine the results of analysing multiple languages. We present a flexible and generic software architecture for describing and performing language-independent data flow analysis which allows such transparent multi-language analysis. All components of this architecture were formally specified.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n A Generic Architecture for Data Flow Analysis to Support Reverse Engineering.\n \n \n \n\n\n \n Moonen, L.\n\n\n \n\n\n\n In van Deursen, A.; Klint, P; and Wijers, G, editor(s), Program Transformations for System Renovation, 13, pages not specified. Centrum voor Wiskunde en Informatica (CWI), 1997.\n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@InCollection{Simula.SE.539,\n  author =       "Moonen, Leon",\n  booktitle =    "Program Transformations for System Renovation",\n  chapter =      "13",\n  editor =       "van Deursen, Arie and Klint, P and Wijers, G",\n  organization = "Centrum voor Wiskunde en Informatica (CWI)",\n  pages =        "not specified",\n  publisher =    "Centrum voor Wiskunde en Informatica (CWI)",\n  title =        "{A Generic Architecture for Data Flow Analysis to Support Reverse Engineering}",\n  year =         "1997",\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Implementation of a Prototype for the New ASF+SDF Meta-Environment.\n \n \n \n\n\n \n van den Brand, M. G. J.; Kuipers, T.; Moonen, L.; and Olivier, P. A.\n\n\n \n\n\n\n In Sellink, M P A, editor(s), International Workshop on the Theory and Practice of Algebraic Specifications (ASF+SDF), of Electronic Workshops in Computing, November 1997. Springer-Verlag\n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@InProceedings{brand:1997:implementation,\n  author =       "van den Brand, M. G. J. and Kuipers, Tobias and Moonen, Leon and Olivier, P. A.",\n  booktitle =    "International Workshop on the Theory and Practice of Algebraic Specifications (ASF+SDF)",\n  editor =       "Sellink, M P A",\n  month =        nov,\n  publisher =    "Springer-Verlag",\n  series =       "Electronic Workshops in Computing",\n  title =        "{Implementation of a Prototype for the New ASF+SDF Meta-Environment}",\n  year =         "1997",\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Extensions and Applications of the Dhal Data Flow Analysis Framework.\n \n \n \n\n\n \n Moonen, L.\n\n\n \n\n\n\n In van Deursen, A; Klint, P; and Wijers, G, editor(s), Program Transformations for System Renovation, 14, pages not specified. CWI, the Netherlands, 1997.\n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@InCollection{moonen:1997:extensions,\n  author =       "Moonen, Leon",\n  booktitle =    "Program Transformations for System Renovation",\n  chapter =      "14",\n  editor =       "van Deursen, A and Klint, P and Wijers, G",\n  ISBN =         "not available",\n  pages =        "not specified",\n  publisher =    "CWI, the Netherlands",\n  title =        "{Extensions and Applications of the Dhal Data Flow Analysis Framework}",\n  year =         "1997",\n}\n\n
\n
\n\n\n\n
\n\n\n \n\n\n\n\n\n
\n
\n\n
\n
\n  \n 1996\n \n \n (1)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n Data Flow Analysis for Reverse Engineering.\n \n \n \n\n\n \n Moonen, L.\n\n\n \n\n\n\n Ph.D. Thesis, Programming Research Group, University of Amsterdam, 1996.\n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@PhdThesis{moonen:1996:dfa,\n  annote =       "Appeared as Technical Report P9613.",\n  author =       "Moonen, Leon",\n  booktitle =    "Master Thesis",\n  school =       "Programming Research Group, University of Amsterdam",\n  title =        "{Data Flow Analysis for Reverse Engineering}",\n  year =         "1996",\n}\n\n
\n
\n\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 1994\n \n \n (1)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n A Virtual Assembler for an Abstract Machine – Design and implementation of an incremental and retargetable code generator for term rewriting systems.\n \n \n \n\n\n \n Moonen, L.\n\n\n \n\n\n\n Ph.D. Thesis, Eindhoven Polytechnic, Department of Computer Science, 1994.\n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@PhdThesis{moonen:1994:virtual,\n  author =       "Moonen, Leon",\n  booktitle =    "Master Thesis",\n  school =       "Eindhoven Polytechnic, Department of Computer Science",\n  title =        "{A Virtual Assembler for an Abstract Machine -- Design and implementation of an incremental and retargetable code generator for term rewriting systems}",\n  year =         "1994",\n}\n\n
\n
\n\n\n\n
\n\n\n\n\n\n
\n
\n\n\n\n\n
\n\n\n \n\n \n \n \n \n\n
\n"}; document.write(bibbase_data.data);