Publications of Harald Søndergaard, generated from people.eng.unimelb.edu.au/harald/bibbase.bib by bibbase.org.
2024 (2)

A Lightweight Approach to Nontermination Inference Using Constrained Horn Clauses.
Kafle, B.; Gange, G.; Schachte, P.; Søndergaard, H.; and Stuckey, P. J.
Software and Systems Modeling, 2024. doi:10.1007/s10270-024-01161-5
@Article{Kaf-Gan-Sch-Son-Stu_SoSyM24,
  author  = {Bishoksan Kafle and Graeme Gange and Peter Schachte and
             Harald S{\o}ndergaard and Peter J. Stuckey},
  title   = {A Lightweight Approach to Nontermination Inference Using
             {Constrained} {Horn} {Clauses}},
  journal = {Software and Systems Modeling},
  year    = {2024},
  doi     = {10.1007/s10270-024-01161-5},
}
Abstract: Nontermination is an unwanted program property for some software systems, and a safety property for other systems. In either case, automated discovery of preconditions for nontermination is of interest. We introduce NtHorn, a fast lightweight nontermination analyser, which is able to deduce non-trivial sufficient conditions for nontermination. Using Constrained Horn Clauses (CHCs) as a vehicle, we show how established techniques for CHC program transformation and abstract interpretation can be exploited for the purpose of nontermination analysis. NtHorn is comparable in effectiveness to the state-of-the-art nontermination analysis tools, as measured on standard competition benchmark suites (consisting of integer manipulating programs), while typically solving problems faster by one order of magnitude.
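The analysis itself operates on CHCs; as a toy illustration of what a sufficient condition for nontermination looks like, the sketch below (not NtHorn's algorithm; the loop, guard and candidate set are invented for illustration) checks a candidate recurrent set for a simple integer loop: every state in the set satisfies the guard, and the loop body keeps the state in the set, so any run starting in it never terminates.

def guard(x):            # loop guard of the toy loop `while x > 0: x = x + 2`
    return x > 0

def body(x):             # one iteration of the loop body
    return x + 2

def is_recurrent(candidate, samples):
    """Check, on sampled states, that `candidate` is a recurrent set:
    every state in it satisfies the guard and its successor stays in it.
    A recurrent set is a sufficient precondition for nontermination."""
    return all(guard(x) and candidate(body(x))
               for x in samples if candidate(x))

print(is_recurrent(lambda x: x > 0, range(-10, 1000)))   # True: x > 0 suffices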
The Genesis of Mix: Early Days of Self-Applicable Partial Evaluation (Invited Contribution).
Sestoft, P.; and Søndergaard, H.
In Keller, G.; and Wang, M., editors, PEPM 2024: Proceedings of the 2024 ACM SIGPLAN International Workshop on Partial Evaluation and Program Manipulation, pages 1–13, 2024. doi:10.1145/3635800.3637445
@inproceedings{Ses-Son_PEPM24,
  author    = {Peter Sestoft and Harald S{\o}ndergaard},
  title     = {The Genesis of {Mix}: Early Days of Self-Applicable
               Partial Evaluation (Invited Contribution)},
  editor    = {G. Keller and M. Wang},
  booktitle = {PEPM 2024: Proceedings of the 2024 ACM SIGPLAN International
               Workshop on Partial Evaluation and Program Manipulation},
  pages     = {1--13},
  year      = {2024},
  doi       = {10.1145/3635800.3637445},
  keywords  = {Partial evaluation, Binding-time analysis, Compilation, History},
}
Abstract: Forty years ago development started on Mix, a partial evaluator designed specifically for the purpose of self-application. The effort, led by Neil D. Jones at the University of Copenhagen, eventually demonstrated that non-trivial compilers could be generated automatically by applying a partial evaluator to itself. The possibility, in theory, of such self-application had been known for more than a decade, but remained unrealized by the start of 1984. We describe the genesis of Mix, including the research environment, the challenges, and the main insights that led to success. We emphasize the critical role played by program annotation as a pre-processing step, a process that became automated in the form of binding-time analysis.
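As a reminder of what self-applicable partial evaluation automates, here is a hand-written one-function partial evaluator in Python (a minimal sketch, nothing like Mix itself): given a static exponent n, it specialises power(base, n) by unrolling at specialisation time, emitting a residual program over the dynamic argument base.

def specialise_power(n):
    """Emit Python source for a residual function computing base**n,
    with the iteration on the static argument n unrolled away."""
    expr = "1"
    for _ in range(n):                 # the static input drives the unrolling
        expr = f"({expr} * base)"
    return f"def power_{n}(base):\n    return {expr}\n"

src = specialise_power(3)
print(src)                             # residual program: no loop remains
namespace = {}
exec(src, namespace)
print(namespace["power_3"](5))         # 125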
2022 (3)

Programming to Learn: Logic and Computation from a Programming Perspective.
Farrugia-Roberts, M.; Jeffries, B.; and Søndergaard, H.
In Proceedings of the 27th Annual Conference on Innovation and Technology in Computer Science Education, pages 311–317, New York, NY, 2022. Association for Computing Machinery. doi:10.1145/3502718.3524814
@InProceedings{Far-Jef-Son_ITiCSE22,
  author    = {Matthew Farrugia-Roberts and Bryn Jeffries and
               Harald S{\o}ndergaard},
  title     = {Programming to Learn: Logic and Computation from a
               Programming Perspective},
  booktitle = {Proceedings of the 27th Annual Conference on Innovation and
               Technology in Computer Science Education},
  pages     = {311--317},
  publisher = {Association for Computing Machinery},
  address   = {New York, NY},
  year      = {2022},
  doi       = {10.1145/3502718.3524814},
  keywords  = {Education, Web-based learning},
}
Abstract: Programming problems are commonly used as a learning and assessment activity for learning to program. We believe that programming problems can be effective for broader learning goals. In our large-enrolment course, we have designed special programming problems relevant to logic, discrete mathematics, and the theory of computation, and we have used them for formative and summative assessment. In this report, we reflect on our experience. We aim to leverage our students' programming backgrounds by offering a code-based formalism for our mathematical syllabus. We find we can translate many traditional questions into programming problems of a special kind—calling for `programs' as simple as a single expression, such as a formula or automaton represented in code. A web-based platform enables self-paced learning with rapid contextual corrective feedback, and helps us scale summative assessment to the size of our cohort. We identify several barriers arising with our approach and discuss how we have attempted to negate them. We highlight the potential of programming problems as a digital learning activity even beyond a logic and computation course.
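The abstract mentions exercises calling for "an automaton represented in code"; the sketch below shows what such an answer might look like (the course's actual platform and encoding are not part of the source, so this representation is invented): a DFA over {0, 1} accepting strings with an even number of 1s.

dfa = {
    "start":  "even",
    "accept": {"even"},
    "delta":  {("even", "0"): "even", ("even", "1"): "odd",
               ("odd",  "0"): "odd",  ("odd",  "1"): "even"},
}

def accepts(dfa, word):
    """Run the DFA on `word` and report acceptance."""
    state = dfa["start"]
    for symbol in word:
        state = dfa["delta"][(state, symbol)]
    return state in dfa["accept"]

assert accepts(dfa, "1001")      # two 1s: accepted
assert not accepts(dfa, "1000")  # one 1: rejected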
Teaching Simple Constructive Proofs with Haskell Programs.
Farrugia-Roberts, M.; Jeffries, B.; and Søndergaard, H.
In Achten, P.; and Machkasova, E., editors, Trends in Functional Programming in Education (TFPIE), volume 363 of Electronic Proceedings in Theoretical Computer Science, pages 54–73, 2022. doi:10.4204/EPTCS.363.4. Paper: https://arxiv.org/abs/2208.04699v1
@InProceedings{Far-Jef-Son_TFPIE21,
  author    = {Matthew Farrugia-Roberts and Bryn Jeffries and
               Harald S{\o}ndergaard},
  title     = {Teaching Simple Constructive Proofs with {Haskell} Programs},
  editor    = {P. Achten and E. Machkasova},
  booktitle = {Trends in Functional Programming in Education (TFPIE)},
  series    = {Electronic Proceedings in Theoretical Computer Science},
  volume    = {363},
  pages     = {54--73},
  year      = {2022},
  url_Paper = {https://arxiv.org/abs/2208.04699v1},
  doi       = {10.4204/EPTCS.363.4},
  keywords  = {Education, Web-based learning, Haskell},
}
Abstract: In recent years we have explored using Haskell alongside a traditional mathematical formalism in our large-enrolment university course on topics including logic and formal languages, aiming to offer our students a programming perspective on these mathematical topics. We have found it possible to offer almost all formative and summative assessment through an interactive learning platform, using Haskell as a lingua franca for digital exercises across our broad syllabus. Among the hardest exercises to convert into this format are traditional written proofs conveying constructive arguments. In this paper we reflect on the digitisation of this kind of exercise. We share many examples of Haskell exercises designed to target similar skills to written proof exercises across topics in propositional logic and formal languages, discussing various aspects of the design of such exercises. We also catalogue a sample of student responses to such exercises. This discussion contributes to our broader exploration of programming problems as a flexible digital medium for learning and assessment.
String Abstract Domains and Their Combination.
Søndergaard, H.
In De Angelis, E.; and Vanhoof, W., editors, Logic-Based Program Synthesis and Transformation, volume 13290 of Lecture Notes in Computer Science, pages 1–15, 2022. Springer. doi:10.1007/978-3-030-98869-2_1
@InProceedings{Son_LOPSTR21,
  author    = {Harald S{\o}ndergaard},
  title     = {String Abstract Domains and Their Combination},
  editor    = {E. {De Angelis} and W. Vanhoof},
  booktitle = {Logic-Based Program Synthesis and Transformation},
  series    = {Lecture Notes in Computer Science},
  volume    = {13290},
  pages     = {1--15},
  publisher = {Springer},
  year      = {2022},
  doi       = {10.1007/978-3-030-98869-2_1},
  keywords  = {Abstract domains, String analysis},
}
Abstract: We survey recent developments in string static analysis, with an emphasis on how string abstract domains can be combined. The paper has formed the basis for an invited presentation given to LOPSTR 2021 and PPDP 2021.
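A minimal sketch of the kind of combination the survey discusses, using two invented elementary domains (a length interval and a set of possible characters) in a direct product, with one simple mutual-refinement step; the survey's actual framework and domains are richer.

def alpha(strings):
    """Abstract a (nonempty) set of strings in both domains at once:
    a length interval and the set of characters that may occur."""
    lengths = {len(s) for s in strings}
    chars = set().union(*(set(s) for s in strings))
    return (min(lengths), max(lengths)), chars

def join(a, b):
    (lo1, hi1), cs1 = a
    (lo2, hi2), cs2 = b
    return (min(lo1, lo2), max(hi1, hi2)), cs1 | cs2

def refine(a):
    """One reduction step between the components: if no character can
    occur, only the empty string fits, so the length must be 0."""
    (lo, hi), cs = a
    return ((0, 0), cs) if not cs else ((lo, hi), cs)

print(join(alpha({"ab", "ba"}), alpha({"abc"})))   # ((2, 3), {'a', 'b', 'c'})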
2021 (4)

A Fresh Look at Zones and Octagons.
Gange, G.; Ma, Z.; Navas, J. A.; Schachte, P.; Søndergaard, H.; and Stuckey, P. J.
ACM Transactions on Programming Languages and Systems, 43(3): 11:1–11:51, 2021. doi:10.1145/3457885
@article{Gan-Ma-Nav-Sch-Son-Stu_TOPLAS21,
  author    = {Graeme Gange and Zequn Ma and Jorge A. Navas and
               Peter Schachte and Harald S{\o}ndergaard and Peter J. Stuckey},
  title     = {A Fresh Look at {Zones} and {Octagons}},
  journal   = {ACM Transactions on Programming Languages and Systems},
  volume    = {43},
  number    = {3},
  pages     = {11:1--11:51},
  year      = {2021},
  doi       = {10.1145/3457885},
  keywords  = {Abstract domains, Abstract interpretation, Static analysis},
}
Abstract: Zones and Octagons are popular abstract domains for static program analysis. They enable the automated discovery of simple numerical relations that hold between pairs of program variables. Both domains are well understood mathematically, but the detailed implementation of static analyses based on these domains poses many interesting algorithmic challenges. In this paper we study the two abstract domains, their implementation and use. Utilizing improved data structures and algorithms for the manipulation of graphs that represent difference-bound constraints, we present fast implementations of both abstract domains, built around a common infrastructure. We compare the performance of these implementations against alternative approaches offering the same analysis precision. We quantify the differences in performance by measuring their speed and precision on standard benchmarks. We also assess, in the context of software verification, the extent to which the improved precision translates to better verification outcomes. Experiments demonstrate that our new implementations improve the state-of-the-art for both Zones and Octagons significantly.
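The Zones domain tracks difference constraints x - y <= c, typically stored in a difference-bound matrix whose canonical form is its shortest-path closure. The paper is about doing this fast; the sketch below is only the textbook cubic closure, on an invented three-variable example.

import math

def closure(m):
    """Floyd-Warshall closure of a difference-bound matrix: m[i][j] is
    the best known bound on v_i - v_j (math.inf if unconstrained)."""
    n = len(m)
    m = [row[:] for row in m]
    for k in range(n):
        for i in range(n):
            for j in range(n):
                m[i][j] = min(m[i][j], m[i][k] + m[k][j])
    return m   # a negative m[i][i] would signal an infeasible zone

inf = math.inf
dbm = [[0,   2,   inf],    # v0 - v1 <= 2
       [inf, 0,   3  ],    # v1 - v2 <= 3
       [inf, inf, 0  ]]
print(closure(dbm)[0][2])  # 5: the implied constraint v0 - v2 <= 5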
Disjunctive Interval Analysis.
Gange, G.; Navas, J. A.; Schachte, P.; Søndergaard, H.; and Stuckey, P. J.
In Drăgoi, C.; Mukherjee, S.; and Namjoshi, K., editors, Static Analysis, volume 12913 of Lecture Notes in Computer Science, pages 144–165, 2021. Springer. doi:10.1007/978-3-030-88806-0_7
@InProceedings{Gan-Nav-Sch-Son-Stu_SAS21,
  author    = {Graeme Gange and Jorge A. Navas and Peter Schachte and
               Harald S{\o}ndergaard and Peter J. Stuckey},
  title     = {Disjunctive Interval Analysis},
  editor    = {C. Dr{\u{a}}goi and S. Mukherjee and K. Namjoshi},
  booktitle = {Static Analysis},
  series    = {Lecture Notes in Computer Science},
  volume    = {12913},
  pages     = {144--165},
  publisher = {Springer},
  year      = {2021},
  doi       = {10.1007/978-3-030-88806-0_7},
  keywords  = {Abstract domains, Abstract interpretation, Static analysis, Interval analysis},
}
Abstract: We revisit disjunctive interval analysis based on the Boxes abstract domain. We propose the use of what we call range decision diagrams (RDDs) to implement Boxes, and we provide algorithms for the necessary RDD operations. RDDs tend to be more compact than the linear decision diagrams (LDDs) that have traditionally been used for Boxes. Representing information more directly, RDDs also allow for the implementation of more accurate abstract operations. This comes at no cost in terms of analysis efficiency, whether LDDs utilise dynamic variable ordering or not. RDD and LDD implementations are available in the Crab analyzer, and our experiments confirm that RDDs are well suited for disjunctive interval analysis.
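The Boxes domain represents finite unions of boxes; the paper's contribution is the RDD representation. The sketch below models an element naively as a set of interval tuples, just to show the domain's join and meet (the intervals are invented examples, and nothing here resembles the RDD data structure).

from itertools import product

def join(a, b):
    """Join in Boxes is simply union of the two sets of boxes."""
    return a | b

def meet(a, b):
    """Meet intersects boxes pairwise, dropping empty intersections."""
    out = set()
    for b1, b2 in product(a, b):
        box = tuple((max(l1, l2), min(h1, h2))
                    for (l1, h1), (l2, h2) in zip(b1, b2))
        if all(l <= h for l, h in box):
            out.add(box)
    return out

x_pos = {((1, 9),)}                 # one variable: x in [1,9]
x_split = {((0, 3),), ((6, 8),)}    # x in [0,3] or x in [6,8]
print(meet(x_pos, x_split))         # {((1, 3),), ((6, 8),)}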
Lightweight Non-Termination Inference with CHCs.
Kafle, B.; Gange, G.; Schachte, P.; Søndergaard, H.; and Stuckey, P. J.
In Calinescu, R.; and Păsăreanu, C., editors, Software Engineering and Formal Methods, volume 13085 of Lecture Notes in Computer Science, pages 383–402, 2021. doi:10.1007/978-3-030-92124-8_22
@InProceedings{Kaf-Gan-Sch-Son-Stu_SEFM21,
  author    = {Bishoksan Kafle and Graeme Gange and Peter Schachte and
               Harald S{\o}ndergaard and Peter J. Stuckey},
  title     = {Lightweight Non-Termination Inference with {CHCs}},
  editor    = {R. Calinescu and C. P{\u{a}}s{\u{a}}reanu},
  booktitle = {Software Engineering and Formal Methods},
  series    = {Lecture Notes in Computer Science},
  volume    = {13085},
  pages     = {383--402},
  year      = {2021},
  doi       = {10.1007/978-3-030-92124-8_22},
  keywords  = {Program verification, Program transformation, Partial evaluation, Nontermination analysis, Horn clauses},
}
Abstract: Non-termination is an unwanted program property (considered a bug) for some software systems, and a safety property for other systems. In either case, automated discovery of preconditions for non-termination is of interest. We introduce NtHorn, a fast lightweight non-termination analyser, able to deduce non-trivial sufficient conditions for non-termination. Using Constrained Horn Clauses (CHCs) as a vehicle, we show how established techniques for CHC program transformation and abstract interpretation can be exploited for the purpose of non-termination analysis. NtHorn is comparable in power to the state-of-the-art non-termination analysis tools, as measured on standard competition benchmark suites (consisting of integer manipulating programs), while typically solving problems an order of magnitude faster.
Transformation-Enabled Precondition Inference.
Kafle, B.; Gange, G.; Stuckey, P. J.; Schachte, P.; and Søndergaard, H.
Theory and Practice of Logic Programming, 21: 700–716, 2021. doi:10.1017/S1471068421000272
@article{Kaf-Gan-Stu-Sch-Son_TPLP21,
  author    = {Bishoksan Kafle and Graeme Gange and Peter J. Stuckey and
               Peter Schachte and Harald S{\o}ndergaard},
  title     = {Transformation-Enabled Precondition Inference},
  journal   = {Theory and Practice of Logic Programming},
  volume    = {21},
  pages     = {700--716},
  year      = {2021},
  doi       = {10.1017/S1471068421000272},
  keywords  = {Program verification, Program transformation},
}
Abstract: Precondition inference is a non-trivial problem with important applications in program analysis and verification. We present a novel iterative method for automatically deriving preconditions for the safety and unsafety of programs. Each iteration maintains over-approximations of the sets of safe and unsafe initial states, which are used to partition the program's initial states into those known to be safe, known to be unsafe, and unknown. We then construct revised programs with those unknown initial states and iterate the procedure until the approximations are disjoint or some termination criteria are met. An experimental evaluation of the method on a set of software verification benchmarks shows that it can infer precise preconditions (sometimes optimal) that are not possible using previous methods.
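The iteration described in the abstract can be phrased as a small driver loop; below is a schematic Python rendering in which the two analyses are parameters. The stand-ins used in the demo are exact, so it converges immediately; the paper's interest is in transformations that sharpen genuinely approximate analyses. All names and the toy safety property are invented.

def infer_preconditions(initial, over_safe, over_unsafe, max_iters=10):
    """Iteratively partition initial states into safe / unsafe / unknown,
    re-analysing only the unknown ones, until disjoint or bounded out."""
    unknown, safe, unsafe = set(initial), set(), set()
    for _ in range(max_iters):
        if not unknown:
            break
        s = over_safe(unknown)        # superset of the safe states
        u = over_unsafe(unknown)      # superset of the unsafe states
        safe |= unknown - u           # outside the unsafe over-approximation
        unsafe |= unknown - s         # outside the safe over-approximation
        unknown &= s & u              # still claimed by both: iterate
    return safe, unsafe, unknown

bad = lambda x: x > 0 and x % 2 == 1  # toy: odd positive states are unsafe
safe, unsafe, unknown = infer_preconditions(
    range(-3, 6),
    over_safe=lambda states: {x for x in states if not bad(x)},
    over_unsafe=lambda states: {x for x in states if bad(x)})
print(sorted(safe), sorted(unsafe), sorted(unknown))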
2020 (3)

Abstract Interpretation, Symbolic Execution and Constraints.
Amadini, R.; Gange, G.; Schachte, P.; Søndergaard, H.; and Stuckey, P. J.
In de Boer, F. S.; and Mauro, J., editors, Recent Developments in the Design and Implementation of Programming Languages, volume 86 of OpenAccess Series in Informatics, pages 7:1–7:19, 2020. Schloss Dagstuhl-Leibniz-Zentrum für Informatik. doi:10.4230/OASIcs.Gabbrielli.2020.7. Paper: https://drops.dagstuhl.de/opus/volltexte/2020/13229/pdf/OASIcs-Gabbrielli-7.pdf
@inproceedings{Ama-Gan-Sch-Son-Stu_DIPL20,
  author    = {Roberto Amadini and Graeme Gange and Peter Schachte and
               Harald S{\o}ndergaard and Peter J. Stuckey},
  title     = {Abstract Interpretation, Symbolic Execution and Constraints},
  editor    = {F. S. {de Boer} and J. Mauro},
  booktitle = {Recent Developments in the Design and Implementation of
               Programming Languages},
  series    = {OpenAccess Series in Informatics},
  volume    = {86},
  pages     = {7:1--7:19},
  publisher = {Schloss Dagstuhl-Leibniz-Zentrum f{\"u}r Informatik},
  year      = {2020},
  doi       = {10.4230/OASIcs.Gabbrielli.2020.7},
  url_Paper = {https://drops.dagstuhl.de/opus/volltexte/2020/13229/pdf/OASIcs-Gabbrielli-7.pdf},
  keywords  = {Constraint programming, Symbolic execution, Abstract interpretation},
}
Abstract: Abstract interpretation is a static analysis framework for sound over-approximation of all possible runtime states of a program. Symbolic execution is a framework for reachability analysis which tries to explore all possible execution paths of a program. A shared feature between abstract interpretation and symbolic execution is that each—implicitly or explicitly—maintains constraints during execution, in the form of invariants or path conditions. We investigate the relations between the worlds of abstract interpretation, symbolic execution and constraint solving, to expose potential synergies.
String Constraint Solving: Past, Present and Future.
Amadini, R.; Gange, G.; Schachte, P.; Søndergaard, H.; and Stuckey, P. J.
In De Giacomo, G.; and others, editors, Proceedings of the 24th European Conference on Artificial Intelligence, pages 2875–2876, 2020. IOS Press. doi:10.3233/FAIA200431. Paper: https://ebooks.iospress.nl/pdf/doi/10.3233/FAIA200431
@inproceedings{Ama-Gan-Sch-Son-Stu_ECAI20,
  author    = {Roberto Amadini and Graeme Gange and Peter Schachte and
               Harald S{\o}ndergaard and Peter J. Stuckey},
  title     = {String Constraint Solving: Past, Present and Future},
  editor    = {G. {De Giacomo} and others},
  booktitle = {Proceedings of the 24th European Conference on
               Artificial Intelligence},
  pages     = {2875--2876},
  publisher = {IOS Press},
  year      = {2020},
  doi       = {10.3233/FAIA200431},
  url_Paper = {https://ebooks.iospress.nl/pdf/doi/10.3233/FAIA200431},
  keywords  = {String solvers},
}
Abstract: String constraint solving is an important emerging field, given the ubiquity of strings across different areas such as formal analysis, automated testing, database query processing, and cybersecurity. This paper highlights the current state of the art for string constraint solving, and identifies future challenges in this field.
Algorithm Selection for Dynamic Symbolic Execution: A Preliminary Study.
Amadini, R.; Gange, G.; Schachte, P.; Søndergaard, H.; and Stuckey, P. J.
In Fernández, M., editor, Logic-Based Program Synthesis and Transformation, volume 12561 of Lecture Notes in Computer Science, pages 192–209, 2020. doi:10.1007/978-3-030-68446-4_10
@inproceedings{Ama-Gan-Sch-Son-Stu_LOPSTR20,
  author    = {Roberto Amadini and Graeme Gange and Peter Schachte and
               Harald S{\o}ndergaard and Peter J. Stuckey},
  title     = {Algorithm Selection for Dynamic Symbolic Execution:
               A Preliminary Study},
  editor    = {M. Fern{\'a}ndez},
  booktitle = {Logic-Based Program Synthesis and Transformation},
  series    = {Lecture Notes in Computer Science},
  volume    = {12561},
  pages     = {192--209},
  year      = {2020},
  doi       = {10.1007/978-3-030-68446-4_10},
  keywords  = {Symbolic execution},
}
Abstract: Given a portfolio of algorithms, the goal of Algorithm Selection (AS) is to select the best algorithm(s) for a new, unseen problem instance. Dynamic Symbolic Execution (DSE) brings together concrete and symbolic execution to maximise program coverage. DSE uses a constraint solver to solve the path conditions and generate new inputs to explore. In this paper we join these lines of research by introducing a model that combines DSE and AS approaches. The proposed AS/DSE model is a generic and flexible framework enabling the DSE engine to solve the path conditions it collects with a portfolio of different solvers, by exploiting and extending the well-known AS techniques that have been developed over the last decade. In this way, one can increase the coverage and sometimes even outperform the aggregate coverage achievable by running simultaneously all the solvers of the portfolio.
2019 (4)

Constraint Programming for Dynamic Symbolic Execution of JavaScript.
Amadini, R.; Andrlon, M.; Gange, G.; Schachte, P.; Søndergaard, H.; and Stuckey, P. J.
In Rousseau, L.-M.; and Stergiou, K., editors, Integration of Constraint Programming, Artificial Intelligence, and Operations Research: Proceedings of the 16th International Conference (CPAIOR 2019), volume 11494 of Lecture Notes in Computer Science, pages 1–19, 2019. doi:10.1007/978-3-030-19212-9_1. Paper: https://minerva-access.unimelb.edu.au/rest/bitstreams/b999c802-90f6-5ce4-8b79-c298bfc62afa/retrieve
@inproceedings{Ama-And-Gan-Sch-Son-Stu_CPAIOR19,
  author    = {Roberto Amadini and Mak Andrlon and Graeme Gange and
               Peter Schachte and Harald S{\o}ndergaard and Peter J. Stuckey},
  title     = {Constraint Programming for Dynamic Symbolic Execution of
               {JavaScript}},
  editor    = {L.-M. Rousseau and K. Stergiou},
  booktitle = {Integration of Constraint Programming, Artificial Intelligence,
               and Operations Research: Proceedings of the 16th International
               Conference (CPAIOR 2019)},
  series    = {Lecture Notes in Computer Science},
  volume    = {11494},
  pages     = {1--19},
  year      = {2019},
  doi       = {10.1007/978-3-030-19212-9_1},
  url_Paper = {https://minerva-access.unimelb.edu.au/rest/bitstreams/b999c802-90f6-5ce4-8b79-c298bfc62afa/retrieve},
  keywords  = {Symbolic execution, Constraint programming, String solvers, JavaScript},
}
Abstract: Dynamic Symbolic Execution (DSE) combines concrete and symbolic execution, usually for the purpose of generating good test suites automatically. It relies on constraint solvers to solve path conditions and to generate new inputs to explore. DSE tools usually make use of SMT solvers for constraint solving. In this paper, we show that constraint programming (CP) is a powerful alternative or complementary technique for DSE. Specifically, we apply CP techniques for DSE of JavaScript, the de facto standard for web programming. We capture the JavaScript semantics with MiniZinc and integrate this approach into a tool we call Aratha. We use G-Strings, a CP solver equipped with string variables, for solving path conditions, and we compare the performance of this approach against state-of-the-art SMT solvers. Experimental results, in terms of both speed and coverage, show the benefits of our approach, thus opening new research vistas for using CP techniques in the service of program analysis.
Optimal Bounds for Floating-Point Addition in Constant Time.
Andrlon, M.; Schachte, P.; Søndergaard, H.; and Stuckey, P. J.
In Takagi, N.; Boldo, S.; and Langhammer, M., editors, Proceedings of the 26th IEEE Symposium on Computer Arithmetic (ARITH 2019), pages 159–166, 2019. IEEE Conf. Publ. doi:10.1109/arith.2019.00038
@Inproceedings{And-Sch-Son-Stu_ARITH19,
  author    = {Mak Andrlon and Peter Schachte and Harald S{\o}ndergaard and
               Peter J. Stuckey},
  title     = {Optimal Bounds for Floating-Point Addition in Constant Time},
  editor    = {N. Takagi and S. Boldo and M. Langhammer},
  booktitle = {Proceedings of the 26th IEEE Symposium on Computer Arithmetic
               (ARITH 2019)},
  pages     = {159--166},
  publisher = {IEEE Conf. Publ.},
  year      = {2019},
  doi       = {10.1109/arith.2019.00038},
  keywords  = {Constraint solving, Machine arithmetic},
}
Abstract: Reasoning about floating-point numbers is notoriously difficult, owing to the lack of convenient algebraic properties such as associativity. This poses a substantial challenge for program analysis and verification tools which rely on precise floating-point constraint solving. Currently, interval methods in this domain often exhibit slow convergence even on simple examples. We present a new theorem supporting efficient computation of exact bounds of the intersection of a rectangle with the preimage of an interval under floating-point addition, in any radix or rounding mode. We thus give an efficient method of deducing optimal bounds on the components of an addition, solving the convergence problem.
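The paper derives such bounds in constant time; the sketch below instead finds the same kind of optimal bound by brute force, exploiting only the monotonicity of rounded addition and the order-preserving bit encoding of IEEE doubles (the example values are invented). It finds the largest x with fl(x + y) <= zhi.

import struct

def key(x):
    """Order-preserving map from a double to an integer."""
    (i,) = struct.unpack("<q", struct.pack("<d", x))
    return i if i >= 0 else -(1 << 63) - i

def from_key(k):
    """Inverse of key()."""
    i = k if k >= 0 else -(1 << 63) - k
    return struct.unpack("<d", struct.pack("<q", i))[0]

def max_x(y, zhi, lo=-1e308, hi=1e308):
    """Largest double x in [lo, hi] with fl(x + y) <= zhi, by binary
    search over the ordered encoding; fl is Python's IEEE addition."""
    klo, khi = key(lo), key(hi)
    if from_key(klo) + y > zhi:
        return None                     # no feasible x in the range
    while klo < khi:
        mid = (klo + khi + 1) // 2
        if from_key(mid) + y <= zhi:
            klo = mid
        else:
            khi = mid - 1
    return from_key(klo)

x = max_x(y=1.0, zhi=1.0)
print(x == 2.0 ** -53)   # True: the largest x that still rounds down to 1.0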
Dissecting Widening: Separating Termination from Information.
Gange, G.; Navas, J. A.; Schachte, P.; Søndergaard, H.; and Stuckey, P. J.
In Lin, A. W., editor, Proceedings of the 17th Asian Symposium on Programming Languages and Systems, volume 11893 of Lecture Notes in Computer Science, pages 95–114, 2019. Springer. doi:10.1007/978-3-030-34175-6_6
@InProceedings{Gan-Nav-Sch-Son-Stu_APLAS19,
  author    = {Graeme Gange and Jorge A. Navas and Peter Schachte and
               Harald S{\o}ndergaard and Peter J. Stuckey},
  title     = {Dissecting Widening: Separating Termination from Information},
  editor    = {A. W. Lin},
  booktitle = {Proceedings of the 17th Asian Symposium on Programming
               Languages and Systems},
  series    = {Lecture Notes in Computer Science},
  volume    = {11893},
  pages     = {95--114},
  publisher = {Springer},
  year      = {2019},
  doi       = {10.1007/978-3-030-34175-6_6},
  keywords  = {Fixed points, Abstract domains, Abstract interpretation, Widening},
}
Abstract: Widening ensures or accelerates convergence of a program analysis, and sometimes contributes a guarantee of soundness that would otherwise be absent. In this paper we propose a generalised view of widening, in which widening operates on values that are not necessarily elements of the given abstract domain, although they must be in a correspondence, the details of which we spell out. We show that the new view generalizes the traditional view, and that at least three distinct advantages flow from the generalization. First, it gives a handle on "compositional safety", the problem of creating widening operators for product domains. Second, it adds a degree of flexibility, allowing us to define variants of widening, such as delayed widening, without resorting to intrusive surgery on an underlying fixpoint engine. Third, it adds a degree of robustness, by making it difficult for an analysis implementor to make certain subtle (syntactic vs semantic) category mistakes. The paper supports these claims with examples. Our proposal has been implemented in a state-of-the-art abstract interpreter, and we briefly report on the changes that the revised view necessitated.
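One of the variants mentioned, delayed widening, can indeed be obtained without touching a fixpoint engine, as this minimal interval sketch shows (the operators here are the textbook ones, not the paper's formalisation).

import math

def widen(a, b):
    """Classic interval widening: any unstable bound jumps to infinity."""
    (alo, ahi), (blo, bhi) = a, b
    return (alo if alo <= blo else -math.inf,
            ahi if ahi >= bhi else math.inf)

def delayed(widening, n):
    """Wrap a widening so the first n applications are plain joins."""
    count = 0
    def op(a, b):
        nonlocal count
        count += 1
        if count <= n:
            (alo, ahi), (blo, bhi) = a, b
            return (min(alo, blo), max(ahi, bhi))   # ordinary interval join
        return widening(a, b)
    return op

w = delayed(widen, 2)
x = (0, 0)
for step in [(0, 1), (0, 2), (0, 3)]:   # a slowly growing chain of intervals
    x = w(x, step)
print(x)   # (0, inf): widening only kicked in on the third application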
Wombit: A Portfolio Bit-Vector Solver Using Word-Level Propagation.
Wang, W.; Søndergaard, H.; and Stuckey, P. J.
Journal of Automated Reasoning, 63(3): 723–762, 2019. doi:10.1007/s10817-018-9493-1
@article{Wan-Son-Stu_JAR19,
  author    = {Wenxi Wang and Harald S{\o}ndergaard and Peter J. Stuckey},
  title     = {Wombit: A Portfolio Bit-Vector Solver Using Word-Level
               Propagation},
  journal   = {Journal of Automated Reasoning},
  volume    = {63},
  number    = {3},
  pages     = {723--762},
  year      = {2019},
  doi       = {10.1007/s10817-018-9493-1},
  keywords  = {SMT solving, Constraint propagation, Program verification},
}
Abstract: We develop an idea originally proposed by Michel and Van Hentenryck of how to perform bit-vector constraint propagation on the word level. Most operations are propagated in constant time, assuming the bit-vector fits in a machine word. In contrast, bit-vector SMT solvers usually solve bit-vector problems by (ultimately) bit-blasting, that is, mapping the resulting operations to conjunctive normal form clauses, and using SAT technology to solve them. Bit-blasting generates intermediate variables which can be an advantage, as these can be searched on and learnt about. As each approach has advantages, it makes sense to try to combine them. In this paper, we describe an approach to bit-vector solving using word-level propagation with learning. We have designed alternative word-level propagators to Michel and Van Hentenryck's, and evaluated different variants of the approach. We have also experimented with different approaches to learning and back-jumping in the solver. Based on the insights gained, we have built a portfolio solver, Wombit, which essentially extends the STP bit-vector solver. Using machine learning techniques, the solver makes a judicious up-front decision about whether to use word-level propagation or fall back on bit-blasting.
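The word-level idea can be conveyed with the "known bits" representation underlying Michel and Van Hentenryck's propagators: each bit-vector is abstracted by a known-zeros mask and a known-ones mask, and a constraint such as z = x & y is propagated with a handful of word operations. The sketch below is only the flavour, not Wombit's code; the example masks are invented.

class BV:
    """Bit-vector abstraction: masks of bits known 0 and bits known 1."""
    def __init__(self, zeros=0, ones=0):
        self.zeros, self.ones = zeros, ones
    def __repr__(self):
        return f"BV(zeros={self.zeros:#06b}, ones={self.ones:#06b})"

def propagate_and(x, y, z):
    """Refine known bits under z = x & y, in O(1) word operations."""
    z.ones  |= x.ones & y.ones     # 1 & 1 gives 1
    z.zeros |= x.zeros | y.zeros   # a 0 on either side gives 0
    x.ones  |= z.ones              # a 1 in z forces 1s in both inputs
    y.ones  |= z.ones
    x.zeros |= z.zeros & y.ones    # z bit 0 and y bit 1 force x bit 0
    y.zeros |= z.zeros & x.ones
    # an overlap of zeros and ones would signal an inconsistency

x, y, z = BV(), BV(ones=0b1010), BV(zeros=0b0010, ones=0b1000)
propagate_and(x, y, z)
print(x)   # BV(zeros=0b0010, ones=0b1000): two bits of x deduced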
2018 (3)

Symbolic Execution with Invariant Inlay: Evaluating the Potential.
Alatawi, E.; Miller, T.; and Søndergaard, H.
In Proceedings of the 25th Australasian Software Engineering Conference (ASWEC 2018), pages 26–30, 2018. IEEE Computing Society. doi:10.1109/ASWEC.2018.00012. Paper: https://minerva-access.unimelb.edu.au/rest/bitstreams/38cf593c-2b7f-5e30-aa42-13b74180ed5d/retrieve
@inproceedings{Ala-Mil-Son_ASWEC18,
  author    = {Eman Alatawi and Tim Miller and Harald S{\o}ndergaard},
  title     = {Symbolic Execution with Invariant Inlay:
               Evaluating the Potential},
  booktitle = {Proceedings of the 25th Australasian Software Engineering
               Conference (ASWEC 2018)},
  pages     = {26--30},
  publisher = {IEEE Computing Society},
  year      = {2018},
  doi       = {10.1109/ASWEC.2018.00012},
  url_Paper = {https://minerva-access.unimelb.edu.au/rest/bitstreams/38cf593c-2b7f-5e30-aa42-13b74180ed5d/retrieve},
  keywords  = {Symbolic execution, Static analysis},
}
Abstract: Dynamic symbolic execution (DSE) is a non-standard execution mechanism which, loosely, executes a program symbolically and, simultaneously, on concrete input. DSE is attractive because of several uses in software engineering, including the generation of test data suites with large coverage relative to test suite size. However, DSE struggles in the face of execution path explosion, and is often unable to cover certain kinds of difficult-to-reach program points. Invariant inlay is a technique that aims to improve a DSE tool by interspersing code with invariants, generated automatically using off-the-shelf tools for static program analysis. To capitalise fully on a static analyzer, invariant inlay first applies certain testability transformations to the program source. In this paper we account for how we have evaluated the idea experimentally, in order to determine its usefulness for programs with complex control flow.
Reference Abstract Domains and Applications to String Analysis.
Amadini, R.; Gange, G.; Gauthier, F.; Jordan, A.; Schachte, P.; Søndergaard, H.; Stuckey, P. J.; and Zhang, C.
Fundamenta Informaticae, 158(4): 297–326, 2018. doi:10.3233/FI-2018-1650. Paper: https://minerva-access.unimelb.edu.au/rest/bitstreams/c17b2e2c-f7ea-57c4-8d54-926629bfa3f7/retrieve
@Article{Ama-Gan-Gau-Jor-Sch-Son-Stu-Zha_FI18,
  author    = {Roberto Amadini and Graeme Gange and Fran{\c{c}}ois Gauthier and
               Alexander Jordan and Peter Schachte and Harald S{\o}ndergaard and
               Peter J. Stuckey and Chenyi Zhang},
  title     = {Reference Abstract Domains and Applications to String Analysis},
  journal   = {Fundamenta Informaticae},
  volume    = {158},
  number    = {4},
  pages     = {297--326},
  year      = {2018},
  doi       = {10.3233/FI-2018-1650},
  url_Paper = {https://minerva-access.unimelb.edu.au/rest/bitstreams/c17b2e2c-f7ea-57c4-8d54-926629bfa3f7/retrieve},
  keywords  = {String analysis, Abstract interpretation},
}
Abstract: Abstract interpretation is a well established theory that supports reasoning about the run-time behaviour of programs. It achieves tractable reasoning by considering abstractions of run-time states, rather than the states themselves. The chosen set of abstractions is referred to as the abstract domain. We develop a novel framework for combining (a possibly large number of) abstract domains. It achieves the effect of the so-called reduced product without requiring a quadratic number of functions to translate information among abstract domains. A central notion is a reference domain, a medium for information exchange. Our approach suggests a novel and simpler way to manage the integration of large numbers of abstract domains. We instantiate our framework in the context of string analysis. Browser-embedded dynamic programming languages such as JavaScript and PHP encourage the use of strings as a universal data type for both code and data values. The ensuing vulnerabilities have made string analysis a focus of much recent research. String analysis tends to combine many elementary string abstract domains, each designed to capture a specific aspect of strings. For this instance, the set of regular languages, while too expensive to use directly for analysis, provides an attractive reference domain, enabling the efficient simulation of reduced products of multiple string abstract domains.
An Iterative Approach to Precondition Inference Using Constrained Horn Clauses.
Kafle, B.; Gallagher, J.; Gange, G.; Schachte, P.; Søndergaard, H.; and Stuckey, P. J.
Theory and Practice of Logic Programming, 18: 553–570, 2018. doi:10.1017/S1471068418000091
@article{Kaf-Gal-Gan-Sch-Son-Stu_TPLP18,
  author    = {Bishoksan Kafle and
               John Gallagher and
               Graeme Gange and
               Peter Schachte and
               Harald S{\o}ndergaard and
               Peter J. Stuckey},
  title     = {An Iterative Approach to Precondition Inference Using
               Constrained {Horn} Clauses},
  journal   = {Theory and Practice of Logic Programming},
  volume    = {18},
  pages     = {553--570},
  year      = {2018},
  doi       = {10.1017/S1471068418000091},
  abstract  = {We present a method for automatic inference of conditions
               on the initial states of a program that guarantee that the
               safety assertions in the program are not violated.
               Constrained Horn clauses (CHCs) are used to model the
               program and assertions in a uniform way, and we use
               standard abstract interpretations to derive an
               over-approximation of the set of unsafe initial states.
               The precondition then is the constraint corresponding to
               the complement of that set, under-approximating the set of
               safe initial states. This idea of complementation is not
               new, but previous attempts to exploit it have suffered
               from loss of precision. Here we develop an iterative
               specialisation algorithm to give more precise, and in some
               cases optimal safety conditions. The algorithm combines
               existing transformations, namely constraint
               specialisation, partial evaluation and a trace elimination
               transformation. The last two of these transformations
               perform polyvariant specialisation, leading to disjunctive
               constraints which improve precision. The algorithm is
               implemented and tested on a benchmark suite of programs
               from the literature in precondition inference and software
               verification competitions.},
  keywords  = {Program verification, Horn clauses, Program transformation, Partial evaluation},
}
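
The complementation idea at the heart of this paper can be seen in miniature below: compute the set of initial states from which the assertion can fail, and return its complement as a sufficient precondition for safety. This brute-force Python sketch enumerates a finite range instead of using CHCs and abstract interpretation, and the program is invented for illustration.

def assertion_holds(x):
    """Model program: repeatedly subtract 2 while x >= 2,
    then assert that the result is exactly 0."""
    while x >= 2:
        x -= 2
    return x == 0

INITIALS = range(-8, 9)
unsafe = {x for x in INITIALS if not assertion_holds(x)}
precondition = sorted(set(INITIALS) - unsafe)   # complement of the unsafe set
print(sorted(unsafe))      # odd or negative initial values
print(precondition)        # [0, 2, 4, 6, 8]: start even and non-negative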
2017 (4)
Leveraging Abstract Interpretation for Efficient Dynamic Symbolic Execution. Alatawi, E.; Søndergaard, H.; and Miller, T. In Rosu, G.; Di Penta, M.; and Nguyen, T. N., editors, Proceedings of the 32nd ACM/IEEE International Conference on Automated Software Engineering, pages 619–624, 2017. IEEE Computer Society.
\n\n\n\n \n \n \"Leveraging paper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n\n\n\n
\n
@inproceedings{Ala-Son-Mil_ASE17,
  author    = {Eman Alatawi and
               Harald S{\o}ndergaard and
               Tim Miller},
  title     = {Leveraging Abstract Interpretation for Efficient Dynamic
               Symbolic Execution},
  editor    = {G. Rosu and M. {Di Penta} and T. N. Nguyen},
  booktitle = {Proceedings of the 32nd ACM/IEEE International Conference
               on Automated Software Engineering},
  pages     = {619--624},
  publisher = {IEEE Computer Society},
  year      = {2017},
  doi       = {10.1109/ASE.2017.8115672},
  url_Paper = {https://minerva-access.unimelb.edu.au/rest/bitstreams/7d0c6934-364c-5596-b09d-d0e55f344c13/retrieve},
  abstract  = {Dynamic Symbolic Execution (DSE) is a technique to
               automatically generate test inputs by executing the
               program simultaneously with concrete and symbolic values.
               A key challenge in DSE is scalability, as executing all
               feasible program paths is not possible, owing to the
               possibly exponential or infinite number of program paths.
               Loops, in particular those where the number of iterations
               depends on an input of the program, are a source of path
               explosion. They cause problems because DSE maintains
               symbolic values that capture only the data dependencies on
               symbolic inputs. This ignores control dependencies,
               including loop dependencies that depend indirectly on the
               inputs. We propose a method to increase the coverage
               achieved by DSE in the presence of input-data dependent
               loops and loop dependent branches. We combine DSE with
               abstract interpretation to find indirect control
               dependencies, including loop and branch indirect
               dependencies. Preliminary results show that this results
               in better coverage, within considerably less time compared
               to standard DSE.},
  keywords  = {Symbolic execution, Static analysis, Abstract interpretation},
}
Combining String Abstract Domains for JavaScript Analysis: An Evaluation. Amadini, R.; Jordan, A.; Gange, G.; Gauthier, F.; Schachte, P.; Søndergaard, H.; Stuckey, P. J.; and Zhang, C. In Legay, A.; and Margaria, T., editors, TACAS 2017: Proceedings of the 23rd International Conference on Tools and Algorithms for the Construction and Analysis of Systems, volume 10205 of Lecture Notes in Computer Science, pages 41–57, 2017. Springer.
@InProceedings{Ama-Jor-Gan-Gau-Sch-Son-Stu-Zha_TACAS17,
  author    = {Roberto Amadini and
               Alexander Jordan and
               Graeme Gange and
               Fran{\c{c}}ois Gauthier and
               Peter Schachte and
               Harald S{\o}ndergaard and
               Peter J. Stuckey and
               Chenyi Zhang},
  title     = {Combining String Abstract Domains for {JavaScript} Analysis:
               An Evaluation},
  editor    = {A. Legay and T. Margaria},
  booktitle = {TACAS 2017: Proceedings of the 23rd International
               Conference on Tools and Algorithms for the Construction
               and Analysis of Systems},
  series    = {Lecture Notes in Computer Science},
  volume    = {10205},
  pages     = {41--57},
  publisher = {Springer},
  year      = {2017},
  doi       = {10.1007/978-3-662-54577-5_3},
  abstract  = {Strings play a central role in JavaScript and similar
               scripting languages. Owing to dynamic features such as the
               eval function and dynamic property access, precise string
               analysis is a prerequisite for automated reasoning about
               practically any kind of runtime property. Although the
               literature presents a considerable number of abstract
               domains for capturing and representing specific aspects of
               strings, we are not aware of tools that allow flexible
               combination of string abstract domains. Indeed, support
               for string analysis is often confined to a single,
               dedicated string domain. In this paper we describe a
               framework that allows us to combine multiple string
               abstract domains for the analysis of JavaScript programs.
               It is implemented as an extension of SAFE, an open-source
               static analysis tool. We investigate different
               combinations of abstract domains that capture various
               aspects of strings. Our evaluation suggests that a
               combination of a few, simple abstract domains suffices to
               outperform the precision of state-of-the-art static
               analysis tools for JavaScript.},
  keywords  = {String solvers, Abstract domains, Abstract interpretation, JavaScript},
}
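
A minimal direct product of two simple string abstract domains, in the spirit of the combinations the paper evaluates, is sketched below. The paper's framework extends SAFE and targets JavaScript; this Python sketch is only illustrative, and all class names are invented. Each component domain provides its own transfer function for concatenation, and the product runs them side by side.

class Length:
    """String length abstracted as an interval [lo, hi]."""
    def __init__(self, lo, hi):
        self.lo, self.hi = lo, hi
    @staticmethod
    def of(s):
        return Length(len(s), len(s))
    def concat(self, other):
        return Length(self.lo + other.lo, self.hi + other.hi)

class CharSet:
    """Over-approximation of the set of characters that may occur."""
    def __init__(self, chars):
        self.chars = frozenset(chars)
    @staticmethod
    def of(s):
        return CharSet(s)
    def concat(self, other):
        return CharSet(self.chars | other.chars)

class Product:
    """Runs any number of component domains in lock-step."""
    def __init__(self, *parts):
        self.parts = parts
    @staticmethod
    def of(s):
        return Product(Length.of(s), CharSet.of(s))
    def concat(self, other):
        return Product(*(a.concat(b) for a, b in zip(self.parts, other.parts)))

v = Product.of("foo").concat(Product.of("bar"))
print(v.parts[0].lo, v.parts[0].hi)    # 6 6
print(sorted(v.parts[1].chars))        # ['a', 'b', 'f', 'o', 'r']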
A Benders Decomposition Approach to Deciding Modular Linear Integer Arithmetic. Kafle, B.; Gange, G.; Schachte, P.; Søndergaard, H.; and Stuckey, P. J. In Gaspers, S.; and Walsh, T., editors, Theory and Applications of Satisfiability Testing (SAT 2017), volume 10491 of Lecture Notes in Computer Science, pages 380–397, 2017. Springer International Publishing.
@inproceedings{Kaf-Gan-Sch-Son-Stu_SAT17,
  author    = {Bishoksan Kafle and
               Graeme Gange and
               Peter Schachte and
               Harald S{\o}ndergaard and
               Peter J. Stuckey},
  title     = {A {Benders} Decomposition Approach to Deciding Modular
               Linear Integer Arithmetic},
  editor    = {S. Gaspers and T. Walsh},
  booktitle = {Theory and Applications of Satisfiability Testing (SAT 2017)},
  series    = {Lecture Notes in Computer Science},
  volume    = {10491},
  pages     = {380--397},
  publisher = {Springer International Publishing},
  year      = {2017},
  doi       = {10.1007/978-3-319-66263-3_24},
  abstract  = {Verification tasks frequently require deciding systems of
               linear constraints over modular (machine) arithmetic.
               Existing approaches for reasoning over modular arithmetic
               use bit-vector solvers, or else approximate machine
               integers with mathematical integers and use arithmetic
               solvers. Neither is ideal; the first is sound but
               inefficient, and the second is efficient but unsound. We
               describe a linear encoding which correctly describes
               modular arithmetic semantics, yielding an optimistic but
               sound approach. Our method abstracts the problem with
               linear arithmetic, but progressively refines the
               abstraction when modular semantics is violated. This
               preserves soundness while exploiting the mostly integer
               nature of the constraint problem. We present a prototype
               implementation, which gives encouraging experimental
               results.},
  keywords  = {Program verification, Constraint solving, Machine arithmetic},
}
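
The "optimistic but sound" loop of the abstract can be seen in miniature. For w-bit unsigned addition, machine semantics says machine_add(x, y) = x + y - q * 2**w for some q in {0, 1}, so one can first solve over the mathematical integers with q = 0 and only introduce the wrap-around correction term when the candidate solution violates machine semantics. The following is a hand-rolled Python toy, not the paper's solver.

W = 4                  # 4-bit machine words: values 0..15
MOD = 2 ** W

def solve_add(y, t):
    """Find x in [0, MOD) with (x + y) % MOD == t, by lazy refinement."""
    for q in (0, 1):               # q = 0 first: the optimistic relaxation
        x = t - y + q * MOD        # solve x + y == t + q * 2**w over the integers
        if 0 <= x < MOD:           # accept only if machine semantics holds
            return x, q
    raise ValueError("no solution")

x, q = solve_add(y=9, t=4)
print(x, q)                        # 11 1: (11 + 9) % 16 == 4, one wrap needed
assert (x + 9) % MOD == 4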
Making Sense of Learning Management System's Quiz Analytics in Understanding Students' Learning Difficulties. Mendoza, A.; Søndergaard, H.; and Venables, A. In Proceedings of the 28th Annual Conference of the Australasian Association for Engineering Education (AAEE2017), pages 112–119, 2017. Australasian Association for Engineering Education.
@Inproceedings{Men-Son-Ven_AAEE17,
  author    = {Antonette Mendoza and
               Harald S{\o}ndergaard and
               Anne Venables},
  title     = {Making Sense of Learning Management System's Quiz Analytics
               in Understanding Students' Learning Difficulties},
  booktitle = {Proceedings of the 28th Annual Conference of the
               Australasian Association for Engineering Education (AAEE2017)},
  pages     = {112--119},
  location  = {Manly, New South Wales},
  publisher = {Australasian Association for Engineering Education},
  year      = {2017},
  keywords  = {Education},
}
2016 (6)
Generating Source Inputs for Metamorphic Testing Using Dynamic Symbolic Execution. Alatawi, E.; Miller, T.; and Søndergaard, H. In MET'16: Proceedings of the First International Workshop on Metamorphic Testing, pages 19–25, 2016. ACM.
@inproceedings{Ala-Mil-Son_MET16,
  author    = {Eman Alatawi and
               Tim Miller and
               Harald S{\o}ndergaard},
  title     = {Generating Source Inputs for Metamorphic Testing Using
               Dynamic Symbolic Execution},
  booktitle = {MET'16: Proceedings of the First International Workshop on
               Metamorphic Testing},
  pages     = {19--25},
  publisher = {ACM},
  year      = {2016},
  doi       = {10.1145/2896971.2896980},
  isbn      = {978-1-4503-4163-9},
  abstract  = {Metamorphic testing uses domain-specific properties about
               a program's intended behaviour to alleviate the oracle
               problem. From a given set of source test inputs, a set of
               follow-up test inputs are generated which have some
               relation to the source inputs, and their outputs are
               compared to outputs from the source tests, using
               metamorphic relations. We evaluate the use of an automated
               test input generation technique called dynamic symbolic
               execution (DSE) to generate the source test inputs for
               metamorphic testing. We investigate whether DSE increases
               source-code coverage and fault finding effectiveness of
               metamorphic testing compared to the use of random testing,
               and whether the use of metamorphic relations as a
               supportive technique improves the test inputs generated by
               DSE. Our results show that DSE improves the coverage and
               fault detection rate of metamorphic testing compared to
               random testing using significantly smaller test suites,
               and the use of metamorphic relations increases code
               coverage of both DSE and random tests considerably, but
               the improvement in the fault detection rate may be
               marginal and depends on the used metamorphic relations.},
  keywords  = {Symbolic execution, Testing},
}
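
A self-contained example of the metamorphic setup described above: no oracle says what the output "should" be, but the metamorphic relation sin(pi - x) == sin(x) relates follow-up outputs to source outputs. The source inputs are fixed here; the point of the paper is that generating them with DSE, rather than randomly, improves coverage and fault detection. Illustrative only.

import math

def function_under_test(x):
    return math.sin(x)             # stand-in for code with no easy oracle

source_inputs = [0.1, 0.5, 1.2, 2.8]       # in the paper: generated by DSE
for x in source_inputs:
    follow_up = math.pi - x                # follow-up input from the relation
    out_src = function_under_test(x)
    out_fol = function_under_test(follow_up)
    assert math.isclose(out_src, out_fol, abs_tol=1e-12), (x, out_src, out_fol)
print("metamorphic relation held on all source/follow-up pairs")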
Exploiting Sparsity in Difference-Bound Matrices. Gange, G.; Navas, J.; Schachte, P.; Søndergaard, H.; and Stuckey, P. J. In Rival, X., editor, Static Analysis: Proceedings of the 23rd International Symposium, volume 9837 of Lecture Notes in Computer Science, pages 189–211, 2016. Springer.
@inproceedings{Gan-Nav-Sch-Son-Stu_SAS16,
  author    = {Graeme Gange and
               Jorge Navas and
               Peter Schachte and
               Harald S{\o}ndergaard and
               Peter J. Stuckey},
  title     = {Exploiting Sparsity in Difference-Bound Matrices},
  editor    = {X. Rival},
  booktitle = {Static Analysis: Proceedings of the 23rd International Symposium},
  series    = {Lecture Notes in Computer Science},
  volume    = {9837},
  pages     = {189--211},
  publisher = {Springer},
  year      = {2016},
  doi       = {10.1007/978-3-662-53413-7},
  abstract  = {Relational numeric abstract domains are very important in
               program analysis. Common domains, such as Zones and
               Octagons, are usually conceptualised with weighted
               digraphs and implemented using difference-bound matrices
               (DBMs). Unfortunately, though conceptually simple, direct
               implementations of graph-based domains tend to perform
               poorly in practice, and are impractical for analyzing
               large code-bases. We propose new DBM algorithms that
               exploit sparsity and closed operands. In particular, a new
               representation which we call split normal form reduces
               graph density on typical abstract states. We compare the
               resulting implementation with several existing DBM-based
               abstract domains, and show that we can substantially
               reduce the time to perform full DBM analysis, without
               sacrificing precision.},
  keywords  = {Abstract domains, Abstract interpretation, Static analysis},
}
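
The Zones domain in miniature: a difference-bound matrix m in which m[i][j] records the bound x_i - x_j <= m[i][j], with x_0 fixed at 0 so that row and column 0 encode unary bounds. Closure is all-pairs shortest paths. The paper's contribution is avoiding this dense O(n^3) computation by exploiting sparsity (split normal form); the dense Python version below only makes the representation concrete.

INF = float("inf")

def close(m):
    """Floyd-Warshall: tighten every entry to its strongest implied bound."""
    n = len(m)
    for k in range(n):
        for i in range(n):
            for j in range(n):
                m[i][j] = min(m[i][j], m[i][k] + m[k][j])
    return m

# Variables x0 (the constant 0), x1, x2, with constraints
#   x1 - x0 <= 4,  x0 - x1 <= -1  (i.e. x1 >= 1),  x2 - x1 <= 2.
m = [[0,   -1,  INF],
     [4,    0,  INF],
     [INF,  2,    0]]
close(m)
print(m[2][0])   # 6: the implied bound x2 - x0 <= 6, i.e. x2 <= 6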
A Complete Refinement Procedure for Regular Separability of Context-Free Languages. Gange, G.; Navas, J.; Schachte, P.; Søndergaard, H.; and Stuckey, P. J. Theoretical Computer Science, 625: 1–24. April 2016.
@article{Gan-Nav-Sch-Son-Stu_TCS16,
  author    = {Graeme Gange and
               Jorge Navas and
               Peter Schachte and
               Harald S{\o}ndergaard and
               Peter J. Stuckey},
  title     = {A Complete Refinement Procedure for Regular Separability
               of Context-Free Languages},
  journal   = {Theoretical Computer Science},
  volume    = {625},
  pages     = {1--24},
  month     = {April},
  year      = {2016},
  doi       = {10.1016/j.tcs.2016.01.026},
  abstract  = {Often, when analyzing the behaviour of systems modelled as
               context-free languages, we wish to know if two languages
               overlap. To this end, we present a class of semi-decision
               procedures for regular separability of context-free
               languages, based on counter-example guided abstraction
               refinement. We propose two effective instances of this
               approach, one that is complete but relatively expensive,
               and one that is inexpensive and sound, but for which we do
               not have a completeness proof. The complete method will
               prove disjointness whenever the input languages are
               regularly separable. Both methods will terminate whenever
               the input languages overlap. We provide an experimental
               evaluation of these procedures, and demonstrate their
               practicality on a range of verification and
               language-theoretic instances.},
  keywords  = {String analysis, Formal languages, Program verification},
}
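
One half of such a semi-decision procedure is easy to make concrete: if the two context-free languages overlap, bounded enumeration will eventually find a shared word. The other half, synthesising a regular separator by abstraction refinement, is the paper's contribution and is not reproduced here. In this Python sketch, grammars map nonterminals to alternatives, each alternative a tuple of symbols; everything is illustrative.

def words(grammar, symbol, budget):
    """All terminal strings of length <= budget derivable from symbol."""
    if symbol not in grammar:                    # a terminal symbol
        return {symbol} if len(symbol) <= budget else set()
    result = set()
    for alternative in grammar[symbol]:
        partial = {""}
        for sym in alternative:
            partial = {p + w for p in partial
                             for w in words(grammar, sym, budget - len(p))}
        result |= {w for w in partial if len(w) <= budget}
    return result

g1 = {"S": [(), ("a", "S", "b")]}                       # { a^n b^n }
g2 = {"E": [(), ("a", "a", "E"), ("a", "b", "E"),       # even-length strings
            ("b", "a", "E"), ("b", "b", "E")]}          # over {a, b}

common = words(g1, "S", 6) & words(g2, "E", 6)
print(sorted(common))      # ['', 'aaabbb', 'aabb', 'ab']: the languages overlap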
An Abstract Domain of Uninterpreted Functions. Gange, G.; Navas, J.; Schachte, P.; Søndergaard, H.; and Stuckey, P. J. In Jobstmann, B.; and Leino, K. R. M., editors, Verification, Model Checking and Abstract Interpretation: Proceedings of the 17th International Conference, volume 9583 of Lecture Notes in Computer Science, pages 85–103, 2016. Springer.
@inproceedings{Gan-Nav-Sch-Son-Stu_VMCAI15,
  author    = {Graeme Gange and
               Jorge Navas and
               Peter Schachte and
               Harald S{\o}ndergaard and
               Peter J. Stuckey},
  title     = {An Abstract Domain of Uninterpreted Functions},
  editor    = {B. Jobstmann and K. R. M. Leino},
  booktitle = {Verification, Model Checking and Abstract Interpretation:
               Proceedings of the 17th International Conference},
  series    = {Lecture Notes in Computer Science},
  volume    = {9583},
  pages     = {85--103},
  publisher = {Springer},
  year      = {2016},
  isbn      = {978-3-662-49121-8},
  doi       = {10.1007/978-3-662-49122-5},
  abstract  = {We revisit relational static analysis of numeric
               variables. Such analyses face two difficulties. First,
               even inexpensive relational domains scale too poorly to be
               practical for large code-bases. Second, to remain
               tractable they have extremely coarse handling of
               non-linear relations. In this paper, we introduce the
               subterm domain, a weakly relational abstract domain for
               inferring equivalences amongst sub-expressions, based on
               the theory of uninterpreted functions. This provides an
               extremely cheap approach for enriching non-relational
               domains with relational information, and enhances
               precision of both relational and non-relational domains in
               the presence of non-linear operations. We evaluate the
               idea in the context of the software verification tool
               SeaHorn.},
  keywords  = {Abstract domains, Abstract interpretation, Static analysis, Program verification},
}
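
The essence of the subterm domain: treat non-linear operations as uninterpreted functions, so that x = a*b and y = a*b yield x = y without any knowledge of multiplication. Below is a minimal value-numbering sketch of that idea in Python; the paper's domain also exchanges information with numeric domains inside SeaHorn, none of which is modelled here.

class ValueNumbering:
    def __init__(self):
        self.var_num = {}     # variable name -> value number
        self.expr_num = {}    # (op, argument numbers) -> value number
        self.counter = 0

    def fresh(self):
        self.counter += 1
        return self.counter

    def number_of(self, var):
        if var not in self.var_num:
            self.var_num[var] = self.fresh()
        return self.var_num[var]

    def assign(self, var, op, *args):
        """Record var := op(args), hash-consing equal subterms."""
        key = (op,) + tuple(self.number_of(a) for a in args)
        if key not in self.expr_num:
            self.expr_num[key] = self.fresh()
        self.var_num[var] = self.expr_num[key]

    def must_equal(self, u, v):
        return u in self.var_num and self.var_num.get(u) == self.var_num.get(v)

vn = ValueNumbering()
vn.assign("x", "*", "a", "b")    # x := a * b
vn.assign("y", "*", "a", "b")    # y := a * b
vn.assign("z", "*", "a", "c")    # z := a * c
print(vn.must_equal("x", "y"))   # True: identical uninterpreted terms
print(vn.must_equal("x", "z"))   # False: equality cannot be concluded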
Compositional Symbolic Execution: Incremental Solving Revisited. Lin, Y.; Miller, T.; and Søndergaard, H. In Potanin, A.; and Murphy, G., editors, APSEC'16: Proceedings of the 23rd Asia-Pacific Software Engineering Conference, pages 273–280, 2016.
@inproceedings{Lin-Mil-Son_APSEC16,
  author    = {Yude Lin and
               Tim Miller and
               Harald S{\o}ndergaard},
  title     = {Compositional Symbolic Execution: Incremental Solving Revisited},
  editor    = {A. Potanin and G. Murphy},
  booktitle = {APSEC'16: Proceedings of the 23rd Asia-Pacific Software
               Engineering Conference},
  pages     = {273--280},
  location  = {Hamilton, New Zealand},
  year      = {2016},
  doi       = {10.1109/APSEC.2016.046},
  abstract  = {Symbolic execution can automatically explore different
               execution paths in a system under test and generate tests
               to precisely cover them. It has two main
               advantages---being automatic and thorough within a
               theory---and has many successful applications. The
               bottleneck of symbolic execution currently is the
               computation consumption for complex systems. Compositional
               Symbolic Execution (CSE) introduces a summarisation module
               to eliminate the redundancy in the exploration of
               repeatedly encountered code. In our previous work, we
               generalised the summarisation to arbitrary code fragments
               instead of functions. In this paper, we transplant this
               idea onto LLVM with many additional features, one of them
               being the use of incremental solving. We show that the
               combination of CSE and incremental solving is mutually
               beneficial. The obvious weakness of CSE is the lack of
               context during summarisation. We discuss the use of
               assumption-based features, available in modern constraint
               solvers, as a way to overcome this problem.},
  keywords  = {Symbolic execution, LLVM},
}
A Bit-Vector Solver with Word-Level Propagation. Wang, W.; Søndergaard, H.; and Stuckey, P. J. In Quimper, C.-G., editor, Integration of AI and OR Techniques in Constraint Programming: Proceedings of the 13th International Conference, volume 9676 of Lecture Notes in Computer Science, pages 374–391, 2016. Springer International Publishing.
@inproceedings{Wan-Son-Stu_CPAIOR16,
  author    = {Wenxi Wang and
               Harald S{\o}ndergaard and
               Peter J. Stuckey},
  title     = {A Bit-Vector Solver with Word-Level Propagation},
  editor    = {C.-G. Quimper},
  booktitle = {Integration of AI and OR Techniques in Constraint Programming:
               Proceedings of the 13th International Conference},
  series    = {Lecture Notes in Computer Science},
  volume    = {9676},
  pages     = {374--391},
  publisher = {Springer International Publishing},
  year      = {2016},
  isbn      = {978-3-319-33953-5},
  doi       = {10.1007/978-3-319-33954-2_27},
  abstract  = {Reasoning with bit-vectors arises in a variety of
               applications in verification and cryptography. Michel and
               Van Hentenryck have proposed an interesting approach to
               bit-vector constraint propagation on the word level. Each
               of the operations except comparison is constant time,
               assuming the bit-vector fits in a machine word. In
               contrast, bit-vector SMT solvers usually solve bit-vector
               problems by bit-blasting, that is, mapping the resulting
               operations to conjunctive normal form clauses, and using
               SAT technology to solve them. This also means generating
               intermediate variables which can be an advantage, as these
               can be searched on and learnt about. Since each approach
               has advantages it is important to see whether we can
               benefit from these advantages by using a word-level
               propagation approach with learning. In this paper we
               describe an approach to bit-vector solving using
               word-level propagation with learning. We provide
               alternative word-level propagators to Michel and Van
               Hentenryck, and give the first empirical evaluation of
               their approach that we are aware of. We show that, with
               careful engineering, a word-level propagation based
               approach can compete with (or complement) bit-blasting.},
  keywords  = {SMT solving, Constraint propagation, Program verification},
}
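
Word-level propagation in the style of Michel and Van Hentenryck's domain, as discussed in the abstract: a bit-vector variable is a pair (lo, hi) of machine words, where bit i is known to be 1 iff it is set in lo, and known to be 0 iff it is clear in hi. Propagators are then a few constant-time word operations. Only the forward rule for z = x & y is sketched below; the paper adds backward rules, further operations and, crucially, learning. Hand-rolled Python, illustrative only.

WIDTH = 8
MASK = (1 << WIDTH) - 1

class BitVec:
    def __init__(self, lo=0, hi=MASK):
        self.lo, self.hi = lo, hi          # invariant: lo & ~hi == 0

    def consistent(self):
        return self.lo & ~self.hi & MASK == 0

    def __repr__(self):
        def bit(i):
            if (self.lo >> i) & 1:
                return "1"                 # known one
            if not (self.hi >> i) & 1:
                return "0"                 # known zero
            return "?"                     # still unknown
        return "".join(bit(i) for i in reversed(range(WIDTH)))

def propagate_and(x, y, z):
    """z = x & y: tighten z using O(1) word operations."""
    z.lo |= x.lo & y.lo                    # both bits known 1, so z bit known 1
    z.hi &= x.hi & y.hi                    # either bit known 0, so z bit known 0
    return z.consistent()

x = BitVec(lo=0b10100000, hi=0b10111111)   # 101????? in ternary notation
y = BitVec(lo=0b10000001, hi=0b11100111)   # 1??00??1
z = BitVec()
propagate_and(x, y, z)
print(z)                                   # 10?00???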
2015 (7)
Using Metamorphic Testing to Improve Dynamic Symbolic Execution. Alatawi, E.; Miller, T.; and Søndergaard, H. In Proceedings of the 24th Australasian Software Engineering Conference (ASWEC 2015), pages 38–47, 2015. IEEE Computer Society.
@inproceedings{Ala-Mil-Son_ASWEC15,
  author    = {Eman Alatawi and
               Tim Miller and
               Harald S{\o}ndergaard},
  title     = {Using Metamorphic Testing to Improve Dynamic Symbolic Execution},
  booktitle = {Proceedings of the 24th Australasian Software Engineering
               Conference (ASWEC 2015)},
  pages     = {38--47},
  publisher = {IEEE Computer Society},
  year      = {2015},
  doi       = {10.1109/ASWEC.2015.16},
  abstract  = {Dynamic symbolic execution (DSE) is an approach for
               automatically generating test inputs from source code
               using constraint information. It is used in
               \emph{fuzzing}: the execution of tests while monitoring
               for generic properties such as buffer overflows and other
               security violations. Limitations of DSE for fuzzing are
               two-fold: (1) only generic properties are checked: many
               deviations from specified behaviour are not found; and (2)
               many programs are not entirely amenable to DSE because
               they give rise to hard constraints, so that some parts of
               a program remain uncovered. In this paper, we discuss how
               to mitigate these problems using \emph{metamorphic
               testing} (MT). Metamorphic testing uses domain-specific
               properties about program behaviour, relating pairs of
               inputs to pairs of outputs. From a given test suite,
               \emph{follow-up test inputs} are generated, and their
               outputs are compared to outputs from the original tests,
               using \emph{metamorphic relations}. Our hypothesis is that
               using metamorphic testing increases the ability of a DSE
               test suite to find faults, and that the follow-up tests
               execute some previously-uncovered segments. We have
               experimented with seven small but non-trivial libraries,
               comparing DSE test suites with DSE+MT test suites,
               demonstrating that DSE+MT test suites improve coverage
               marginally, but find more faults.},
  keywords  = {Symbolic execution, Testing},
}
Analyzing Array Manipulating Programs by Program Transformation. Cornish, J. R. M.; Gange, G.; Navas, J.; Schachte, P.; Søndergaard, H.; and Stuckey, P. J. In Proietti, M.; and Seki, H., editors, Logic-Based Program Synthesis and Transformation: Proceedings of the 24th International Symposium, volume 8981 of Lecture Notes in Computer Science, pages 3–20, 2015. Springer International.
@inproceedings{Cor-Gan-Nav-Sch-Son-Stu_LOPSTR14,
  author    = {J. Robert M. Cornish and
               Graeme Gange and
               Jorge Navas and
               Peter Schachte and
               Harald S{\o}ndergaard and
               Peter J. Stuckey},
  title     = {Analyzing Array Manipulating Programs by Program Transformation},
  editor    = {M. Proietti and H. Seki},
  booktitle = {Logic-Based Program Synthesis and Transformation:
               Proceedings of the 24th International Symposium},
  series    = {Lecture Notes in Computer Science},
  volume    = {8981},
  pages     = {3--20},
  publisher = {Springer International},
  year      = {2015},
  doi       = {10.1007/978-3-319-17822-6_1},
  abstract  = {We explore a transformational approach to the problem of
               verifying simple array-manipulating programs.
               Traditionally, verification of such programs requires
               intricate analysis machinery to reason with universally
               quantified statements about symbolic array segments, such
               as ``every data item stored in the segment A[i] to A[j] is
               equal to the corresponding item stored in the segment B[i]
               to B[j].'' We define a simple abstract machine which
               allows for set-valued variables and we show how to
               translate programs with array operations to array-free
               code for this machine. For the purpose of program
               analysis, the translated program remains faithful to the
               semantics of array manipulation. Based on our
               implementation in LLVM, we evaluate the approach with
               respect to its ability to extract useful invariants and
               the cost in terms of code size.},
  keywords  = {Program transformation, Static analysis, Array analysis},
}
Optimisation and Relaxation for Planning in the Situation Calculus. Davies, T. O.; Pearce, A. R.; Stuckey, P. J.; and Søndergaard, H. In Bordini, R. H.; Elkind, E.; Weiss, G.; and Yolum, P., editors, Proceedings of the 14th International Conference on Autonomous Agents and Multiagent Systems (AAMAS 2015), pages 1141–1149, 2015. IFAAMAS.
\n\n\n\n \n \n \"Optimisation paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n\n\n\n
\n
@inproceedings{Dav-Pea-Stu-Son_AAMAS15,
  author    = {Toby O. Davies and
               Adrian R. Pearce and
               Peter J. Stuckey and
               Harald S{\o}ndergaard},
  title     = {Optimisation and Relaxation for Planning in the Situation
               Calculus},
  editor    = {R. H. Bordini and E. Elkind and G. Weiss and P. Yolum},
  booktitle = {Proceedings of the 14th International Conference on
               Autonomous Agents and Multiagent Systems (AAMAS 2015)},
  pages     = {1141--1149},
  publisher = {IFAAMAS},
  year      = {2015},
  url_Paper = {http://www.aamas2015.com/en/AAMAS_2015_USB/aamas/p1141.pdf},
  abstract  = {The situation calculus can express rich agent behaviours
               and goals and facilitates the reduction of complex
               planning problems to theorem proving. However, in many
               planning problems, solution quality is critically
               important, and the achievable quality is not necessarily
               known in advance. Existing \textsc{Golog} implementations
               merely search for a \emph{legal} plan, typically relying
               on depth-first search to find an execution. We illustrate
               where existing strategies will not terminate when quality
               is considered, and to overcome this limitation we formally
               introduce the notion of \emph{cost} to simplify the search
               for a solution. The main contribution is a new class of
               relaxations of the planning problem, termed
               \emph{precondition relaxations}, based on Lagrangian
               relaxation. We show how this facilitates optimisation of a
               restricted class of \textsc{Golog} programs for which plan
               existence (under a cost budget) is decidable. It allows
               for tractably computing relaxations to the planning
               problem and leads to a general, \emph{blackbox}, approach
               to optimally solving multi-agent planning problems without
               explicit reference to the semantics of interleaved
               concurrency.},
  keywords  = {Planning, Artificial intelligence},
}
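
The precondition relaxations of this paper build on Lagrangian relaxation. The generic mechanism, in miniature: dualise a hard constraint into the objective with a multiplier; for any non-negative multiplier the relaxed optimum bounds the true optimum from above, and such bounds can guide search. Below, a 0/1 knapsack with the capacity constraint dualised. This is a textbook illustration in Python, not the paper's planning model.

values  = [6, 5, 4, 3]
weights = [4, 3, 2, 1]
CAPACITY = 5

def relaxed_bound(lam):
    """max  sum(v_i x_i) - lam * (sum(w_i x_i) - CAPACITY),  x_i in {0,1}.
    The relaxation decomposes per item, so it is trivial to optimise."""
    take = [v - lam * w > 0 for v, w in zip(values, weights)]
    value  = sum(v for v, t in zip(values,  take) if t)
    weight = sum(w for w, t in zip(weights, take) if t)
    return value - lam * (weight - CAPACITY)

for lam in (0.0, 1.0, 1.5, 2.0):
    print(f"lambda = {lam}: upper bound {relaxed_bound(lam)}")
# The smallest bound found (10.5 at lambda = 1.5) caps the true optimum,
# which is 9 (take the items of weight 4 and 1).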
A Tool for Intersecting Context-Free Grammars and Its Applications. Gange, G.; Navas, J.; Schachte, P.; Søndergaard, H.; and Stuckey, P. J. In Havelund, K.; Holzmann, G.; and Joshi, R., editors, NASA Formal Methods: Proceedings of the Seventh International Symposium, volume 9058 of Lecture Notes in Computer Science, pages 422–428, 2015. Springer.
@inproceedings{Gan-Nav-Sch-Son-Stu_NFM15,
  author    = {Graeme Gange and
               Jorge Navas and
               Peter Schachte and
               Harald S{\o}ndergaard and
               Peter J. Stuckey},
  title     = {A Tool for Intersecting Context-Free Grammars and Its
               Applications},
  editor    = {K. Havelund and G. Holzmann and R. Joshi},
  booktitle = {NASA Formal Methods: Proceedings of the Seventh
               International Symposium},
  series    = {Lecture Notes in Computer Science},
  volume    = {9058},
  pages     = {422--428},
  publisher = {Springer},
  year      = {2015},
  doi       = {10.1007/978-3-319-17524-9},
  abstract  = {This paper describes a tool for intersecting context-free
               grammars. Since this problem is undecidable the tool
               follows a refinement-based approach and implements a novel
               refinement which is complete for regularly separable
               grammars. We show its effectiveness for safety
               verification of recursive multi-threaded programs.},
  keywords  = {String analysis, Formal languages, Program verification},
}
Interval Analysis and Machine Arithmetic: Why Signedness Ignorance Is Bliss. Gange, G.; Navas, J.; Schachte, P.; Søndergaard, H.; and Stuckey, P. J. ACM Transactions on Programming Languages and Systems, 37(1): 1:1–1:35. January 2015.
@article{Gan-Nav-Sch-Son-Stu_TOPLAS15,
  author    = {Graeme Gange and
               Jorge Navas and
               Peter Schachte and
               Harald S{\o}ndergaard and
               Peter J. Stuckey},
  title     = {Interval Analysis and Machine Arithmetic:
               Why Signedness Ignorance Is Bliss},
  journal   = {ACM Transactions on Programming Languages and Systems},
  volume    = {37},
  number    = {1},
  pages     = {1:1--1:35},
  month     = {January},
  year      = {2015},
  doi       = {10.1145/2651360},
  abstract  = {The most commonly used integer types have fixed bit-width,
               making it possible for computations to ``wrap around'',
               and many programs depend on this behaviour. Yet much work
               to date on program analysis and verification of integer
               computations treats integers as having infinite precision,
               and most analyses that do respect fixed width lose
               precision when overflow is possible. We present a novel
               integer interval abstract domain that correctly handles
               wrap-around. The analysis is signedness agnostic. By
               treating integers as strings of bits, only considering
               signedness for operations that treat them differently, we
               produce precise, correct results at a modest cost in
               execution time.},
  keywords  = {Abstract domains, Abstract interpretation, Interval analysis, Machine arithmetic, C analysis, LLVM},
}
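
The wrapped-interval idea in miniature: an interval (lo, hi) over w-bit patterns denotes the values met by walking clockwise from lo to hi around the circle of 2**w bit patterns, so it may wrap past 2**w - 1, and it denotes the same set of bit patterns whether those are later read as signed or unsigned. Only addition is sketched in the Python below; the paper develops a complete abstract domain. Hand-rolled and illustrative.

W = 4
MOD = 2 ** W

def size(iv):
    lo, hi = iv
    return (hi - lo) % MOD + 1       # number of values on the wrapped arc

def concretise(iv):
    lo, hi = iv
    return [(lo + k) % MOD for k in range(size(iv))]

def wrapped_add(a, b):
    """Abstract addition, sound for signed and unsigned readings alike."""
    if size(a) + size(b) - 1 > MOD:  # the result arc would cover everything,
        return (0, MOD - 1)          # so return top
    return ((a[0] + b[0]) % MOD, (a[1] + b[1]) % MOD)

a = (14, 1)                          # {14, 15, 0, 1}: signed reading -2..1
b = (3, 4)                           # {3, 4}
c = wrapped_add(a, b)
print(c, concretise(c))              # (1, 5) [1, 2, 3, 4, 5]
assert all((x + y) % MOD in concretise(c)
           for x in concretise(a) for y in concretise(b))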
Horn Clauses As an Intermediate Representation for Program Analysis and Transformation. Gange, G.; Navas, J.; Schachte, P.; Søndergaard, H.; and Stuckey, P. J. Theory and Practice of Logic Programming, 15(4–5): 526–542. 2015.
@article{Gan-Nav-Sch-Son-Stu_TPLP15,
  author    = {Graeme Gange and
               Jorge Navas and
               Peter Schachte and
               Harald S{\o}ndergaard and
               Peter J. Stuckey},
  title     = {Horn Clauses As an Intermediate Representation for Program
               Analysis and Transformation},
  journal   = {Theory and Practice of Logic Programming},
  volume    = {15},
  number    = {4--5},
  pages     = {526--542},
  year      = {2015},
  doi       = {10.1017/S1471068415000204},
  abstract  = {Many recent analyses for conventional imperative programs
               begin by transforming programs into logic programs,
               capitalising on existing LP analyses and simple LP
               semantics. We propose using logic programs as an
               intermediate program representation throughout the
               compilation process. With restrictions ensuring
               determinism and single-modedness, a logic program can
               easily be transformed to machine language or other
               low-level language, while maintaining the simple semantics
               that makes it suitable as a language for program analysis
               and transformation. We present a simple LP language that
               enforces determinism and single-modedness, and show that
               it makes a convenient program representation for analysis
               and transformation.},
  keywords  = {Static analysis, Intermediate representations, Horn clauses, Logic programming},
}
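
The abstract's point made concrete: a small imperative program beside a Horn-clause rendering of it. With determinism and fixed modes the clauses are directly executable, which the Python functions below mimic, while remaining the kind of representation clause-based analysers consume. The example program and clause names are invented for illustration.

#   // imperative source            % Horn-clause IR (mode: sum(+N, -S))
#   s = 0; i = 0;                   % sum(N, S) :- loop(N, 0, 0, S).
#   while (i < n) {                 % loop(N, I, S0, S) :-
#       s = s + i; i = i + 1;       %     I < N, I1 = I + 1, S1 = S0 + I,
#   }                               %     loop(N, I1, S1, S).
#   return s;                       % loop(N, I, S, S) :- I >= N.

def loop(n, i, s0):
    if i < n:                        # first clause: guard, then tail call
        return loop(n, i + 1, s0 + i)
    return s0                        # second clause: exit, result is S0

def sum_below(n):
    return loop(n, 0, 0)

print(sum_below(10))                 # 45, as the imperative loop computes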
Compositional Symbolic Execution Using Fine-Grained Summaries. Lin, Y.; Miller, T.; and Søndergaard, H. In Proceedings of the 24th Australasian Software Engineering Conference (ASWEC 2015), pages 213–222, 2015. IEEE Computer Society.
\n\n\n\n \n \n \"Compositional paper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@inproceedings{Lin-Mil-Son_ASWEC15,
  author    = {Yude Lin and
               Tim Miller and
               Harald S{\o}ndergaard},
  title     = {Compositional Symbolic Execution Using Fine-Grained Summaries},
  booktitle = {Proceedings of the 24th Australasian Software Engineering
               Conference (ASWEC 2015)},
  pages     = {213--222},
  publisher = {IEEE Computer Society},
  year      = {2015},
  doi       = {10.1109/ASWEC.2015.32},
  url_Paper = {http://ieeexplore.ieee.org/xpl/login.jsp?tp=&arnumber=7365810},
  abstract  = {Compositional symbolic execution has been proposed as a
               way to increase the efficiency of symbolic execution.
               Essentially, when a function is symbolically executed, a
               \emph{summary} of the path that was executed is stored.
               This summary records the precondition and postcondition of
               the path, and on subsequent calls that satisfy that
               precondition, the corresponding postcondition can be
               returned instead of executing the function again. However,
               using functions as the unit of summarisation leaves the
               symbolic execution tool at the mercy of a program
               designer, essentially resulting in an arbitrary
               summarisation strategy. In this paper, we explore the use
               of \emph{fine-grained} summaries, in which blocks
               \emph{within} functions are summarised. We propose three
               types of summarisation and demonstrate how to generate
               these. At such a fine-grained level, symbolic execution of
               a path effectively becomes the concatenation of the
               summaries along that path. Using a prototype symbolic
               execution tool, we perform a preliminary experimental
               evaluation of our summary approaches, demonstrating that
               they can improve the speed of symbolic execution by
               reducing the number of calls sent to the underlying
               constraint solver.},
  keywords  = {Symbolic execution},
}
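
A memoisation-flavoured sketch of path summaries as described above: the first time a fragment is explored along some path, record the path condition (precondition) and the path's effect (postcondition); later executions that satisfy a recorded precondition reuse the effect instead of re-exploring and re-querying the solver. The real tool works on symbolic states over program blocks; this Python toy uses concrete values and an invented fragment.

explorations = 0
summaries = []                       # list of (precondition, effect) pairs

def derive_summary(x):
    """Explore the path of  y = (x if x >= 0 else -x)  that x triggers."""
    global explorations
    explorations += 1                # stand-in for expensive solver work
    if x >= 0:
        return (lambda v: v >= 0, lambda v: v)
    return (lambda v: v < 0, lambda v: -v)

def run_fragment(x):
    for pre, effect in summaries:    # reuse a summary whose precondition holds
        if pre(x):
            return effect(x)
    summary = derive_summary(x)      # otherwise explore and remember the path
    summaries.append(summary)
    return summary[1](x)

print([run_fragment(x) for x in (-3, -7, 5, -9, 6)])   # [3, 7, 5, 9, 6]
print("paths explored:", explorations)                 # 2, not 5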
2014 (5)
Fragment-Based Planning Using Column Generation. Davies, T. O.; Pearce, A. R.; Stuckey, P. J.; and Søndergaard, H. In Chien, S.; Do, M.; Fern, A.; and Ruml, W., editors, Proceedings of the 24th International Conference on Automated Planning and Scheduling, pages 83–91, 2014. AAAI.
\n\n\n\n \n \n \"Fragment-Based paper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n\n\n\n
\n
@inproceedings{Dav-Pea-Stu-Son_ICAPS14,
  author    = {Toby O. Davies and
               Adrian R. Pearce and
               Peter J. Stuckey and
               Harald S{\o}ndergaard},
  title     = {Fragment-Based Planning Using Column Generation},
  editor    = {S. Chien and M. Do and A. Fern and W. Ruml},
  booktitle = {Proceedings of the 24th International Conference on
               Automated Planning and Scheduling},
  pages     = {83--91},
  publisher = {AAAI},
  year      = {2014},
  url_Paper = {www.aaai.org/ocs/index.php/ICAPS/ICAPS14/paper/view/7907/8015},
  doi       = {10.1609/icaps.v24i1.13628},
  abstract  = {We introduce a novel algorithm for temporal planning in
               \textsc{Golog} using shared resources, and describe the
               Bulk Freight Rail Scheduling Problem, a motivating example
               of such a temporal domain. We use the framework of column
               generation to tackle complex resource constrained temporal
               planning problems that are beyond the scope of current
               planning technology by combining: the global view of a
               linear programming relaxation of the problem; the strength
               of search in finding action sequences; and the domain
               knowledge that can be encoded in a \textsc{Golog} program.
               We show that our approach significantly outperforms
               state-of-the-art temporal planning and constraint
               programming approaches in this domain, in addition to
               existing temporal \textsc{Golog} implementations. We also
               apply our algorithm to a temporal variant of blocks-world
               where our decomposition speeds proof of optimality
               significantly compared to other anytime algorithms. We
               discuss the potential of the underlying algorithm being
               applicable to STRIPS planning, with further work.},
  keywords  = {Planning, Artificial intelligence},
}
\n
\n\n\n
\n We introduce a novel algorithm for temporal planning in Golog using shared resources, and describe the Bulk Freight Rail Scheduling Problem, a motivating example of such a temporal domain. We use the framework of column generation to tackle complex resource constrained temporal planning problems that are beyond the scope of current planning technology by combining: the global view of a linear programming relaxation of the problem; the strength of search in finding action sequences; and the domain knowledge that can be encoded in a Golog program. We show that our approach significantly outperforms state-of-the-art temporal planning and constraint programming approaches in this domain, in addition to existing temporal Golog implementations. We also apply our algorithm to a temporal variant of blocks-world where our decomposition speeds proof of optimality significantly compared to other anytime algorithms. We discuss the potential of the underlying algorithm being applicable to STRIPS planning, with further work.\n
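Column generation alternates between a restricted master LP and a pricing problem that hunts for a new column with negative reduced cost. The sketch below isolates only the pricing step, in a generic knapsack-style setting with made-up dual prices; it illustrates the general framework, not the paper's Golog/rail pricing problem.

```python
# Pricing step of column generation: given duals from the restricted master
# LP, find a pattern (column) maximising collected dual value (0/1 knapsack).
def price_pattern(weights, capacity, duals):
    """Return (best dual value, items chosen) for one candidate column."""
    best = {0: (0.0, ())}                       # used capacity -> best pack
    for i, w in enumerate(weights):
        for used, (val, items) in list(best.items()):
            u, v = used + w, val + duals[i]
            if u <= capacity and v > best.get(u, (-1.0,))[0]:
                best[u] = (v, items + (i,))
    return max(best.values())

duals = [0.9, 0.6, 0.5]                         # made-up master-LP duals
value, items = price_pattern([4, 3, 2], capacity=6, duals=duals)
reduced_cost = 1.0 - value                      # column cost 1 minus duals
print(items, reduced_cost)                      # add the column if cost < 0
```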
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Four-Valued Reasoning and Cyclic Circuits.\n \n \n \n \n\n\n \n Gange, G.; Horsfall, B.; Naish, L.; and Søndergaard, H.\n\n\n \n\n\n\n IEEE Transactions on Computer-Aided Design of Integrated Circuits and Systems, 33(7): 1003–1016. July 2014.\n \n\n\n\n
\n\n\n\n \n \n \"Four-Valued paper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@article{Gan-Hor-Nai-Son_TCAD14,\n  author    = {Graeme Gange and \n\t\tBenjamin Horsfall and \n\t\tLee Naish and\n                Harald S{\\o}ndergaard},\n  title     = {Four-Valued Reasoning and Cyclic Circuits},\n  journal   = {IEEE Transactions on Computer-Aided Design of Integrated\n                Circuits and Systems},\n  volume    = {33},\n  number    = {7},\n  pages     = {1003--1016},\n  month     = {July},\n  year      = {2014},\n  url_Paper = {http://dx.doi.org/10.1109/TCAD.2014.2304176},\n  doi       = {10.1109/TCAD.2014.2304176},\n  abstract  = {Allowing cycles in a logic circuit can be advantageous,\n\t\tfor example, by reducing the number of gates required to \n\t\timplement a given Boolean function, or set of functions.\n\t\tHowever, a cyclic circuit may easily be ill-behaved.\n\t\tFor instance it may have some output wire oscillate \n\t\tinstead of reaching a steady state.  Propositional \n\t\tthree-valued logic has long been used in tests for\n\t\twell-behaviour of cyclic circuits: a symbolic evaluation \n\t\tmethod known as ternary analysis provides one criterion \n\t\tfor well-behaviour under certain assumptions about wire \n\t\tand gate delay.  We revisit ternary analysis and argue \n\t\tfor the use of four truth values.  The fourth truth \n\t\tvalue allows for the distinction of ``undefined'' and\n\t\t``underspecified'' behaviour.  Ability to under-specify \n\t\tbehaviour is useful, because, in a quest for smaller \n\t\tcircuits, an implementor can capitalize on degrees of \n\t\tfreedom offered in the specification.  Moreover, a fourth\n\t\ttruth value is attractive because, rather than complicating\n\t\t(ternary) circuit analysis, it introduces a pleasant \n\t\tsymmetry, in the form of contra-duality, as well as \n\t\tproviding a convenient framework for manipulating\n\t\tspecifications.  We use this symmetry to provide fixed \n\t\tpoint results that clarify how two-, three-, and four-valued\n\t\tanalyses are related, and to explain some observations about\n\t\tternary analysis.},\n  keywords  = {Fixed points, Many-valued logic, Logic circuits, Circuit synthesis},\n}\n\n
\n
\n\n\n
\n Allowing cycles in a logic circuit can be advantageous, for example, by reducing the number of gates required to implement a given Boolean function, or set of functions. However, a cyclic circuit may easily be ill-behaved. For instance it may have some output wire oscillate instead of reaching a steady state. Propositional three-valued logic has long been used in tests for well-behaviour of cyclic circuits: a symbolic evaluation method known as ternary analysis provides one criterion for well-behaviour under certain assumptions about wire and gate delay. We revisit ternary analysis and argue for the use of four truth values. The fourth truth value allows for the distinction of ``undefined'' and ``underspecified'' behaviour. Ability to under-specify behaviour is useful, because, in a quest for smaller circuits, an implementor can capitalize on degrees of freedom offered in the specification. Moreover, a fourth truth value is attractive because, rather than complicating (ternary) circuit analysis, it introduces a pleasant symmetry, in the form of contra-duality, as well as providing a convenient framework for manipulating specifications. We use this symmetry to provide fixed point results that clarify how two-, three-, and four-valued analyses are related, and to explain some observations about ternary analysis.\n
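One standard way to compute with the four values is a dual-rail encoding: each value is a pair recording evidence for truth and evidence for falsity. A small illustrative sketch (not the paper's analysis algorithm):

```python
# Belnap's four values in a dual-rail encoding: a value is a pair (t, f),
# where t records evidence that a wire can be true and f evidence that it
# can be false.  N = "undefined", B = "both/underspecified".
N, T, F, B = (0, 0), (1, 0), (0, 1), (1, 1)

def NOT(a):                 # negation swaps the two kinds of evidence
    return (a[1], a[0])

def AND(a, b):              # true needs both true; false needs either false
    return (a[0] & b[0], a[1] | b[1])

def OR(a, b):               # dual of AND
    return (a[0] | b[0], a[1] & b[1])

# contra-duality: negation fixes both N and B, unlike classical negation
assert NOT(N) == N and NOT(B) == B
# sanity check against the classical fragment
assert AND(T, F) == F and OR(T, F) == T
```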
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n A Partial-Order Approach to Array Content Analysis.\n \n \n \n\n\n \n Gange, G.; Navas, J.; Schachte, P.; Søndergaard, H.; and Stuckey, P. J.\n\n\n \n\n\n\n 2014.\n ArXiv preprint https://arxiv.org/abs/1408.1754\n\n\n\n
\n\n\n\n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n\n\n\n
\n
@misc{Gan-Nav-Sch-Son-Stu_ArXiv14,\n  author    = {Graeme Gange and \n\t\tJorge Navas and \n\t\tPeter Schachte and\n                Harald S{\\o}ndergaard and \n\t\tPeter J. Stuckey},\n  title     = {A Partial-Order Approach to Array Content Analysis},\n  note      = {ArXiv preprint {https://arxiv.org/abs/1408.1754}},\n  year      = {2014},\n  doi       = {10.48550/arxiv.1408.1754},\n  abstract  = {We present a parametric abstract domain for array content \n\t\tanalysis. The method maintains invariants for contiguous \n\t\tregions of the array, similar to the methods of Gopan, Reps \n\t\tand Sagiv, and of Halbwachs and Peron. However, it introduces \n\t\ta novel concept of an array content graph, avoiding the need \n\t\tfor an up-front factorial partitioning step. The resulting \n\t\tanalysis can be used with arbitrary numeric relational \n\t\tabstract domains; we evaluate the domain on a range of \n\t\tarray manipulating program fragments.},\n  keywords  = {Abstract domains, Array analysis},\n}\n\n
\n
\n\n\n
\n We present a parametric abstract domain for array content analysis. The method maintains invariants for contiguous regions of the array, similar to the methods of Gopan, Reps and Sagiv, and of Halbwachs and Peron. However, it introduces a novel concept of an array content graph, avoiding the need for an up-front factorial partitioning step. The resulting analysis can be used with arbitrary numeric relational abstract domains; we evaluate the domain on a range of array manipulating program fragments.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Synthesizing Optimal Switching Lattices.\n \n \n \n\n\n \n Gange, G.; Søndergaard, H.; and Stuckey, P. J.\n\n\n \n\n\n\n ACM Transactions on Design Automation of Electronic Systems, 20(1): 6:1–6:14. November 2014.\n \n\n\n\n
\n\n\n\n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n\n\n\n
\n
@article{Gan-Son-Stu_TODAES14,\n  author    = {Graeme Gange and \n\t\tHarald S{\\o}ndergaard and \n\t\tPeter J. Stuckey},\n  title     = {Synthesizing Optimal Switching Lattices},\n  journal   = {ACM Transactions on Design Automation of Electronic Systems},\n  volume    = {20},\n  number    = {1},\n  pages     = {6:1--6:14},\n  month     = {November},\n  year      = {2014},\n  doi       = {10.1145/2661632},\n  abstract  = {The use of nanoscale technologies to create electronic devices \n\t\thas revived interest in the use of regular structures for \n\t\tdefining complex logic functions. One such structure is the \n\t\tswitching lattice, a two-dimensional lattice of four-terminal \n\t\tswitches. We show how to directly construct switching lattices \n\t\tof polynomial size from arbitrary logic functions; we also show\n\t\thow to synthesize minimal-sized lattices by translating the \n\t\tproblem to the satisfiability problem for a restricted class of\n\t\tquantified Boolean formulas. The synthesis method is an anytime\n\t\talgorithm which uses modern SAT solving technology and \n\t\tdichotomic search. It improves considerably on an earlier \n\t\tproposal for creating switching lattices for arbitrary logic \n\t\tfunctions.},\n  keywords  = {Boolean logic, Logic circuits, Circuit synthesis},\n}\n\n
\n
\n\n\n
\n The use of nanoscale technologies to create electronic devices has revived interest in the use of regular structures for defining complex logic functions. One such structure is the switching lattice, a two-dimensional lattice of four-terminal switches. We show how to directly construct switching lattices of polynomial size from arbitrary logic functions; we also show how to synthesize minimal-sized lattices by translating the problem to the satisfiability problem for a restricted class of quantified Boolean formulas. The synthesis method is an anytime algorithm which uses modern SAT solving technology and dichotomic search. It improves considerably on an earlier proposal for creating switching lattices for arbitrary logic functions.\n
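The dichotomic search underlying such anytime synthesis is easy to isolate: binary-search the candidate lattice size, treating the SAT/QBF query as a black-box oracle. In the sketch below `realisable` is a stand-in predicate, assumed monotone in the size, so the skeleton runs without a solver.

```python
# Skeleton of the dichotomic search over lattice sizes.  `realisable(k)` is
# a stand-in for the query "does a k-switch lattice implement f?".
def minimal_size(lo, hi, realisable):
    """Smallest k in [lo, hi] with realisable(k); assumes monotonicity:
    once realisable, realisable for all larger k."""
    best = None
    while lo <= hi:
        mid = (lo + hi) // 2
        if realisable(mid):
            best, hi = mid, mid - 1   # record feasible size, search below
        else:
            lo = mid + 1              # infeasible: search above
    return best                       # anytime: best-so-far is always valid

print(minimal_size(1, 64, lambda k: k >= 12))   # -> 12
```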
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Truth versus Information in Logic Programming.\n \n \n \n \n\n\n \n Naish, L.; and Søndergaard, H.\n\n\n \n\n\n\n Theory and Practice of Logic Programming, 14(6): 803–840. November 2014.\n \n\n\n\n
\n\n\n\n \n \n \"Truth paper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n\n\n\n
\n
@article{Nai-Son_TPLP14,\n  author    = {Lee Naish and \n\t\tHarald S{\\o}ndergaard},\n  title     = {Truth versus Information in Logic Programming},\n  journal   = {Theory and Practice of Logic Programming},\n  volume    = {14},\n  number    = {6},\n  pages     = {803--840},\n  month     = {November},\n  year      = {2014},\n  url_Paper = {http://journals.cambridge.org/repo_A89XyMV9},\n  doi       = {10.1017/S1471068413000069},\n  abstract  = {The semantics of logic programs was originally described in \n\t\tterms of two-valued logic. Soon, however, it was realised that\n\t\tthree-valued logic had some natural advantages, as it provides \n\t\tdistinct values not only for truth and falsehood, but also for \n\t\t``undefined''. The three-valued semantics proposed by Fitting \n\t\tand by Kunen are closely related to what is computed by a \n\t\tlogic program, the third truth value being associated with \n\t\tnon-termination. A different three-valued semantics, proposed \n\t\tby Naish, shared much with those of Fitting and Kunen but \n\t\tincorporated allowances for programmer intent, the third truth \n\t\tvalue being associated with underspecification. Naish used an \n\t\t(apparently) novel ``arrow'' operator to relate the intended\n\t\tmeaning of left and right sides of predicate definitions. In \n\t\tthis paper we suggest that the additional truth values of \n\t\tFitting/Kunen and Naish are best viewed as duals. We use \n\t\tBelnap's four-valued logic, also used elsewhere by Fitting, \n\t\tto unify the two three-valued approaches. The truth values are \n\t\tarranged in a bilattice which supports the classical ordering \n\t\ton truth values as well as the ``information ordering''. We \n\t\tnote that the ``arrow'' operator of Naish (and our four-valued\n\t\textension) is essentially the information ordering, whereas the\n\t\tclassical arrow denotes the truth ordering. This allows us to \n\t\tshed new light on many aspects of logic programming, including \n\t\tprogram analysis, type and mode systems, declarative debugging \n\t\tand the relationships between specifications and programs, and \n\t\tsuccessive execution states of a program.},\n  keywords  = {Logic programming, Semantics, Many-valued logic},\n}\n\n
\n
\n\n\n
\n The semantics of logic programs was originally described in terms of two-valued logic. Soon, however, it was realised that three-valued logic had some natural advantages, as it provides distinct values not only for truth and falsehood, but also for ``undefined''. The three-valued semantics proposed by Fitting and by Kunen are closely related to what is computed by a logic program, the third truth value being associated with non-termination. A different three-valued semantics, proposed by Naish, shared much with those of Fitting and Kunen but incorporated allowances for programmer intent, the third truth value being associated with underspecification. Naish used an (apparently) novel ``arrow'' operator to relate the intended meaning of left and right sides of predicate definitions. In this paper we suggest that the additional truth values of Fitting/Kunen and Naish are best viewed as duals. We use Belnap's four-valued logic, also used elsewhere by Fitting, to unify the two three-valued approaches. The truth values are arranged in a bilattice which supports the classical ordering on truth values as well as the ``information ordering''. We note that the ``arrow'' operator of Naish (and our four-valued extension) is essentially the information ordering, whereas the classical arrow denotes the truth ordering. This allows us to shed new light on many aspects of logic programming, including program analysis, type and mode systems, declarative debugging and the relationships between specifications and programs, and successive execution states of a program.\n
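The bilattice structure is small enough to script directly. Using a pair encoding of Belnap's values, the truth ordering and the information ordering are two componentwise comparisons (an illustration, not material from the paper):

```python
# The four truth values as (t, f) pairs; two orderings make them a bilattice.
N, T, F, B = (0, 0), (1, 0), (0, 1), (1, 1)

def leq_truth(a, b):        # truth ordering: at least as true, no more false
    return a[0] <= b[0] and a[1] >= b[1]

def leq_info(a, b):         # information ordering: more evidence of any kind
    return a[0] <= b[0] and a[1] <= b[1]

assert leq_truth(F, N) and leq_truth(N, T)    # F <= N <= T on the truth axis
assert leq_info(N, F) and leq_info(F, B)      # N <= F <= B on the info axis
# the paper's "arrow" corresponds to leq_info; the classical arrow to leq_truth
```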
\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 2013\n \n \n (5)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n A Low Overhead Method for Recovering Unused Memory Inside Regions.\n \n \n \n\n\n \n Davis, M.; Schachte, P.; Somogyi, Z.; and Søndergaard, H.\n\n\n \n\n\n\n In Proceedings of the 2013 ACM Workshop on Memory Systems Performance and Correctness (MSPC 2013), 2013. ACM Press\n \n\n\n\n
\n\n\n\n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n\n\n\n
\n
@Inproceedings{Dav-Sch-Som-Son_MSPC13,\n  author    = {Matthew Davis and \n\t\tPeter Schachte and \n\t\tZoltan Somogyi and\n\t\tHarald S{\\o}ndergaard},\n  title     = {A Low Overhead Method for Recovering Unused Memory \n\t\tInside Regions},\n  booktitle = {Proceedings of the 2013 ACM Workshop on Memory Systems \n\t\tPerformance and Correctness (MSPC 2013)},\n  publisher = {ACM Press},\n  year      = {2013},\n  doi       = {10.1145/2492408.2492415},\n  abstract  = {Automating memory management improves both resource safety\n\t\tand programmer productivity.  One approach, region-based\n\t\tmemory management (RBMM), applies compile-time reasoning\n\t\tto identify points in a program at which memory can be \n\t\tsafely reclaimed.  The main advantage of RBMM over \n\t\ttraditional garbage collection (GC) is the avoidance of \n\t\texpensive runtime analysis, which makes reclaiming memory\n\t\tmuch faster.  On the other hand, GC requires no static \n\t\tanalysis, and, operating at runtime, can have \n\t\tsignificantly more accurate information about object \n\t\tlifetimes.  In this paper we propose a hybrid system\n\t\tthat seeks to combine the advantages of both methods\n\t\twhile avoiding the overheads that previous hybrid systems\n\t\tincurred.  Our system can also reclaim array segments \n\t\twhose elements are no longer reachable.},\n  keywords  = {Memory management, Compilation, Go},\n}\n\n
\n
\n\n\n
\n Automating memory management improves both resource safety and programmer productivity. One approach, region-based memory management (RBMM), applies compile-time reasoning to identify points in a program at which memory can be safely reclaimed. The main advantage of RBMM over traditional garbage collection (GC) is the avoidance of expensive runtime analysis, which makes reclaiming memory much faster. On the other hand, GC requires no static analysis, and, operating at runtime, can have significantly more accurate information about object lifetimes. In this paper we propose a hybrid system that seeks to combine the advantages of both methods while avoiding the overheads that previous hybrid systems incurred. Our system can also reclaim array segments whose elements are no longer reachable.\n
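As background, the bulk-reclamation idea that makes RBMM attractive can be caricatured in a few lines of Python; this toy `Region` class is purely illustrative and unrelated to the paper's implementation.

```python
# Toy model of region-based allocation: objects are tied to a region, and
# the whole region is reclaimed in one step rather than object by object.
class Region:
    def __init__(self, name):
        self.name, self.objects = name, []

    def alloc(self, obj):
        self.objects.append(obj)      # bookkeeping: obj now lives in region
        return obj

    def remove(self):
        n = len(self.objects)
        self.objects.clear()          # bulk reclamation in a single step
        return n

r = Region("loop_body")
for i in range(1000):
    r.alloc([i] * 8)                  # many short-lived objects
print(r.remove(), "objects reclaimed at once")
```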
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Abstract Interpretation over Non-Lattice Abstract Domains.\n \n \n \n\n\n \n Gange, G.; Navas, J. A.; Schachte, P.; Søndergaard, H.; and Stuckey, P. J.\n\n\n \n\n\n\n In Logozzo, F.; and Fähndrich, M., editor(s), Static Analysis, volume 7935, of Lecture Notes in Computer Science, pages 6–24, 2013. Springer\n \n\n\n\n
\n\n\n\n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n\n\n\n
\n
@InProceedings{Gan-Nav-Sch-Son-Stu_SAS13,\n  author    = {Graeme Gange and \n\t\tJorge A. Navas and \n\t\tPeter Schachte and\n\t\tHarald S{\\o}ndergaard and \n\t\tPeter J. Stuckey},\n  title     = {Abstract Interpretation over Non-Lattice Abstract Domains},\n  editor    = {F. Logozzo and M. F{\\"a}hndrich},\n  booktitle = {Static Analysis},\n  series    = {Lecture Notes in Computer Science},\n  volume    = {7935},\n  pages     = {6--24},\n  publisher = {Springer},\n  year      = {2013},\n  doi       = {10.1007/978-3-642-38856-9_3},\n  abstract  = {The classical theoretical framework for static analysis of\n\t\tprograms is abstract interpretation. Much of the power and \n\t\telegance of that framework rests on the assumption that an \n\t\tabstract domain is a lattice. Nonetheless, and for good reason,\n\t\tthe literature on program analysis provides many examples of \n\t\tnon-lattice domains, including non-convex numeric domains. \n\t\tThe lack of domain structure, however, has negative \n\t\tconsequences, both for the precision of program analysis and \n\t\tfor the termination of standard Kleene iteration. In this paper \n\t\twe explore these consequences and present general remedies.},\n  keywords  = {Fixed points, Abstract domains, Lattice theory},\n}\n\n
\n
\n\n\n
\n The classical theoretical framework for static analysis of programs is abstract interpretation. Much of the power and elegance of that framework rests on the assumption that an abstract domain is a lattice. Nonetheless, and for good reason, the literature on program analysis provides many examples of non-lattice domains, including non-convex numeric domains. The lack of domain structure, however, has negative consequences, both for the precision of program analysis and for the termination of standard Kleene iteration. In this paper we explore these consequences and present general remedies.\n
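For readers unfamiliar with the standard lattice setting the paper departs from, here is a textbook Kleene iteration with widening on the interval domain, analysing the loop `x := 0; while x < 100: x := x + 1` (background only; the paper's contribution concerns domains where such joins are not so well behaved):

```python
# Kleene iteration with widening on intervals; widening forces termination.
INF = float("inf")

def join(a, b):
    return (min(a[0], b[0]), max(a[1], b[1]))

def widen(a, b):            # jump to +/- infinity on unstable bounds
    lo = a[0] if a[0] <= b[0] else -INF
    hi = a[1] if a[1] >= b[1] else INF
    return (lo, hi)

def step(x):                # abstract effect of one loop traversal
    body = (min(x[0], 99) + 1, min(x[1], 99) + 1)   # guard x < 100, then x+1
    return join((0, 0), body)                       # join with loop entry

x = (0, 0)
while True:
    nxt = widen(x, step(x))
    if nxt == x:
        break
    x = nxt
print(x)   # (0, inf): a post-fixed point, reached in finitely many steps
```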
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Failure Tabled Constraint Logic Programming by Interpolation.\n \n \n \n\n\n \n Gange, G.; Navas, J. A.; Schachte, P.; Søndergaard, H.; and Stuckey, P. J.\n\n\n \n\n\n\n Theory and Practice of Logic Programming, 13(4–5): 593–607. 2013.\n \n\n\n\n
\n\n\n\n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n\n\n\n
\n
@Article{Gan-Nav-Sch-Son-Stu_TPLP13,\n  author    = {Graeme Gange and \n\t\tJorge A. Navas and \n\t\tPeter Schachte and\n\t\tHarald S{\\o}ndergaard and \n\t\tPeter J. Stuckey},\n  title     = {Failure Tabled Constraint Logic Programming by Interpolation},\n  journal   = {Theory and Practice of Logic Programming},\n  volume    = {13},\n  number    = {4--5},\n  pages     = {593--607},\n  year      = {2013},\n  doi       = {10.1017/S1471068413000379},\n  abstract  = {We present a new execution strategy for constraint logic\n\t\tprograms called \\emph{Failure Tabled CLP}. Similarly to\n\t\t\\emph{Tabled CLP} our strategy records certain derivations\n\t\tin order to prune further derivations. However, our method\n\t\tonly learns from \\emph{failed derivations}. This allows us\n\t\tto compute \\emph{interpolants} rather than \n\t\t\\emph{constraint projection} for generation of \n\t\t\\emph{reuse conditions}. As a result, our technique can\n\t\tbe used where projection is too expensive or does not \n\t\texist. Our experiments indicate that Failure Tabling can\n\t\tspeed up the execution of programs with many redundant \n\t\tfailed derivations as well as achieve termination in the\n\t\tpresence of infinite executions.},\n  keywords  = {Logic programming, Interpolants},\n}\n\n
\n
\n\n\n
\n We present a new execution strategy for constraint logic programs called Failure Tabled CLP. Similarly to Tabled CLP our strategy records certain derivations in order to prune further derivations. However, our method only learns from failed derivations. This allows us to compute interpolants rather than constraint projection for generation of reuse conditions. As a result, our technique can be used where projection is too expensive or does not exist. Our experiments indicate that Failure Tabling can speed up the execution of programs with many redundant failed derivations as well as achieve termination in the presence of infinite executions.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Unbounded Model-Checking with Interpolation for Regular Language Constraints.\n \n \n \n\n\n \n Gange, G.; Navas, J. A.; Stuckey, P. J.; Søndergaard, H.; and Schachte, P.\n\n\n \n\n\n\n In Piterman, N.; and Smolka, S., editor(s), TACAS 2013: Proceedings of the 19th International Conference on Tools and Algorithms for the Construction and Analysis of Systems, volume 7795, of Lecture Notes in Computer Science, pages 277–291, 2013. Springer\n \n\n\n\n
\n\n\n\n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@InProceedings{Gan-Nav-Stu-Son-Sch_TACAS13,\n  author    = {Graeme Gange and \n\t\tJorge A. Navas and \n\t\tPeter J. Stuckey and \n\t\tHarald S{\\o}ndergaard and \n\t\tPeter Schachte},\n  title     = {Unbounded Model-Checking with Interpolation for Regular\n                Language Constraints},\n  editor    = {N. Piterman and S. Smolka},\n  booktitle = {TACAS 2013: Proceedings of the 19th International\n\t\tConference on Tools and Algorithms for the Construction \n\t\tand Analysis of Systems},\n  series    = {Lecture Notes in Computer Science},\n  volume    = {7795},\n  pages     = {277--291},\n  publisher = {Springer},\n  year      = {2013},\n  doi       = {10.1007/978-3-642-36742-7_20},\n  abstract  = {We present a decision procedure for the problem of, \n\t\tgiven a set of regular expressions $R_1, \\ldots, R_n$,\n\t\tdetermining whether $R = R_1 \\cap \\cdots \\cap R_n$ is \n\t\tempty.  Our solver, \\sys{revenant}, finitely unrolls \n\t\tautomata for $R_1, \\ldots, R_n$, encoding each as a \n\t\tset of propositional constraints.  If a SAT solver \n\t\tdetermines satisfiability then $R$ is non-empty. Otherwise\n\t\tour solver uses unbounded model checking techniques \n\t\tto extract an interpolant from the bounded proof.  \n\t\tThis interpolant serves as an overapproximation of $R$.  \n\t\tIf the solver reaches a fixed-point with the constraints\n\t\tremaining unsatisfiable, it has proven $R$ to be empty.\n\t\tOtherwise, it increases the unrolling depth and repeats.\n\t\tWe compare \\textsc{revenant} with other state-of-the-art \n\t\tstring solvers.  Evaluation suggests that it behaves\n\t\tbetter for constraints that express the intersection of\n\t\tsets of regular languages, a case of interest in the \n\t\tcontext of verification.},\n  keywords  = {Static analysis, Formal languages, Model checking, Interpolants, String constraints},\n}\n\n
\n
\n\n\n
\n We present a decision procedure for the problem of, given a set of regular expressions $R_1, …, R_n$, determining whether $R = R_1 ∩ ⋯ ∩ R_n$ is empty. Our solver, revenant, finitely unrolls automata for $R_1, …, R_n$, encoding each as a set of propositional constraints. If a SAT solver determines satisfiability then $R$ is non-empty. Otherwise our solver uses unbounded model checking techniques to extract an interpolant from the bounded proof. This interpolant serves as an overapproximation of $R$. If the solver reaches a fixed-point with the constraints remaining unsatisfiable, it has proven $R$ to be empty. Otherwise, it increases the unrolling depth and repeats. We compare revenant with other state-of-the-art string solvers. Evaluation suggests that it behaves better for constraints that express the intersection of sets of regular languages, a case of interest in the context of verification.\n
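For contrast, the explicit alternative to the paper's lazy SAT-and-interpolation approach is to build the product automaton and test reachability of a jointly accepting state; that product is exactly what revenant avoids constructing eagerly. A small sketch for two DFAs:

```python
# Baseline: emptiness of a DFA intersection via explicit product + BFS.
from collections import deque

def intersection_nonempty(d1, d2, alphabet):
    """Each DFA is (start, accepting_set, delta) with delta[(q, a)] -> q'."""
    s1, acc1, t1 = d1
    s2, acc2, t2 = d2
    seen, work = {(s1, s2)}, deque([(s1, s2)])
    while work:
        q1, q2 = work.popleft()
        if q1 in acc1 and q2 in acc2:
            return True                       # joint accepting state reached
        for a in alphabet:
            nxt = (t1[(q1, a)], t2[(q2, a)])
            if nxt not in seen:
                seen.add(nxt)
                work.append(nxt)
    return False                              # product has no accepting run

# L1 = even number of a's, L2 = at least one a; intersection contains "aa"
even = (0, {0}, {(0, "a"): 1, (1, "a"): 0, (0, "b"): 0, (1, "b"): 1})
some = (0, {1}, {(0, "a"): 1, (1, "a"): 1, (0, "b"): 0, (1, "b"): 1})
print(intersection_nonempty(even, some, "ab"))   # True
```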
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Solving Difference Constraints over Modular Arithmetic.\n \n \n \n\n\n \n Gange, G.; Søndergaard, H.; Stuckey, P. J.; and Schachte, P.\n\n\n \n\n\n\n In Bonacina, M. P., editor(s), CADE 2013: Proceedings of the 24th International Conference on Automated Deduction, volume 7898, of Lecture Notes in Artificial Intelligence, pages 215–230, 2013. Springer\n \n\n\n\n
\n\n\n\n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@InProceedings{Gan-Son-Stu-Sch_CADE13,\n  author    = {Graeme Gange and \n\t\tHarald S{\\o}ndergaard and \n\t\tPeter J. Stuckey and \n\t\tPeter Schachte},\n  title     = {Solving Difference Constraints over Modular Arithmetic},\n  editor    = {M. P. Bonacina},\n  booktitle = {CADE 2013: Proceedings of the 24th International\n\t\tConference on Automated Deduction},\n  series    = {Lecture Notes in Artificial Intelligence},\n  volume    = {7898},\n  pages     = {215--230},\n  publisher = {Springer},\n  year      = {2013},\n  doi       = {10.1007/978-3-642-38574-2_15},\n  abstract  = {Difference logic is commonly used in program verification \n\t\tand analysis. In the context of fixed-precision integers,\n\t\tas used in assembly languages for example, the use of \n\t\tclassical difference logic is unsound. We study the problem\n\t\tof deciding difference constraints in the context of modular\n\t\tarithmetic and show that it is strongly NP-complete. We \n\t\tdiscuss the applicability of the Bellman-Ford algorithm and \n\t\trelated shortest-distance algorithms to the context of modular\n\t\tarithmetic. We explore two approaches, namely a complete \n\t\tmethod implemented using SMT technology and an incomplete \n\t\tfixpoint-based method, and the two are experimentally \n\t\tevaluated. The incomplete method performs considerably \n\t\tfaster while maintaining acceptable accuracy on a range\n\t\tof instances.},\n  keywords  = {Constraint solving, Program verification, Static analysis, Machine arithmetic, Fixed points},\n}\n\n
\n
\n\n\n
\n Difference logic is commonly used in program verification and analysis. In the context of fixed-precision integers, as used in assembly languages for example, the use of classical difference logic is unsound. We study the problem of deciding difference constraints in the context of modular arithmetic and show that it is strongly NP-complete. We discuss the applicability of the Bellman-Ford algorithm and related shortest-distance algorithms to the context of modular arithmetic. We explore two approaches, namely a complete method implemented using SMT technology and an incomplete fixpoint-based method, and the two are experimentally evaluated. The incomplete method performs considerably faster while maintaining acceptable accuracy on a range of instances.\n
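The classical, non-modular procedure that the paper takes as its starting point decides a system of constraints x - y <= c via Bellman-Ford negative-cycle detection; over fixed-width (wrapping) integers this is unsound, which is the paper's point of departure. A sketch of the classical case:

```python
# Classical difference constraints: x - y <= c is satisfiable over the
# integers iff the constraint graph (edge y -> x, weight c) has no
# negative cycle.  Standard Bellman-Ford with an implicit zero source.
def satisfiable(n_vars, constraints):
    """constraints: list of (x, y, c) meaning x - y <= c, vars 0..n_vars-1."""
    dist = [0] * n_vars                      # implicit source at distance 0
    for _ in range(n_vars):                  # |V| - 1 rounds, V = n_vars + 1
        for x, y, c in constraints:
            if dist[y] + c < dist[x]:        # relax edge y -> x
                dist[x] = dist[y] + c
    # one further improvement is possible only on a negative cycle
    return all(dist[y] + c >= dist[x] for x, y, c in constraints)

# x0 - x1 <= 1 and x1 - x0 <= -2 force 0 <= -1: unsatisfiable
print(satisfiable(2, [(0, 1, 1), (1, 0, -2)]))   # False
print(satisfiable(2, [(0, 1, 1), (1, 0, 0)]))    # True
```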
\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 2012\n \n \n (4)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n Towards Region Based Memory Management for Go.\n \n \n \n\n\n \n Davis, M.; Schachte, P.; Somogyi, Z.; and Søndergaard, H.\n\n\n \n\n\n\n In Proceedings of the 2012 ACM Workshop on Memory Systems Performance and Correctness (MSPC 2012), pages 58–67, 2012. ACM Press\n \n\n\n\n
\n\n\n\n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n\n\n\n
\n
@Inproceedings{Dav-Sch-Som-Son_MSPC12,\n  author    = {Matthew Davis and \n\t\tPeter Schachte and \n\t\tZoltan Somogyi and\n\t\tHarald S{\\o}ndergaard},\n  title     = {Towards Region Based Memory Management for {Go}},\n  booktitle = {Proceedings of the 2012 ACM Workshop on Memory Systems \n\t\tPerformance and Correctness (MSPC 2012)},\n  pages     = {58--67},\n  publisher = {ACM Press},\n  year      = {2012},\n  doi       = {10.1145/2247684.2247695},\n  abstract  = {Region-based memory management aims to lower the cost of \n\t\tdeallocation through bulk processing: instead of \n\t\trecovering the memory of each object separately, it\n\t\trecovers the memory of a region containing many objects.\n\t\tIt relies on static analysis to determine the set of \n\t\tmemory regions needed by a program, the program points at\n\t\twhich each region should be created and removed, and, for\n\t\teach memory allocation, the region that should supply the\n\t\tmemory.  The concurrent language Go has features that pose\n\t\tinteresting challenges for this analysis.  We present a \n\t\tnovel design for region-based memory management for Go,\n\t\tcombining static analysis, to guide region creation, and\n\t\tlightweight runtime bookkeeping, to help control \n\t\treclamation.  The main advantage of our approach is that\n\t\tit greatly limits the amount of re-work that must be done\n\t\tafter each change to the program source code, making our\n\t\tapproach more practical than existing RBMM systems.  Our\n\t\tprototype implementation covers most of the sequential\n\t\tfragment of Go, and preliminary results are encouraging.},\n  keywords  = {Memory management, Go},\n}\n\n
\n
\n\n\n
\n Region-based memory management aims to lower the cost of deallocation through bulk processing: instead of recovering the memory of each object separately, it recovers the memory of a region containing many objects. It relies on static analysis to determine the set of memory regions needed by a program, the program points at which each region should be created and removed, and, for each memory allocation, the region that should supply the memory. The concurrent language Go has features that pose interesting challenges for this analysis. We present a novel design for region-based memory management for Go, combining static analysis, to guide region creation, and lightweight runtime bookkeeping, to help control reclamation. The main advantage of our approach is that it greatly limits the amount of re-work that must be done after each change to the program source code, making our approach more practical than existing RBMM systems. Our prototype implementation covers most of the sequential fragment of Go, and preliminary results are encouraging.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Logic Programming: From Underspecification to Undefinedness.\n \n \n \n \n\n\n \n Naish, L.; Søndergaard, H.; and Horsfall, B.\n\n\n \n\n\n\n In Mestre, J., editor(s), Theory of Computing 2012, volume 128, of Conferences in Research and Practice in Information Technology, pages 49–58, 2012. \n \n\n\n\n
\n\n\n\n \n \n \"LogicPaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n\n\n\n
\n
@InProceedings{Nai-Son-Hor_CATS12,\n  author    = {Lee Naish and \n\t\tHarald S{\\o}ndergaard and \n\t\tBenjamin Horsfall},\n  title     = {Logic Programming: From Underspecification to Undefinedness},\n  editor    = {J. Mestre},\n  booktitle = {Theory of Computing 2012},\n  series    = {Conferences in Research and Practice in Information Technology},\n  volume    = {128},\n  pages     = {49--58},\n  year      = {2012},\n  url       = {http://crpit.com/Vol128.html},\n  abstract  = {The semantics of logic programs was originally described in \n\t\tterms of two-valued logic. Soon, however, it was realised that \n\t\tthree-valued logic had some natural advantages, as it provides \n\t\tdistinct values not only for truth and falsehood, but also for\n\t\t"undefined". The three-valued semantics proposed by Fitting and\n\t\tby Kunen are closely related to what is computed by a logic \n\t\tprogram, the third truth value being associated with \n\t\tnon-termination. A different three-valued semantics, proposed \n\t\tby Naish, shared much with those of Fitting and Kunen but \n\t\tincorporated allowances for programmer intent, the third truth \n\t\tvalue being associated with underspecification. Naish used an \n\t\t(apparently) novel ``arrow'' operator to relate the intended \n\t\tmeaning of left and right sides of predicate definitions. In \n\t\tthis paper we suggest that the additional truth values of\n\t\tFitting/Kunen and Naish are best viewed as duals. We use \n\t\tFitting's later four-valued approach to unify the two \n\t\tthree-valued approaches. The additional truth value has very \n\t\tlittle effect on the Fitting three-valued semantics, though it\n\t\tcan be useful when finding approximations to this semantics for\n\t\tprogram analysis. For the Naish semantics, the extra truth \n\t\tvalue allows intended interpretations to be more expressive, \n\t\tallowing us to verify and debug a larger class of programs. \n\t\tWe also explain that the "arrow" operator of Naish (and our \n\t\tfour-valued extension) is essentially the information ordering.\n\t\tThis sheds new light on the relationships between \n\t\tspecifications and programs, and successive execution states \n\t\tof a program.},\n  keywords  = {Logic programming, Semantics, Many-valued logic},\n}\n\n
\n
\n\n\n
\n The semantics of logic programs was originally described in terms of two-valued logic. Soon, however, it was realised that three-valued logic had some natural advantages, as it provides distinct values not only for truth and falsehood, but also for \"undefined\". The three-valued semantics proposed by Fitting and by Kunen are closely related to what is computed by a logic program, the third truth value being associated with non-termination. A different three-valued semantics, proposed by Naish, shared much with those of Fitting and Kunen but incorporated allowances for programmer intent, the third truth value being associated with underspecification. Naish used an (apparently) novel ``arrow'' operator to relate the intended meaning of left and right sides of predicate definitions. In this paper we suggest that the additional truth values of Fitting/Kunen and Naish are best viewed as duals. We use Fitting's later four-valued approach to unify the two three-valued approaches. The additional truth value has very little effect on the Fitting three-valued semantics, though it can be useful when finding approximations to this semantics for program analysis. For the Naish semantics, the extra truth value allows intended interpretations to be more expressive, allowing us to verify and debug a larger class of programs. We also explain that the \"arrow\" operator of Naish (and our four-valued extension) is essentially the information ordering. This sheds new light on the relationships between specifications and programs, and successive execution states of a program.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Signedness-Agnostic Program Analysis: Precise Integer Bounds for Low-Level Code.\n \n \n \n\n\n \n Navas, J. A.; Schachte, P.; Søndergaard, H.; and Stuckey, P. J.\n\n\n \n\n\n\n In Jhala, R.; and Igarashi, A., editor(s), APLAS 2012: Proceedings of the 10th Asian Symposium on Programming Languages and Systems, volume 7705, of Lecture Notes in Computer Science, pages 115–130, 2012. Springer\n \n\n\n\n
\n\n\n\n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@InProceedings{Nav-Sch-Son-Stu_APLAS12,\n  author    = {Jorge A. Navas and \n\t\tPeter Schachte and \n\t\tHarald S{\\o}ndergaard and\n\t\tPeter J. Stuckey},\n  title     = {Signedness-Agnostic Program Analysis: \n\t\tPrecise Integer Bounds for Low-Level Code},\n  editor    = {R. Jhala and A. Igarashi},\n  booktitle = {APLAS 2012: Proceedings of the 10th Asian Symposium \n        \ton Programming Languages and Systems},\n  series    = {Lecture Notes in Computer Science},\n  volume    = {7705},\n  pages     = {115--130},\n  publisher = {Springer},\n  year      = {2012},\n  doi       = {10.1007/978-3-642-35182-2_9},\n  abstract  = {Many compilers target common back-ends, thereby \n\t\tavoiding the need to implement the same analyses \n\t\tfor many different source languages. This has led\n\t\tto interest in static analysis of LLVM code. \n\t\tIn LLVM (and similar languages) most signedness \n\t\tinformation associated with variables has been \n\t\tcompiled away. Current analyses of LLVM code tend\n\t\tto assume that either all values are signed or all\n\t\tare unsigned (except where the code specifies the \n\t\tsignedness). We show how program analysis can \n\t\tsimultaneously consider each bit-string to be both \n\t\tsigned and unsigned, thus improving precision, \n\t\tand we implement the idea for the specific case of \n\t\tinteger bounds analysis. Experimental evaluation\n\t\tshows that this provides higher precision at little\n\t\textra cost. Our approach turns out to be beneficial\n\t\teven when all signedness information is available,\n\t\tsuch as when analysing C or Java code.},\n  keywords  = {Abstract domains, Abstract interpretation, Interval analysis, Machine arithmetic, C analysis, LLVM},\n}\n\n
\n
\n\n\n
\n Many compilers target common back-ends, thereby avoiding the need to implement the same analyses for many different source languages. This has led to interest in static analysis of LLVM code. In LLVM (and similar languages) most signedness information associated with variables has been compiled away. Current analyses of LLVM code tend to assume that either all values are signed or all are unsigned (except where the code specifies the signedness). We show how program analysis can simultaneously consider each bit-string to be both signed and unsigned, thus improving precision, and we implement the idea for the specific case of integer bounds analysis. Experimental evaluation shows that this provides higher precision at little extra cost. Our approach turns out to be beneficial even when all signedness information is available, such as when analysing C or Java code.\n
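The underlying observation is easy to demonstrate: a single w-bit pattern denotes different integers under the unsigned and the signed (two's complement) readings. A small sketch of the two readings (illustration only, not the paper's bounds analysis):

```python
# One w-bit pattern, two integer readings.
def as_unsigned(bits, w):
    return bits & ((1 << w) - 1)

def as_signed(bits, w):
    u = as_unsigned(bits, w)
    return u - (1 << w) if u >> (w - 1) else u   # subtract 2^w if sign bit set

w = 8
pattern = 0b1111_1110
print(as_unsigned(pattern, w))   # 254 under the unsigned reading
print(as_signed(pattern, w))     # -2 under the signed reading
# An analysis that fixes one reading must describe this value as "near 254"
# or "near -2"; keeping both readings retains whichever bound is tighter.
```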
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Collaborative Learning through Formative Peer Review: Pedagogy, Programs and Potential.\n \n \n \n\n\n \n Søndergaard, H.; and Mulder, R.\n\n\n \n\n\n\n Computer Science Education, 22(4): 343–367. 2012.\n \n\n\n\n
\n\n\n\n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n\n\n\n
\n
@article{Son-Mul_CSE12,\n  author    = {Harald S{\\o}ndergaard and \n\t\tRaoul Mulder},\n  title     = {Collaborative Learning through Formative Peer Review:\n\t\tPedagogy, Programs and Potential},\n  journal   = {Computer Science Education},\n  volume    = {22},\n  number    = {4},\n  pages     = {343--367},\n  year      = {2012},\n  doi       = {10.1080/08993408.2012.728041},\n  abstract  = {We examine student peer review, with an emphasis on \n\t\tformative practice and collaborative learning, rather \n\t\tthan peer grading. Opportunities to engage students in \n\t\tsuch formative peer assessment are growing, as a range \n\t\tof online tools become available to manage and simplify \n\t\tthe process of administering student peer review. \n\t\tWe consider whether pedagogical requirements for student\n\t\tpeer review are likely to be discipline-specific, taking\n\t\tcomputer science and software engineering as an example.\n\t\tWe then summarise what attributes are important for a \n\t\tmodern generic peer review tool, and classify tools \n\t\taccording to four prevalent emphases, using currently \n\t\tavailable, mature tools to illustrate each. We conclude \n\t\tby identifying some gaps in current understanding of \n\t\tformative peer review, and discuss how online tools for \n\t\tstudent peer review can help create opportunities to \n\t\tanswer some of these questions.},\n  keywords  = {Education, Peer review},\n}\n\n
\n
\n\n\n
\n We examine student peer review, with an emphasis on formative practice and collaborative learning, rather than peer grading. Opportunities to engage students in such formative peer assessment are growing, as a range of online tools become available to manage and simplify the process of administering student peer review. We consider whether pedagogical requirements for student peer review are likely to be discipline-specific, taking computer science and software engineering as an example. We then summarise what attributes are important for a modern generic peer review tool, and classify tools according to four prevalent emphases, using currently available, mature tools to illustrate each. We conclude by identifying some gaps in current understanding of formative peer review, and discuss how online tools for student peer review can help create opportunities to answer some of these questions.\n
\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 2010\n \n \n (3)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n An Algorithm for Affine Approximation of Binary Decision Diagrams.\n \n \n \n\n\n \n Henshall, K.; Schachte, P.; Søndergaard, H.; and Whiting, L.\n\n\n \n\n\n\n Chicago Journal of Theoretical Computer Science, 2010(11). June 2010.\n \n\n\n\n
\n\n\n\n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n\n\n\n
\n
@article{Hen-Sch-Son-Whi_CJTCS10,\n  author    = {Kevin Henshall and \n\t\tPeter Schachte and \n\t\tHarald S{\\o}ndergaard and\n\t\tLeigh Whiting},\n  title     = {An Algorithm for Affine Approximation of Binary Decision \n\t\tDiagrams},\n  journal   = {Chicago Journal of Theoretical Computer Science},\n  volume    = {2010},\n  number    = {11},\n  publisher = {University of Chicago},\n  month     = {June},\n  year      = {2010},\n  doi       = {10.4086/cjtcs.2010.011},\n  abstract  = {This paper is concerned with the problem of Boolean approximation\n\t\tin the following sense: given a Boolean function class and an\n\t\tarbitrary Boolean function, what is the function's best proxy in\n\t\tthe class? Specifically, what is its strongest logical \n\t\tconsequence (or \\emph{envelope}) in the class of \\emph{affine} \n\t\tBoolean functions. We prove various properties of affine Boolean\n\t\tfunctions and their representation as ROBDDs. Using these \n\t\tproperties, we develop an ROBDD algorithm to find the affine \n\t\tenvelope of a Boolean function.},\n  keywords  = {Boolean logic, Boolean approximation, Knowledge compilation},\n}\n\n
\n
\n\n\n
\n This paper is concerned with the problem of Boolean approximation in the following sense: given a Boolean function class and an arbitrary Boolean function, what is the function's best proxy in the class? Specifically, what is its strongest logical consequence (or envelope) in the class of affine Boolean functions. We prove various properties of affine Boolean functions and their representation as ROBDDs. Using these properties, we develop an ROBDD algorithm to find the affine envelope of a Boolean function.\n
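Semantically, the affine envelope of a function corresponds to the affine hull, over GF(2), of its set of models: pick one model v0, take the linear span of the differences v XOR v0, and translate back by v0. The brute-force sketch below works on explicit bit-vector models and is only a reference point; the paper's algorithm operates symbolically on ROBDDs.

```python
# Affine envelope as an affine hull over GF(2), brute force on bit-vectors.
def gf2_basis(vectors):
    """Row-echelon basis over GF(2), indexed by leading bit position."""
    basis = {}
    for v in vectors:
        while v:
            lead = v.bit_length() - 1
            if lead not in basis:
                basis[lead] = v
                break
            v ^= basis[lead]          # eliminate the leading bit
    return basis

def in_affine_hull(x, models):
    v0 = models[0]
    basis = gf2_basis([m ^ v0 for m in models])
    y = x ^ v0                        # translate, then try to reduce to zero
    while y:
        lead = y.bit_length() - 1
        if lead not in basis:
            return False
        y ^= basis[lead]
    return True

# models of x1 XOR x2 on two bits are 01 and 10; the function is itself
# affine (x1 + x2 = 1 mod 2), so its envelope admits no further models
models = [0b01, 0b10]
print(in_affine_hull(0b10, models))   # True: a model
print(in_affine_hull(0b11, models))   # False: excluded by the envelope
```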
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Automatic Abstraction for Congruences.\n \n \n \n\n\n \n King, A.; and Søndergaard, H.\n\n\n \n\n\n\n In Barthe, G.; and Hermenegildo, M., editor(s), Verification, Model Checking and Abstract Interpretation, volume 5944, of Lecture Notes in Computer Science, pages 197–213, 2010. Springer\n \n\n\n\n
\n\n\n\n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@Inproceedings{Kin-Son_VMCAI10,\n  author    = {Andy King and \n\t\tHarald S{\\o}ndergaard},\n  title     = {Automatic Abstraction for Congruences},\n  editor    = {G. Barthe and M. Hermenegildo},\n  booktitle = {Verification, Model Checking and Abstract Interpretation},\n  series    = {Lecture Notes in Computer Science},\n  volume    = {5944},\n  pages     = {197--213},\n  publisher = {Springer},\n  year      = {2010},\n  doi       = {10.1007/978-3-642-11319-2_16},\n  abstract  = {One approach to verifying bit-twiddling algorithms is to derive \n\t\tinvariants between the bits that constitute the variables of a \n\t\tprogram. Such invariants can often be described with systems\n\t\tof congruences where in each equation \n\t\t$\\vec{c} \\cdot \\vec{x} = d \\mod m$, $m$ is a power of two, \n\t\t$\\vec{c}$ is a vector of integer coefficients, and $\\vec{x}$ is\n\t\ta vector of propositional variables (bits). Because of the \n\t\tlow-level nature of these invariants and the large number of\n\t\tbits that are involved, it is important that the transfer \n\t\tfunctions can be derived automatically. We address this problem,\n\t\tshowing how an analysis for bit-level congruence relationships \n\t\tcan be decoupled into two parts:\n\t\t(1) a SAT-based abstraction (compilation) step which can be \n\t\tautomated, and\n\t\t(2) an interpretation step that requires no SAT-solving.\n\t\tWe exploit triangular matrix forms to derive transfer functions\n\t\tefficiently, even in the presence of large numbers of bits. \n\t\tFinally we propose program transformations that improve the \n\t\tanalysis results.},\n  keywords  = {Congruence analysis, Abstract interpretation, Abstract domains, Boolean logic},\n}\n\n
\n
\n\n\n
\n One approach to verifying bit-twiddling algorithms is to derive invariants between the bits that constitute the variables of a program. Such invariants can often be described with systems of congruences where in each equation $\vec{c} \cdot \vec{x} = d \mod m$, $m$ is a power of two, $\vec{c}$ is a vector of integer coefficients, and $\vec{x}$ is a vector of propositional variables (bits). Because of the low-level nature of these invariants and the large number of bits that are involved, it is important that the transfer functions can be derived automatically. We address this problem, showing how an analysis for bit-level congruence relationships can be decoupled into two parts: (1) a SAT-based abstraction (compilation) step which can be automated, and (2) an interpretation step that requires no SAT-solving. We exploit triangular matrix forms to derive transfer functions efficiently, even in the presence of large numbers of bits. Finally we propose program transformations that improve the analysis results.\n
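Such congruence invariants are cheap to check (though not to derive) by brute force at small bit-widths. The sketch below verifies two candidate congruences for a one-instruction basic block over 4-bit machine arithmetic; the block and the candidates are made up for illustration, and the paper derives such facts via SAT rather than enumeration.

```python
# Checking candidate congruences c1*x + c2*y = d (mod 2^k) for a basic block.
W, M = 4, 1 << 4                 # 4-bit words, arithmetic modulo 2^4

def block(x):
    """Example basic block: y = 2*x + 3 on W-bit machine arithmetic."""
    return (2 * x + 3) % M

def holds(cx, cy, d, mod):
    """Does cx*x + cy*y = d (mod `mod`) hold for every W-bit input x?"""
    return all((cx * x + cy * block(x)) % mod == d for x in range(M))

print(holds(2, 15, 13, M))       # True: 2x - y = -3 (mod 16), i.e. y = 2x + 3
print(holds(0, 1, 1, 2))         # True: y = 1 (mod 2), the result is odd
```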
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Information Loss in Knowledge Compilation: A Comparison of Boolean Envelopes.\n \n \n \n\n\n \n Schachte, P.; Søndergaard, H.; Whiting, L.; and Henshall, K.\n\n\n \n\n\n\n Artificial Intelligence, 174(9–10): 585–596. 2010.\n \n\n\n\n
\n\n\n\n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@Article{Sch-Son-Whi-Hen_AIJ10,\n  author    = {Peter Schachte and \n\t\tHarald S{\\o}ndergaard and \n\t\tLeigh Whiting and\n\t\tKevin Henshall},\n  title     = {Information Loss in Knowledge Compilation: \n\t\tA Comparison of {Boolean} Envelopes},\n  journal   = {Artificial Intelligence},\n  volume    = {174},\n  number    = {9--10},\n  pages     = {585--596},\n  year      = {2010},\n  doi       = {10.1016/j.artint.2010.03.003},\n  abstract  = {Since Selman and Kautz's seminal work on the use of Horn \n\t\tapproximation to speed up the querying of knowledge bases, \n\t\tthere has been great interest in Boolean approximation for \n\t\tAI applications. There are several Boolean classes with \n\t\tdesirable computational properties similar to those of the\n\t\tHorn class. The class of affine Boolean functions, for example,\n\t\thas been proposed as an interesting alternative to Horn for \n\t\tknowledge compilation. To investigate the trade-offs between\n\t\tprecision and efficiency in knowledge compilation, we compare, \n\t\tanalytically and empirically, four well-known Boolean classes,\n\t\tand their combinations, for ability to preserve information.\n\t\tWe note that traditional evaluation which explores unit-clause\n\t\tconsequences of random hard 3-CNF formulas does not tell the \n\t\tfull story, and we complement that evaluation with experiments \n\t\tbased on a variety of assumptions about queries and the \n\t\tunderlying knowledge base.},\n  keywords  = {Boolean logic, Boolean approximation, Knowledge compilation, Artificial intelligence},\n}\n\n
\n
\n\n\n
\n Since Selman and Kautz's seminal work on the use of Horn approximation to speed up the querying of knowledge bases, there has been great interest in Boolean approximation for AI applications. There are several Boolean classes with desirable computational properties similar to those of the Horn class. The class of affine Boolean functions, for example, has been proposed as an interesting alternative to Horn for knowledge compilation. To investigate the trade-offs between precision and efficiency in knowledge compilation, we compare, analytically and empirically, four well-known Boolean classes, and their combinations, for ability to preserve information. We note that traditional evaluation which explores unit-clause consequences of random hard 3-CNF formulas does not tell the full story, and we complement that evaluation with experiments based on a variety of assumptions about queries and the underlying knowledge base.\n
\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 2009\n \n \n (5)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n Places for Learning Engineering: A Preliminary Report on Informal Learning Spaces.\n \n \n \n\n\n \n Chang, R.; Stern, L.; Søndergaard, H.; and Hadgraft, R.\n\n\n \n\n\n\n In Proceedings of the 2009 Research in Engineering Education Symposium, 2009. \n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@Inproceedings{Cha-Ste-Son-Had_REES09,\n  author    = {Rosemary Chang and\n\t\tLinda Stern and\n\t\tHarald S{\\o}ndergaard and\n\t\tRoger Hadgraft},\n  title     = {Places for Learning Engineering: \n\t\tA Preliminary Report on Informal Learning Spaces},\n  booktitle = {Proceedings of the 2009 Research in Engineering Education \n\t\tSymposium},\n  location  = {Palm Cove, Queensland},\n  year      = {2009},\n  keywords  = {Education},\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n State Joining and Splitting for the Symbolic Execution of Binaries.\n \n \n \n\n\n \n Hansen, T.; Schachte, P.; and Søndergaard, H.\n\n\n \n\n\n\n In Bensalem, S.; and Peled, D. A., editor(s), Runtime Verification, volume 5779, of Lecture Notes in Computer Science, pages 76–92, 2009. Springer\n \n\n\n\n
\n\n\n\n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@Inproceedings{Han-Sch-Son_RV09,\n  author    = {Trevor Hansen and \n\t\tPeter Schachte and \n\t\tHarald S{\\o}ndergaard},\n  title     = {State Joining and Splitting for the Symbolic Execution \n\t\tof Binaries},\n  editor    = {S. Bensalem and D. A. Peled},\n  booktitle = {Runtime Verification},\n  series    = {Lecture Notes in Computer Science},\n  volume    = {5779},\n  pages     = {76--92},\n  publisher = {Springer},\n  year      = {2009},\n  doi       = {10.1007/978-3-642-04694-0_6},\n  abstract  = {Symbolic execution can be used to explore the possible run-time \n\t\tstates of a program. It makes use of a concept of ``state'' \n\t\twhere a variable's value has been replaced by an expression \n\t\tthat gives the value as a function of program input. \n\t\tAdditionally, a state can be equipped with a summary of \n\t\tcontrol-flow history: a ``path constraint'' keeps track of the\n\t\tclass of inputs that would have caused the same flow of control.\n\t\tBut even simple programs can have trillions of paths, so a \n\t\tpath-by-path analysis is impractical. We investigate a \n\t\t``state joining'' approach to making symbolic execution more \n\t\tpractical and describe the challenges of applying state joining\n\t\tto the analysis of unmodified Linux x86 executables. The results\n\t\tso far are mixed, with good results for some code. On other\n\t\texamples, state joining produces cumbersome constraints that \n\t\tare more expensive to solve than those generated by normal \n\t\tsymbolic execution.},\n  keywords  = {Symbolic execution},\n}\n\n
\n
\n\n\n
\n Symbolic execution can be used to explore the possible run-time states of a program. It makes use of a concept of ``state'' where a variable's value has been replaced by an expression that gives the value as a function of program input. Additionally, a state can be equipped with a summary of control-flow history: a ``path constraint'' keeps track of the class of inputs that would have caused the same flow of control. But even simple programs can have trillions of paths, so a path-by-path analysis is impractical. We investigate a ``state joining'' approach to making symbolic execution more practical and describe the challenges of applying state joining to the analysis of unmodified Linux x86 executables. The results so far are mixed, with good results for some code. On other examples, state joining produces cumbersome constraints that are more expensive to solve than those generated by normal symbolic execution.\n
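The essence of state joining is to merge the stores of two path-successors under an if-then-else term guarded by the branch condition, so one joined state covers both paths. A toy sketch with string-valued expressions (a real tool would build solver terms and simplify them):

```python
# Joining two symbolic stores at a control-flow merge point.
def join_states(cond, s_then, s_else):
    """Merge two symbolic stores under branch condition `cond`."""
    joined = {}
    for var in sorted(s_then.keys() | s_else.keys()):
        a, b = s_then.get(var, var), s_else.get(var, var)
        joined[var] = a if a == b else f"ite({cond}, {a}, {b})"
    return joined

# after "if (x > 0) y = 1; else y = -1;", with z untouched on both paths
joined = join_states("x > 0", {"y": "1", "z": "z0"}, {"y": "-1", "z": "z0"})
print(joined["y"])   # ite(x > 0, 1, -1) -- one state covers both paths
print(joined["z"])   # z0 -- identical on both sides, so no ite is needed
```

The joined constraints can grow cumbersome, which is exactly the mixed outcome the abstract reports.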
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Boolean Affine Approximation with Binary Decision Diagrams.\n \n \n \n \n\n\n \n Henshall, K.; Schachte, P.; Søndergaard, H.; and Whiting, L.\n\n\n \n\n\n\n In Downey, R.; and Manyem, P., editor(s), Theory of Computing 2009, volume 94, of Conferences in Research and Practice in Information Technology, pages 121–129, 2009. \n \n\n\n\n
\n\n\n\n \n \n \"BooleanPaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@InProceedings{Hen-Sch-Son-Whi_CATS09,\n  author    = {Kevin Henshall and \n\t\tPeter Schachte and \n\t\tHarald S{\\o}ndergaard and \n\t\tLeigh Whiting},\n  title     = {Boolean Affine Approximation with Binary Decision Diagrams},\n  editor    = {R. Downey and P. Manyem},\n  booktitle = {Theory of Computing 2009},\n  series    = {Conferences in Research and Practice in Information Technology},\n  volume    = {94},\n  pages     = {121--129},\n  year      = {2009},\n  url       = {http://crpit.com/Vol94.html},\n  abstract  ={Selman and Kautz's work on knowledge compilation has\n\t\testablished how approximation (strengthening and/or\n\t\tweakening) of a propositional knowledge-base can be used to\n\t\tspeed up query processing, at the expense of completeness.\n\t\tIn the classical approach, the knowledge-base is assumed\n\t\tto be presented as a propositional formula in conjunctive\n\t\tnormal form (CNF), and Horn functions are used to over-\n\t\tand under-approximate it (in the hope that many queries\n\t\tcan be answered efficiently using the approximations only).\n\t\tHowever, other representations are possible, and functions\n\t\tother than Horn can be used for approximations, as long\n\t\tas they have deduction-computational properties similar to\n\t\tthose of the Horn functions. Zanuttini has suggested that\n\t\tthe class of affine Boolean functions would be especially\n\t\tuseful in knowledge compilation and has presented various\n\t\taffine approximation algorithms. Since CNF is awkward\n\t\tfor presenting affine functions, Zanuttini considers both\n\t\ta sets-of-models representation and the use of modulo 2\n\t\tcongruence equations. Here we consider the use of reduced\n\t\tordered binary decision diagrams (ROBDDs), a representation\n\t\twhich is more compact than the sets of models and which\n\t\t(unlike modulo 2 congruences) can express any source\n\t\tknowledge-base. We present an ROBDD algorithm to find\n\t\tstrongest affine upper-approximations of a Boolean function\n\t\tand we argue its correctness.},\n  keywords  = {Boolean logic, Boolean approximation, Knowledge compilation, ROBDDs},\n}\n\n
@InProceedings{Son_ITiCSE09,
  author    = {Harald S{\o}ndergaard},
  title     = {Learning From and With Peers:
               The Different Roles of Student Peer Reviewing},
  booktitle = {Proceedings of the 14th Annual SIGCSE/SIGCUE Conference on
               Innovation and Technology in Computer Science Education},
  pages     = {31--35},
  publisher = {Association for Computing Machinery},
  address   = {New York, NY},
  year      = {2009},
  location  = {Paris, France},
  doi       = {10.1145/3263191},
  abstract  = {There are many different approaches to student peer
               assessment. In this paper I lay out the pedagogical philosophy
               behind my own use of student peer reviews. These should not
               only be seen as adding to the amount of formative feedback in a
               class, nor are they only about the development of certain
               higher-order cognitive skills. Properly aligned with an overall
               assessment strategy, peer reviewing can help build a stronger
               learning community. I describe such a strategy and my
               experience using PRAZE, an online tool for student peer
               reviewing, as well as students' response to the tool and its
               use.},
  keywords  = {Education, Peer review},
}

@Proceedings{Son_PPDP01,
  editor    = {Harald S{\o}ndergaard},
  title     = {Principles and Practice of Declarative Programming},
  publisher = {ACM Press},
  year      = {2001},
  keywords  = {Logic programming, Functional programming},
}

2008 (1)

@InProceedings{Kin-Son_CAV08,
  author    = {Andy King and Harald S{\o}ndergaard},
  title     = {Inferring Congruence Equations with {SAT}},
  editor    = {A. Gupta and S. Malik},
  booktitle = {Computer Aided Verification},
  series    = {Lecture Notes in Computer Science},
  volume    = {5123},
  pages     = {281--293},
  publisher = {Springer},
  year      = {2008},
  doi       = {10.1007/978-3-540-70545-1_26},
  abstract  = {This paper proposes a new approach for deriving invariants
               that are systems of congruence equations where the modulo is a
               power of 2. The technique is an amalgam of SAT-solving, where a
               propositional formula is used to encode the semantics of a
               basic block, and abstraction, where the solutions to the
               formula are systematically combined and summarised as a system
               of congruence equations. The resulting technique is more
               precise than existing congruence analyses since a single
               optimal transfer function is derived for a basic block as a
               whole.},
  keywords  = {Congruence analysis, Abstract interpretation, Abstract domains, Boolean logic},
}

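A much-simplified taste of the summarisation step, under the assumption that the solutions of a block's formula have already been enumerated (the paper obtains them with a SAT solver, and infers whole systems of equations rather than the single-variable congruences shown here). All names are illustrative.

def congruence_summary(solutions: list[int], w: int = 8) -> tuple[int, int]:
    """Strongest x = c (mod 2^k), k <= w, satisfied by all solutions."""
    k, first = w, solutions[0]
    for s in solutions[1:]:
        if (diff := s ^ first):
            k = min(k, (diff & -diff).bit_length() - 1)   # shared low bits
    return k, first % (1 << k)

# A variable observed to take the values 3, 11 and 19: x = 3 (mod 8).
print(congruence_summary([3, 11, 19]))   # (3, 3)
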
2007 (3)

@Article{Her-Sch-Son_IJFCS07,
  author    = {Brian Herlihy and Peter Schachte and Harald S{\o}ndergaard},
  title     = {Un-{Kleene} {Boolean} Equation Solving},
  journal   = {International Journal of Foundations of Computer Science},
  volume    = {18},
  number    = {2},
  pages     = {227--250},
  year      = {2007},
  doi       = {10.1142/S0129054107004668},
  abstract  = {We present a new method for finding closed forms of recursive
               Boolean function definitions. Traditionally, these closed forms
               are found by Kleene iteration: iterative approximation until a
               fixed point is reached. Conceptually, our new method replaces
               each $k$-ary function by $2^k$ Boolean constants defined by
               mutual recursion. The introduction of an exponential number of
               constants is mitigated by the simplicity of their definitions
               and by the use of a novel variant of ROBDDs to avoid repeated
               computation. Experiments suggest that this approach is
               significantly faster than Kleene iteration for examples that
               require many Kleene iteration steps.},
  keywords  = {Fixed points, ROBDDs},
}

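A toy instance of the paper's central move, for the unary definition f(x) = x | f(~x): instead of Kleene-iterating in the function space, introduce the constants a = f(0) and b = f(1) and solve their mutually recursive definitions directly. The sketch below uses plain truth tables, not the ROBDD variant of the paper, and shows that both routes agree.

def kleene_lfp():
    """Kleene iteration for f(x) = x | f(~x); f is the table (f(0), f(1))."""
    f = (False, False)                        # bottom of the lattice
    while True:
        g = (False or f[1], True or f[0])     # unfold the definition once
        if g == f:
            return f
        f = g

# Constants view: a = f(0) = 0 | f(1) = b and b = f(1) = 1 | f(0) = 1,
# so b = 1 and hence a = 1 -- no function-space iteration is needed.
print(kleene_lfp())   # (True, True): the closed form is the constant 1
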
@InProceedings{Sch-Son_SARA07,
  author    = {Peter Schachte and Harald S{\o}ndergaard},
  title     = {Boolean Approximation Revisited},
  editor    = {I. Miguel and W. Ruml},
  booktitle = {Abstraction, Reformulation and Approximation:
               Proceedings of {SARA} 2007},
  series    = {Lecture Notes in Artificial Intelligence},
  volume    = {4612},
  pages     = {329--343},
  publisher = {Springer},
  year      = {2007},
  doi       = {10.1007/978-3-540-73580-9_26},
  abstract  = {Most work to date on Boolean approximation assumes that
               Boolean functions are represented by formulas in conjunctive
               normal form. That assumption is appropriate for the classical
               applications of Boolean approximation but potentially limits
               wider use. We revisit, in a lattice-theoretic setting,
               so-called envelopes and cores in propositional logic,
               identifying them with upper and lower closure operators,
               respectively. This leads to recursive
               representation-independent characterisations of Boolean
               approximation for a large class of classes. We show that
               Boolean development can be applied in a
               representation-independent setting to develop approximation
               algorithms for a broad range of Boolean classes, including Horn
               and Krom functions.},
  keywords  = {Boolean logic, Boolean approximation, Lattice theory},
}

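As one concrete instance of an envelope (upper closure): a function is Horn exactly when its model set is closed under pointwise conjunction, so the strongest Horn consequence of psi can be obtained by closing psi's models under bitwise AND. The sketch below works on explicit model sets rather than any of the representations discussed in the paper; names are illustrative.

def horn_envelope(models: set[int]) -> set[int]:
    """Close a model set under pointwise conjunction (bitwise AND)."""
    closed = set(models)
    while True:
        extra = {x & y for x in closed for y in closed} - closed
        if not extra:
            return closed
        closed |= extra

# psi = x1 | x2 has models 01, 10 and 11; closing under AND adds 00,
# so the Horn envelope of psi is the constant-true function.
print(sorted(format(m, '02b') for m in horn_envelope({0b01, 0b10, 0b11})))
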
@Proceedings{Son-Had_AAEE07,
  editor    = {Harald S{\o}ndergaard and Roger Hadgraft},
  title     = {Proceedings of the 18th Conference of the Australasian
               Association for Engineering Education},
  publisher = {Department of Computer Science and Software Engineering,
               The University of Melbourne, Vic 3010, Australia},
  month     = {December},
  year      = {2007},
  isbn      = {978-0-9757172-1-9},
  note      = {Published on CD-ROM and online},
  keywords  = {Education},
}

2006 (2)

@InProceedings{Her-Sch-Son_CATS06,
  author    = {Brian Herlihy and Peter Schachte and Harald S{\o}ndergaard},
  title     = {Boolean Equation Solving as Graph Traversal},
  editor    = {J. Gudmundsson and B. Jay},
  booktitle = {Theory of Computing 2006},
  series    = {Conferences in Research and Practice in Information Technology},
  volume    = {51},
  pages     = {123--132},
  year      = {2006},
  url       = {http://crpit.com/Vol51.html},
  abstract  = {We present a new method for finding closed forms of recursive
               Boolean function definitions. Traditionally, these closed forms
               are found by iteratively approximating until a fixed point is
               reached. Conceptually, our new method replaces each $k$-ary
               function by $2^k$ Boolean variables defined by mutual
               recursion. The introduction of an exponential number of
               variables is mitigated by the simplicity of their definitions
               and by the use of a novel variant of ROBDDs to avoid repeated
               computation. Experimental evaluation suggests that this
               approach is significantly faster than Kleene iteration for
               examples that would require many Kleene iteration steps.},
  keywords  = {Fixed points, ROBDDs},
}

@InProceedings{Sch-Son_VMCAI06,
  author    = {Peter Schachte and Harald S{\o}ndergaard},
  title     = {Closure Operators for {ROBDDs}},
  editor    = {E. A. Emerson and K. S. Namjoshi},
  booktitle = {Proceedings of the Seventh International Conference on
               Verification, Model Checking and Abstract Interpretation},
  series    = {Lecture Notes in Computer Science},
  volume    = {3855},
  pages     = {1--16},
  publisher = {Springer},
  year      = {2006},
  doi       = {10.1007/11609773_1},
  abstract  = {Program analysis commonly makes use of Boolean functions to
               express information about run-time states. Many important
               classes of Boolean functions used this way, such as the
               monotone functions and the Boolean Horn functions, have simple
               semantic characterisations. They also have well-known syntactic
               characterisations in terms of Boolean formulae, say, in
               conjunctive normal form. Here we are concerned with
               characterisations using binary decision diagrams. Over the last
               decade, ROBDDs have become popular as representations of
               Boolean functions, mainly for their algorithmic properties.
               Assuming ROBDDs as representation, we address the following
               problems: Given a function $\psi$ and a class of functions
               $\mathcal{C}$, how to find the strongest
               $\varphi \in \mathcal{C}$ entailed by $\psi$ (when such a
               $\varphi$ is known to exist)? How to find the weakest
               $\varphi \in \mathcal{C}$ that entails $\psi$? How to determine
               that a function $\psi$ belongs to a class $\mathcal{C}$?
               Answers are important, not only for several program analyses,
               but for other areas of computer science, where Boolean
               approximation is used. We give, for many commonly used classes
               $\mathcal{C}$ of Boolean functions, algorithms to approximate
               functions represented as ROBDDs, in the sense described above.
               The algorithms implement upper closure operators, familiar from
               abstract interpretation. They immediately lead to algorithms
               for deciding class membership.},
  keywords  = {Boolean logic, Boolean approximation, ROBDDs},
}

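For one of the classes named in the abstract, the monotone functions, the three questions have short answers on explicit truth tables (the paper, by contrast, works directly on ROBDDs): the strongest monotone function entailed by psi has the upward closure of psi's models as its models, and membership testing just checks that the model set is already upward closed. A minimal sketch, with illustrative names:

def up_closure(models: set[int], n: int) -> set[int]:
    """Upward closure of a set of n-variable models (bit-vectors)."""
    closed, work = set(models), list(models)
    while work:
        m = work.pop()
        for i in range(n):
            if (m2 := m | (1 << i)) not in closed:   # flip a variable to 1
                closed.add(m2)
                work.append(m2)
    return closed

def is_monotone(models: set[int], n: int) -> bool:
    return up_closure(models, n) == models

# psi = (x1 <-> x2) has models 00 and 11; it is not monotone, and its
# monotone envelope (the upward closure) is the constant-true function.
models = {0b00, 0b11}
print(is_monotone(models, 2))                                    # False
print(sorted(format(m, '02b') for m in up_closure(models, 2)))
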
2005 (1)

@InProceedings{Mof-Hug-Son-Gru_ACE05,
  author    = {Alistair Moffat and Baden Hughes and
               Harald S{\o}ndergaard and Paul Gruba},
  title     = {Making Connections: First Year Transition for Computer
               Science and Software Engineering Students},
  editor    = {A. Young and D. Tolhurst},
  booktitle = {Proceedings of the Seventh Australasian Computing Education
               Conference (ACE2005)},
  series    = {Conferences in Research and Practice in Information Technology},
  volume    = {42},
  pages     = {229--238},
  year      = {2005},
  abstract  = {During the last decade, an increasing emphasis has been placed
               on the need for carefully planned transition programs to help
               first-year students integrate into university. In this paper we
               critically examine our experiences in designing and running
               successive transition programs for Computer Science and
               Software Engineering students. Over the last three years we
               have trialled several models. At present, our program requires
               all entering students to be enrolled in a transition subject,
               ``Making Connections'', which runs for half a semester. The
               subject, led by designated academic staff, serves as a forum
               for students to learn about each other, the department and the
               university. The program includes a computer-based language and
               study skills assessment component, including self-assessment
               tasks. Students can extend the subject by taking academic
               skills workshops run by the university's student support
               services. We have found compulsion to be a useful facilitator
               of student engagement, and the addition of an objective
               assessment task has been beneficial.},
  keywords  = {Education, Student transition programs},
}

2004 (2)

@InProceedings{Gru-Mof-Son-Zob_ACE04,
  author    = {Paul Gruba and Alistair Moffat and
               Harald S{\o}ndergaard and Justin Zobel},
  title     = {What Drives Curriculum Change?},
  editor    = {R. Lister and A. Young},
  booktitle = {Proceedings of the Sixth Australasian Computing Education
               Conference (ACE2004)},
  location  = {Dunedin, New Zealand},
  pages     = {109--117},
  month     = {January},
  year      = {2004},
  abstract  = {While promotional literature about computer science programs
               may claim that curricula are determined by the needs of the
               students and by international best practice, the reality is
               often different. In this paper we reflect on the factors
               underlying curriculum change in computer science departments
               and schools, from institutional requirements and financial
               pressures to purely academic considerations. We have used these
               reflections as the basis of an investigation of curriculum
               management practices at institutions in Australasia, via a
               survey instrument sent to a range of colleagues. Our findings
               from the survey are consistent with our own experiences,
               namely, that curriculum change is driven or inhibited by
               factors such as vocal individuals and practical constraints
               rather than higher academic motives.},
  keywords  = {Education, Computer science curricula},
}

@InProceedings{Son-Tho_FIE04,
  author    = {Harald S{\o}ndergaard and Doreen Thomas},
  title     = {Effective Feedback to Small and Large Classes},
  booktitle = {Proceedings of the 2004 ASEE/IEEE Frontiers in Education
               Conference (FIE2004)},
  pages     = {F1E-9--F1E-14},
  location  = {Savannah GA},
  month     = {October},
  year      = {2004},
  doi       = {10.1109/FIE.2004.1408573},
  abstract  = {Educational experts appear to be in broad agreement when it
               comes to the importance of feedback for effective learning.
               Students benefit from plenty of opportunity and encouragement
               to express their understanding, and from informed, supportive,
               possibly challenging, feedback. At the same time, we observe
               that many students at our university do not find that they
               receive helpful feedback. One in three Engineering students
               disagree or strongly disagree with the Quality of Teaching
               questionnaire's ``I received helpful feedback on how I was
               going'' in the individual course, and most other disciplines
               find themselves in a similar situation. For the university as a
               whole, student responses to this question are clearly less
               positive than to other questions on quality of teaching,
               intellectual stimulation, staff interest, workload, and so on,
               and this state of affairs seems quite common in the Australian
               context. We discuss best practice in feedback provision, partly
               based on our interviews with students and staff. We have been
               particularly interested in identifying cost-effective ways of
               providing informed and constructive feedback to large classes.
               Feedback is often understood, by Engineering students and staff
               alike, simply as comments on submitted work---typically written
               assignments. We argue in favour of a broader concept that
               covers a multitude of ways for a student to develop deep
               learning through conversation, including questions and answers
               provided by others, team work, study groups, and formative
               teacher-provided feedback during an assessment task. We
               emphasise the coaching role of the teacher, and feedback
               designed to encourage students to monitor their own learning.
               Large classes pose particular logistic problems. We identify
               staff development as a crucial factor for consistent, effective
               feedback, and point to web-based feedback provision as a
               workable solution to some logistic problems. We briefly discuss
               the role of information technology more broadly, both for
               learning enhancement and for automated feedback provision.},
  keywords  = {Education},
}

2002 (2)

@InCollection{Cod-Son_JonesFest02,
  author    = {Michael Codish and Harald S{\o}ndergaard},
  title     = {Meta-Circular Abstract Interpretation in {Prolog}},
  editor    = {T. Mogensen and D. Schmidt and I. H. Sudborough},
  booktitle = {The Essence of Computation: Complexity, Analysis, Transformation},
  series    = {Lecture Notes in Computer Science},
  volume    = {2566},
  pages     = {109--134},
  publisher = {Springer},
  year      = {2002},
  doi       = {10.1007/3-540-36377-7_6},
  abstract  = {We give an introduction to the meta-circular approach to the
               abstract interpretation of logic programs. This approach is
               particularly useful for prototyping and for introductory
               classes on abstract interpretation. Using interpreters,
               students can immediately write, adapt, and experiment with
               interpreters and working dataflow analysers. We use a simple
               meta-circular interpreter, based on a ``non-ground $T_P$''
               semantics, as a generic analysis engine. Instantiating the
               engine is a matter of providing an appropriate domain of
               approximations, together with definitions of ``abstract''
               unification and disjunction. Small changes of the interpreter
               let us vary both what can be ``observed'' by an analyser, and
               how fixed point computation is done. Amongst the dataflow
               analyses used to exemplify this approach are a parity analysis,
               groundness dependency analysis, call patterns, depth-$k$
               analysis, and a ``pattern'' analysis to establish most specific
               generalisations of calls and success sets.},
  keywords  = {Abstract interpretation, Abstract domains, Logic programming, Prolog},
}

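To give a flavour of the simplest analysis mentioned, parity analysis, here is the abstract domain on its own in Python. The paper's point is that such analyses drop out of a Prolog meta-interpreter; that machinery is not reproduced here, and the encoding below is purely illustrative.

# Abstract values: 'even', 'odd', and 'top' (parity unknown).
def add(a: str, b: str) -> str:
    if 'top' in (a, b):
        return 'top'
    return 'even' if a == b else 'odd'

def mul(a: str, b: str) -> str:
    if 'even' in (a, b):
        return 'even'            # any multiple of an even number is even
    return 'top' if 'top' in (a, b) else 'odd'

# With x odd and y of unknown parity, x + 2*y is odd:
x, y = 'odd', 'top'
print(add(x, mul('even', y)))    # 'odd'
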
@InProceedings{Gly-Stu-Sul-Son_ICFP02,
  author    = {Kevin Glynn and Peter J. Stuckey and Martin Sulzmann and
               Harald S{\o}ndergaard},
  title     = {Exception Analysis for Non-Strict Languages},
  booktitle = {Proceedings of the 2002 ACM SIGPLAN International Conference
               on Functional Programming},
  pages     = {98--109},
  publisher = {ACM Press},
  year      = {2002},
  doi       = {10.1145/581478.581488},
  abstract  = {In this paper we present the first exception analysis for a
               non-strict language. We augment a simply-typed functional
               language with exceptions, and show that we can define a
               type-based inference system to detect uncaught exceptions. We
               have implemented this exception analysis in the GHC compiler
               for Haskell, which has been recently extended with exceptions.
               We give empirical evidence that the analysis is practical.},
  keywords  = {Exception analysis, Functional programming},
}

2001 (3)

@InProceedings{Cod-Gen-Son-Stu_ICLP01,
  author    = {Michael Codish and Samir Genaim and
               Harald S{\o}ndergaard and Peter J. Stuckey},
  title     = {Higher-Precision Groundness Analysis},
  editor    = {P. Codognet},
  booktitle = {Logic Programming: Proceedings of the 17th International
               Conference},
  series    = {Lecture Notes in Computer Science},
  volume    = {2237},
  pages     = {135--149},
  publisher = {Springer},
  year      = {2001},
  doi       = {10.1007/3-540-45635-X_17},
  abstract  = {Groundness analysis of logic programs using Pos-based abstract
               interpretation is one of the clear success stories of the last
               decade in the area of logic program analysis. In this work we
               identify two problems with the Pos domain, the multiplicity and
               sign problems, that arise independently in groundness and
               uniqueness analysis. We describe how these problems can be
               solved using an analysis based on a domain Size for inferring
               term size relations. However this solution has its own
               shortcomings because it involves a widening operator which
               leads to a loss of Pos information. Inspired by Pos, Size and
               the Lsign domain for abstract linear arithmetic constraints we
               introduce a new domain Lpos, and show how it can be used for
               groundness and uniqueness analysis. The idea is to use the sign
               information of Lsign to improve the widening of Size so that it
               does not lose Pos information. We prove that the resulting
               analyses using Lpos are uniformly more precise than those using
               Pos.},
  keywords  = {Boolean logic, Abstract domains, Abstract interpretation, Logic programming},
}

@InProceedings{Gly-Stu-Sul-Son_PADO01,
  author    = {Kevin Glynn and Peter J. Stuckey and Martin Sulzmann and
               Harald S{\o}ndergaard},
  title     = {Boolean Constraints for Binding-Time Analysis},
  editor    = {O. Danvy and A. Filinski},
  booktitle = {Programs as Data Objects: Proceedings of the Second Symposium},
  series    = {Lecture Notes in Computer Science},
  volume    = {2053},
  pages     = {39--63},
  publisher = {Springer},
  year      = {2001},
  doi       = {10.1007/3-540-44978-7_4},
  abstract  = {To achieve acceptable accuracy, many program analyses for
               functional programs are ``property polymorphic''. That is, they
               can infer different input-output relations for a function at
               separate applications of the function, in a manner similar to
               type inference for a polymorphic language. We extend a property
               polymorphic (or ``polyvariant'') method for binding-time
               analysis, due to Dussart, Henglein, and Mossin, so that it
               applies to languages with ML-style type polymorphism. The
               extension is non-trivial and we have implemented it for
               Haskell. While we follow others in specifying the analysis as a
               non-standard type inference, we argue that it should be
               realised through a translation into the well-understood domain
               of Boolean constraints. The expressiveness offered by Boolean
               constraints opens the way for smooth extensions to
               sophisticated language features and it allows for more accurate
               analysis.},
  keywords  = {Boolean logic, Binding-time analysis, Haskell},
}

@Article{Gru-Son_CSE01,
  author    = {Paul Gruba and Harald S{\o}ndergaard},
  title     = {A Constructivist Approach to Communication Skills Instruction
               in Computer Science},
  journal   = {Computer Science Education},
  volume    = {11},
  number    = {3},
  pages     = {203--219},
  year      = {2001},
  doi       = {10.1076/csed.11.3.203.3833},
  abstract  = {From the social constructivist perspective of education,
               learning is best achieved when students face complex, real
               world problems in which there are no clear answers. Faced with
               a sizable common goal, students work collaboratively towards
               outcomes and maintain ownership over key decisions. The role of
               staff is that of facilitators whose role is to challenge
               learners to explore multiple aspects of the problem as they go
               about reaching viable solutions. Such a role contrasts, for
               example, to an approach which sets out to lead students to a
               presumed correct solution that is already possessed by the
               instructor. Based on these principles we designed and
               implemented a course on communication skills in Computer
               Science. Here, we describe our experiences using a student-run
               conference as a means to teach communication skills. In this
               approach, students were charged with the task of planning and
               organising a conference, including peer review, publicity,
               budget, sponsorship, web design, conference program,
               presentation schedule, speaker support, and catering. We
               describe the principles and their implementation and reflect on
               the outcome.},
  keywords  = {Education, Communication skills instruction},
}

2000 (1)

@InProceedings{Gru-Son_ACSC00,
  author    = {Paul Gruba and Harald S{\o}ndergaard},
  title     = {Transforming Communication Skills Instruction:
               The Conference Approach},
  editor    = {J. Edwards},
  booktitle = {Proceedings of the 23rd Australasian Computer Science
               Conference},
  series    = {Australian Computer Science Communications 22},
  number    = {1},
  pages     = {88--94},
  year      = {2000},
  publisher = {IEEE Computer Society},
  location  = {Canberra, Australia},
  doi       = {10.1109/ACSC.2000.824385},
  abstract  = {From the social constructivist perspective of education,
               learning is best achieved when students face complex, real
               world problems in which there are no clear answers. Faced with
               a sizable common goal, students work collaboratively towards
               outcomes and maintain ownership over key decisions. The role of
               staff is that of facilitators whose role is to challenge
               learners to explore multiple aspects of the problem as they go
               about reaching viable solutions. Such a role contrasts, for
               example, to an approach which sets out to lead students to a
               presumed correct solution that is already possessed by the
               instructor. Based on these principles we designed and
               implemented a course on communication skills in Computer
               Science. Here, we describe our experiences using a student-run
               conference as a means to teach communication skills. In this
               approach, students were charged with the task of planning and
               organising a conference, including peer review, publicity,
               budget, sponsorship, web design, conference program,
               presentation schedule, speaker support, and catering. We
               describe the principles and their implementation and reflect on
               the outcome.},
  keywords  = {Education, Communication skills instruction},
}

1999 (4)

@Article{Cod-Son-Stu_TOPLAS99,
  author    = {Michael Codish and Harald S{\o}ndergaard and Peter J. Stuckey},
  title     = {Sharing and Groundness Dependencies in Logic Programs},
  journal   = {ACM Transactions on Programming Languages and Systems},
  volume    = {21},
  number    = {5},
  pages     = {948--976},
  year      = {1999},
  doi       = {10.1145/330249.330252},
  abstract  = {We investigate Jacobs and Langen's \textsf{Sharing} domain,
               introduced for the analysis of variable sharing in logic
               programs, and show that it is isomorphic to Marriott and
               S{\o}ndergaard's \textsf{Pos} domain, introduced for the
               analysis of groundness dependencies. Our key idea is to view
               the sets of variables in a \textsf{Sharing} domain element as
               the models of a corresponding Boolean function. This leads to a
               recasting of sharing analysis in terms of the property of ``not
               being affected by the binding of a \emph{single} variable.''
               Such an ``unaffectedness dependency'' analysis has close
               connections with groundness dependency analysis using positive
               Boolean functions. This new view improves our understanding of
               sharing analysis, and leads to an elegant expression of its
               combination with groundness dependency analysis based on the
               reduced product of \textsf{Sharing} and \textsf{Pos}. It also
               opens up new avenues for the efficient implementation of
               sharing analysis, for example using reduced ordered binary
               decision diagrams, as well as efficient implementation of the
               reduced product, using domain factorizations.},
  keywords  = {Boolean logic, Abstract domains, Abstract interpretation, Logic programming},
}

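The Sharing-to-Pos construction itself is not reproduced here, but the Pos side is easy to illustrate: a groundness dependency is a positive Boolean function, such as the classic Z <-> (X & Y) for append(X, Y, Z), and groundness queries are entailment checks over its models. A small sketch, with illustrative names:

from itertools import product

def models(f, n):
    return [v for v in product([False, True], repeat=n) if f(*v)]

append_dep = lambda x, y, z: z == (x and y)    # Z <-> (X & Y)

# If X and Y are ground (true), Z is ground in every model:
print(all(z for x, y, z in models(append_dep, 3) if x and y))   # True
# Grounding X alone does not ground Z:
print(all(z for x, y, z in models(append_dep, 3) if x))         # False
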
@InProceedings{Gab-Gly-Son_LOPSTR98,
  author    = {Tihomir Gabri{\'c} and Kevin Glynn and Harald S{\o}ndergaard},
  title     = {Strictness Analysis as Finite-Domain Constraint Solving},
  editor    = {P. Flener},
  booktitle = {Logic-Based Program Synthesis and Transformation},
  series    = {Lecture Notes in Computer Science},
  volume    = {1559},
  pages     = {255--270},
  publisher = {Springer},
  address   = {Berlin, Germany},
  year      = {1999},
  doi       = {10.1007/3-540-48958-4_14},
  abstract  = {It has become popular to express dataflow analyses in logical
               form. In this paper we investigate a new approach to the
               analysis of functional programs, based on synthesis of
               constraint logic programs. We sketch how the language Toupie,
               originally designed with logic program analysis as one
               objective, lends itself also to sophisticated strictness
               analysis. Strictness analysis is straightforward in the
               simplest case, that of analysing a first-order functional
               language using just two strictness values, namely divergence
               and ``don't know''. Mycroft's classical translation immediately
               yields perfectly valid Boolean constraint logic programs,
               which, when run, provide the desired strictness information.
               However, more sophisticated analysis requires more complex
               domains of strictness values. We recast Wadler's classical
               analysis over a $2n$-point domain as finite-domain constraint
               solving. This approach has several advantages. First, the
               translation is relatively simple. We translate a recursive
               function definition into one or two constraint program clauses,
               in a manner which naturally extends Mycroft's translation for
               the 2-point case, whereas the classical approach translates the
               definition of an $n$-place function over lists into $4^n$
               mutually recursive equations. Second, the resulting program
               produces \emph{relational} information, allowing us, for
               example, to ask which combinations of properties of input will
               produce a given output. Third, the approach allows us to
               leverage established technology, for solving finite-domain
               constraints, as well as for finding fixed points. Finally, the
               use of (disjunctive) constraints can yield a higher precision
               in the analysis of some programs.},
  keywords  = {Strictness analysis, Abstract interpretation, Abstract domains, Functional programming, Constraint programming},
}

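For the simple two-point case the abstract describes, Mycroft's translation of f(x, y) = if x == 0 then y else f(x - 1, y) yields the Boolean equation f#(x, y) = x & (y | f#(x, y)), with 0 read as ``definitely diverges'' and 1 as ``don't know''. The fixed point can be tabulated directly; the Python below stands in for the Toupie constraint program of the paper and covers the 2-point case only.

from itertools import product

f = {v: False for v in product([False, True], repeat=2)}    # bottom
while True:
    g = {(x, y): x and (y or f[(x, y)]) for (x, y) in f}
    if g == f:
        break
    f = g

print(f)   # f#(x, y) = x & y: f is strict in both of its arguments
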
@InProceedings{Gly-Son_ACSC99,
  author    = {Kevin Glynn and Harald S{\o}ndergaard},
  title     = {Immediate Fixpoints for Strictness Analysis},
  editor    = {J. Edwards},
  booktitle = {Proceedings of the 22nd Australasian Computer Science
               Conference},
  series    = {Australian Computer Science Communications},
  volume    = {21},
  number    = {1},
  pages     = {336--347},
  publisher = {Springer},
  year      = {1999},
  abstract  = {This paper is concerned with the problem of finding fixpoints
               without resorting to Kleene iteration. In many cases, a simple
               syntactic test suffices to establish that a closed form can be
               found without iteration. For example, if the function $F$,
               whose fixpoint we want to determine, is idempotent, then its
               least fixpoint is simply the instantiation $F(0)$, where $0$ is
               the least element of the lattice. In many cases, idempotence
               can be gleaned easily from $F$'s definition. In other cases,
               simple symbolic manipulation of a recursive definition may
               reveal shortcuts to a closed form. We explore such cases and
               show how the faster fixpoint calculation can have applications
               in strictness analysis.},
  keywords  = {Fixed points, Strictness analysis, Functional programming},
}

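A worked instance of the paper's observation: if a monotone functional $F$ is idempotent, then $F(0)$ is already its least fixpoint, since $F(F(0)) = F(0)$ and $F(0) \leq F(x) = x$ for every fixpoint $x$. The sketch below checks this against plain Kleene iteration for the functional F(g) = lambda x: x or g(x), with functions represented as truth-table pairs; the names are illustrative.

def F(g):
    """F(g) = lambda x: x or g(x), with g given as the pair (g(0), g(1))."""
    return (False or g[0], True or g[1])

bottom = (False, False)
once = F(bottom)
assert F(once) == once     # idempotence: one application of F suffices

g = bottom                 # plain Kleene iteration, for comparison
while F(g) != g:
    g = F(g)
assert g == once
print(once)                # (False, True): the identity function
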
\n
\n\n\n
\n This paper is concerned with the problem of finding fixpoints without resorting to Kleene iteration. In many cases, a simple syntactic test suffices to establish that a closed form can be found without iteration. For example, if the function $F$, whose fixpoint we want to determine, is idempotent, then its least fixpoint is simply the instantiation $F(0)$, where $0$ is the least element of the lattice. In many cases, idempotence can be gleaned easily from $F$'s definition. In other cases, simple symbolic manipulation of a recursive definition may reveal shortcuts to a closed form. We explore such cases and show how the faster fixpoint calculation can have applications in strictness analysis.\n
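The shortcut is easy to state concretely. A minimal sketch (mine; the closure operator below is a hypothetical example): for an idempotent $F$, Kleene iteration from the bottom element stops after one application, so the least fixpoint is simply $F(0)$.

# Kleene iteration versus the immediate-fixpoint shortcut
# (illustration only; the function below is a hypothetical example).

def lfp_kleene(f, bottom):
    """Least fixpoint by standard Kleene iteration."""
    x = bottom
    while f(x) != x:
        x = f(x)
    return x

# F(s) = s U {'a', 'b'} is monotonic and idempotent on the lattice of
# finite sets, so its least fixpoint is F(bottom), with no iteration.
def f(s):
    return frozenset(s) | {'a', 'b'}

bottom = frozenset()
assert lfp_kleene(f, bottom) == f(bottom)  # idempotence: one step suffices
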
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n A Strategy for Managing Content Complexity in Algorithm Animation.\n \n \n \n\n\n \n Stern, L.; Søndergaard, H.; and Naish, L.\n\n\n \n\n\n\n In Manaris, B., editor(s), Proceedings of the Fourth Annual SIGCSE/SIGCUE Conference on Innovation and Technology in Computer Science Education, pages 127–130, New York, NY, 1999. Association for Computing Machinery\n \n\n\n\n
\n\n\n\n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n\n\n\n
\n
@InProceedings{Ste-Son-Nai_ITiCSE99,\n  author    = {Linda Stern and \n\t\tHarald S{\\o}ndergaard and \n\t\tLee Naish},\n  title     = {A Strategy for Managing Content Complexity in\n\t\tAlgorithm Animation},\n  editor    = {B. Manaris},\n  booktitle = {Proceedings of the Fourth Annual SIGCSE/SIGCUE Conference on\n\t\tInnovation and Technology in Computer Science Education},\n  pages     = {127--130},\n  publisher = {Association for Computing Machinery},\n  address   = {New York, NY},\n  year      = {1999},\n  location  = {Cracow, Poland},\n  doi       = {10.1145/305786.305891},\n  abstract  = {Computer animation is an excellent medium for capturing \n\t\tthe dynamic nature of data structure manipulations, and \n\t\tcan be used to advantage in the teaching of algorithms \n\t\tand data structures. A major educational issue is the \n\t\tnecessity of providing a means for the student to manage \n\t\tthe complexity of the material. We have addressed this\n\t\tissue in a multimedia teaching tool called ``Algorithms in\n\t\tAction'' by allowing students to view an algorithm at \n\t\tvarying levels of detail. Starting with a high level \n\t\tpseudocode description of the algorithm, with accompanying \n\t\thigh level animation and textual explanation, students can \n\t\texpand sections of the pseudocode to expose more detail.\n\t\tAnimation and explanation are controlled in coordinated\n\t\tfashion, becoming correspondingly more detailed as the \n\t\tpseudocode is expanded. Student feedback suggests that the\n\t\tavailability of multiple levels of detail and the facility\n\t\tfor the user to control the level of detail being viewed\n\t\tis an effective way to manage content complexity.},\n  keywords  = {Education, Algorithm visualisation, Web-based learning},\n}\n\n
\n
\n\n\n
\n Computer animation is an excellent medium for capturing the dynamic nature of data structure manipulations, and can be used to advantage in the teaching of algorithms and data structures. A major educational issue is the necessity of providing a means for the student to manage the complexity of the material. We have addressed this issue in a multimedia teaching tool called ``Algorithms in Action'' by allowing students to view an algorithm at varying levels of detail. Starting with a high level pseudocode description of the algorithm, with accompanying high level animation and textual explanation, students can expand sections of the pseudocode to expose more detail. Animation and explanation are controlled in coordinated fashion, becoming correspondingly more detailed as the pseudocode is expanded. Student feedback suggests that the availability of multiple levels of detail and the facility for the user to control the level of detail being viewed is an effective way to manage content complexity.\n
\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 1998\n \n \n (4)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n Two Classes of Boolean Functions for Dependency Analysis.\n \n \n \n\n\n \n Armstrong, T.; Marriott, K.; Schachte, P.; and Søndergaard, H.\n\n\n \n\n\n\n Science of Computer Programming, 31(1): 3–45. 1998.\n \n\n\n\n
\n\n\n\n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@Article{Arm-Mar-Sch-Son_SCP98,\n  author   = {Tania Armstrong and \n\t\tKim Marriott and \n\t\tPeter Schachte and \n\t\tHarald S{\\o}ndergaard},\n  title\t   = {Two Classes of {Boolean} Functions for Dependency Analysis},\n  journal  = {Science of Computer Programming},\n  volume   = {31},\n  number   = {1},\n  pages    = {3--45},\n  year\t   = {1998},\n  doi       = {10.1016/S0167-6423(96)00039-1},\n  abstract = {Many static analyses for declarative programming/database \n\t\tlanguages use Boolean functions to express dependencies \n\t\tamong variables or argument positions. Examples include \n\t\tgroundness analysis, arguably the most important analysis for \n\t\tlogic programs, finiteness analysis and functional dependency \n\t\tanalysis for databases. We identify two classes of Boolean \n\t\tfunctions that have been used: positive and definite functions,\n\t\tand we systematically investigate these classes and their \n\t\tefficient implementation for dependency analyses. On the \n\t\ttheoretical side we provide syntactic characterizations and\n\t\tstudy the expressiveness and algebraic properties of the \n\t\tclasses. In particular, we show that both are closed under \n\t\texistential quantification. On the practical side we \n\t\tinvestigate various representations for the classes based on \n\t\treduced ordered binary decision diagrams (ROBDDs), disjunctive \n\t\tnormal form, conjunctive normal form, Blake canonical form, \n\t\tdual Blake canonical form, and a form specific to definite \n\t\tfunctions. We compare the resulting implementations of \n\t\tgroundness analyzers based on the representations for precision\n\t\tand efficiency.},\n  keywords  = {Boolean logic, Abstract domains, Abstract interpretation, Logic programming},\n}\n\n
\n
\n\n\n
\n Many static analyses for declarative programming/database languages use Boolean functions to express dependencies among variables or argument positions. Examples include groundness analysis, arguably the most important analysis for logic programs, finiteness analysis and functional dependency analysis for databases. We identify two classes of Boolean functions that have been used: positive and definite functions, and we systematically investigate these classes and their efficient implementation for dependency analyses. On the theoretical side we provide syntactic characterizations and study the expressiveness and algebraic properties of the classes. In particular, we show that both are closed under existential quantification. On the practical side we investigate various representations for the classes based on reduced ordered binary decision diagrams (ROBDDs), disjunctive normal form, conjunctive normal form, Blake canonical form, dual Blake canonical form, and a form specific to definite functions. We compare the resulting implementations of groundness analyzers based on the representations for precision and efficiency.\n
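The closure property reported above can be checked on a small example. In the sketch below (representation and example mine, not the paper's ROBDD machinery), a Boolean function is given by its set of models, positivity means that the everywhere-true assignment is a model, and existential quantification is Schröder elimination, ∃x.f = f[x↦0] ∨ f[x↦1].

from itertools import product

# Boolean functions over VARS, represented by their sets of models.
# Illustration only; the paper's implementations use ROBDDs and
# normal forms rather than explicit model sets.

VARS = ('x', 'y', 'z')

def models(f):
    return {bits for bits in product((False, True), repeat=len(VARS))
            if f(dict(zip(VARS, bits)))}

def is_positive(ms):
    # A function is positive iff it maps the all-true assignment to true.
    return (True,) * len(VARS) in ms

def exists(ms, v):
    # Schroeder elimination: (exists v. f) = f[v:=0] \/ f[v:=1].
    i = VARS.index(v)
    return {a[:i] + (b,) + a[i + 1:] for a in ms for b in (False, True)}

# A groundness-style dependency: x <-> (y /\ z).
ms = models(lambda a: a['x'] == (a['y'] and a['z']))
assert is_positive(ms)               # the dependency is positive, and
assert is_positive(exists(ms, 'y'))  # so is its projection onto x, z
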
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n The Boolean Logic of Set Sharing Analysis.\n \n \n \n\n\n \n Codish, M.; and Søndergaard, H.\n\n\n \n\n\n\n In Palamidessi, C.; Glaser, H.; and Meinke, K., editor(s), Principles of Declarative Programming, volume 1490, of Lecture Notes in Computer Science, pages 89–101, 1998. Springer\n \n\n\n\n
\n\n\n\n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@Inproceedings{Cod-Son_PLILP98,\n  author    = {Michael Codish and \n\t\tHarald S{\\o}ndergaard},\n  title     = {The {Boolean} Logic of Set Sharing Analysis},\n  editor    = {C. Palamidessi and H. Glaser and K. Meinke},\n  booktitle = {Principles of Declarative Programming},\n  series    = {Lecture Notes in Computer Science},\n  volume    = {1490},\n  pages     = {89--101},\n  publisher = {Springer},\n  year      = {1998},\n  doi       = {10.1007/bfb0056609},\n  abstract  = {We show that Jacobs and Langen's domain for set-sharing \n\t\tanalysis is isomorphic to the domain of positive Boolean \n\t\tfunctions, introduced by Marriott and S{\\o}ndergaard for \n\t\tgroundness dependency analysis. Viewing a set-sharing \n\t\tdescription as a minterm representation of a Boolean \n\t\tfunction leads to re-casting sharing analysis as an\n\t\tinstantiation dependency analysis. The key idea is to \n\t\tview the sets of variables in a sharing domain element \n\t\tas the models of a Boolean function. In this way, sharing \n\t\tsets are precisely dual negated positive Boolean functions. \n\t\tThis new view improves our understanding of sharing \n\t\tanalysis considerably and opens up new avenues for the\n\t\tefficient implementation of this kind of analysis, for \n\t\texample using ROBDDs. To this end we express Jacobs and \n\t\tLangen's abstract operations for set sharing in logical form.},\n  keywords  = {Boolean logic, Abstract domains, Abstract interpretation, Logic programming},\n}\n\n
\n
\n\n\n
\n We show that Jacobs and Langen's domain for set-sharing analysis is isomorphic to the domain of positive Boolean functions, introduced by Marriott and Søndergaard for groundness dependency analysis. Viewing a set-sharing description as a minterm representation of a Boolean function leads to re-casting sharing analysis as an instantiation dependency analysis. The key idea is to view the sets of variables in a sharing domain element as the models of a Boolean function. In this way, sharing sets are precisely dual negated positive Boolean functions. This new view improves our understanding of sharing analysis considerably and opens up new avenues for the efficient implementation of this kind of analysis, for example using ROBDDs. To this end we express Jacobs and Langen's abstract operations for set sharing in logical form.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Differential Methods in Logic Program Analysis.\n \n \n \n\n\n \n Garcı́a de la Banda, M.; Marriott, K.; Stuckey, P. J.; and Søndergaard, H.\n\n\n \n\n\n\n Journal of Logic Programming, 35(1): 1–37. 1998.\n \n\n\n\n
\n\n\n\n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n\n\n\n
\n
@article{Gar-Mar-Stu-Son_JLP98,\n  author    = {Maria {Garc{\\'\\i}a de la Banda} and \n\t\tKim Marriott and\n\t\tPeter J. Stuckey and \n\t\tHarald S{\\o}ndergaard},\n  title     = {Differential Methods in Logic Program Analysis},\n  journal   = {Journal of Logic Programming},\n  volume    = {35},\n  number    = {1},\n  pages     = {1--37},\n  year      = {1998},\n  doi       = {10.1016/s0743-1066(97)10002-4},\n  abstract  = {Program analysis based on abstract interpretation has proven\n\t\tvery useful in compilation of constraint and logic programming\n\t\tlanguages. Unfortunately, the traditional goal-dependent \n\t\tframework is inherently imprecise. This is because it handles \n\t\tcall and return in such a way that dataflow information may be \n\t\tre-asserted unnecessarily, leading to a loss of precision for\n\t\tmany description domains. For a few specific domains, the \n\t\tliterature contains proposals to overcome the problem, and some\n\t\timplementations use various unpublished tricks that sometimes \n\t\tavoid the precision loss. The purpose of this paper is to map \n\t\tthe landscape of goal-dependent, goal-independent, and combined\n\t\tapproaches to generic analysis of logic programs. This includes\n\t\tformalising existing methods and tricks in a way that is \n\t\tindependent of specific description domains. Moreover, we \n\t\tsuggest new methods for overcoming the loss of \n\t\tprecision---altogether eight different semantics are \n\t\tconsidered and compared. We provide theoretical results \n\t\tdetermining the relative accuracy of the approaches. These \n\t\tshow that two of our new semantics are uniformly more \n\t\taccurate than existing approaches. Experiments that we have \n\t\tperformed (for two description domains) with implementations \n\t\tof the eight different approaches enable a discussion of their\n\t\trelative runtime performances. We discuss the expected effect \n\t\ton other domains as well and conclude that our new methods \n\t\tcan be trusted to yield significantly more accurate analysis \n\t\tfor a small extra implementation effort, without compromising \n\t\tthe efficiency of analysis.},\n  keywords  = {Abstract interpretation, Logic programming, Sharing analysis},\n}\n\n
\n
\n\n\n
\n Program analysis based on abstract interpretation has proven very useful in compilation of constraint and logic programming languages. Unfortunately, the traditional goal-dependent framework is inherently imprecise. This is because it handles call and return in such a way that dataflow information may be re-asserted unnecessarily, leading to a loss of precision for many description domains. For a few specific domains, the literature contains proposals to overcome the problem, and some implementations use various unpublished tricks that sometimes avoid the precision loss. The purpose of this paper is to map the landscape of goal-dependent, goal-independent, and combined approaches to generic analysis of logic programs. This includes formalising existing methods and tricks in a way that is independent of specific description domains. Moreover, we suggest new methods for overcoming the loss of precision—altogether eight different semantics are considered and compared. We provide theoretical results determining the relative accuracy of the approaches. These show that two of our new semantics are uniformly more accurate than existing approaches. Experiments that we have performed (for two description domains) with implementations of the eight different approaches enable a discussion of their relative runtime performances. We discuss the expected effect on other domains as well and conclude that our new methods can be trusted to yield significantly more accurate analysis for a small extra implementation effort, without compromising the efficiency of analysis.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n A Practical Object-Oriented Analysis Engine for Constraint Logic Programs.\n \n \n \n\n\n \n Kelly, A. D.; Marriott, K.; Søndergaard, H.; and Stuckey, P. J.\n\n\n \n\n\n\n Software—Practice and Experience, 28(2): 199–224. 1998.\n \n\n\n\n
\n\n\n\n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@Article{Kel-Mar-Son-Stu_SPE98,\n  author    = {Andrew D. Kelly and \n\t\tKim Marriott and \n\t\tHarald S{\\o}ndergaard and \n\t\tPeter J. Stuckey},\n  title     = {A Practical Object-Oriented Analysis Engine for Constraint\n                Logic Programs},\n  journal   = {Software---Practice and Experience},\n  volume    = {28},\n  number    = {2},\n  pages     = {199--224},\n  year      = {1998},\n  doi       = {10.1002/(SICI)1097-024X(199802)28:2<199::AID-SPE150>3.0.CO;2-4},\n  abstract  = {The incorporation of global program analysis into recent \n\t\tcompilers for constraint logic programming (CLP) languages has \n\t\tgreatly improved the efficiency of these new languages. We \n\t\tpresent a global analyzer based on abstract interpretation.\n\t\tUnlike traditional optimizers, whose designs tend to be ad hoc,\n\t\tthe analyzer has been designed with maximal flexibility in\n\t\tmind. The analyzer is incremental, allowing substantial program\n\t\ttransformations by a compiler without requiring redundant\n\t\tre-computation of analysis data. The analyzer is also\n\t\tgeneric in that it can perform a large number of different\n\t\tprogram analyses. Furthermore, the analyzer has an\n\t\tobject-oriented design, enabling it to be adapted to different\n\t\tapplications and allowing it to be used with various CLP \n\t\tlanguages with simple modifications. As an example of this \n\t\tgenerality, we sketch the use of the analyzer in two different\n\t\tapplications involving two different CLP languages: an \n\t\toptimizing compiler for CLP(${\\cal R}$) programs and an \n\t\tapplication for detecting occur-check problems in Prolog \n\t\tprograms.},\n  keywords  = {Constraint programming, Logic programming, Abstract interpretation, Prolog, Static analysis, Program transformation, Compilation, Unification},\n}\n\n
\n
\n\n\n
\n The incorporation of global program analysis into recent compilers for constraint logic programming (CLP) languages has greatly improved the efficiency of these new languages. We present a global analyzer based on abstract interpretation. Unlike traditional optimizers, whose designs tend to be ad hoc, the analyzer has been designed with maximal flexibility in mind. The analyzer is incremental, allowing substantial program transformations by a compiler without requiring redundant re-computation of analysis data. The analyzer is also generic in that it can perform a large number of different program analyses. Furthermore, the analyzer has an object-oriented design, enabling it to be adapted to different applications and allowing it to be used with various CLP languages with simple modifications. As an example of this generality, we sketch the use of the analyzer in two different applications involving two different CLP languages: an optimizing compiler for CLP(ℛ) programs and an application for detecting occur-check problems in Prolog programs.\n
\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 1997\n \n \n (3)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n Abstract Interpretation of Active Rules and Its Use in Termination Analysis.\n \n \n \n\n\n \n Bailey, J.; Crnogorac, L.; Ramamohanarao, K.; and Søndergaard, H.\n\n\n \n\n\n\n In Afrati, F.; and Kolaitis, P., editor(s), Database Theory—ICDT'97, volume 1186, of Lecture Notes in Computer Science, pages 188–202, 1997. Springer\n \n\n\n\n
\n\n\n\n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n\n\n\n
\n
@Inproceedings{Bai-Crn-Ram-Son_ICDT97,\n  author    = {James Bailey and \n\t\tLobel Crnogorac and \n\t\tKotagiri Ramamohanarao and \n\t\tHarald S{\\o}ndergaard},\n  title     = {Abstract Interpretation of Active Rules and Its Use in\n\t\tTermination Analysis},\n  editor    = {F. Afrati and P. Kolaitis},\n  booktitle = {Database Theory---ICDT'97},\n  series    = {Lecture Notes in Computer Science},\n  volume    = {1186},\n  pages     = {188--202},\n  publisher = {Springer},\n  year      = {1997},\n  doi       = {10.1007/3-540-62222-5_45},\n  abstract  = {The behaviour of rules in an active database system can be\n\t\tdifficult to predict, and much work has been devoted to the\n\t\tdevelopment of automatic support for reasoning about properties\n\t\tsuch as confluence and termination. We show how abstract \n\t\tinterpretation can provide a generic framework for analysis of\n\t\tactive rules. Abstract interpretation is a well-understood, \n\t\tsemantics-based method for static analysis. Its advantage, \n\t\tapart from generality, lies in the separation of concerns: \n\t\tOnce the underlying semantics has been captured formally, a \n\t\tvariety of analyses can be derived, almost for free, as \n\t\t\\emph{approximations} to the semantics. Moreover, powerful \n\t\tgeneral theorems enable simple proofs of global correctness \n\t\tand uniform termination of specific analyses. We outline these\n\t\tideas and present, as an example application, a new method for \n\t\ttermination analysis. In terms of precision, the method \n\t\tcompares favourably with previous solutions to the problem.\n\t\tThis is because the method investigates the flow of data \n\t\trather than just the syntax of conditions and actions.},\n  keywords  = {Termination analysis, Abstract interpretation, Active databases},\n}\n\n
\n
\n\n\n
\n The behaviour of rules in an active database system can be difficult to predict, and much work has been devoted to the development of automatic support for reasoning about properties such as confluence and termination. We show how abstract interpretation can provide a generic framework for analysis of active rules. Abstract interpretation is a well-understood, semantics-based method for static analysis. Its advantage, apart from generality, lies in the separation of concerns: Once the underlying semantics has been captured formally, a variety of analyses can be derived, almost for free, as approximations to the semantics. Moreover, powerful general theorems enable simple proofs of global correctness and uniform termination of specific analyses. We outline these ideas and present, as an example application, a new method for termination analysis. In terms of precision, the method compares favourably with previous solutions to the problem. This is because the method investigates the flow of data rather than just the syntax of conditions and actions.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n A Generic Object-Oriented Incremental Analyser for Constraint Logic Programs.\n \n \n \n\n\n \n Kelly, A. D.; Marriott, K.; Søndergaard, H.; and Stuckey, P. J.\n\n\n \n\n\n\n In Kotagiri, R.; and Zobel, J., editor(s), Proceedings of the 20th Australasian Computer Science Conference, volume 19, of Australian Computer Science Communications, pages 92–101, 1997. \n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@Inproceedings{Kel-Mar-Son-Stu_ACSC97,\n  author    = {Andrew D. Kelly and \n\t\tKim Marriott and \n\t\tHarald S{\\o}ndergaard and \n\t\tPeter J. Stuckey},\n  title     = {A Generic Object-Oriented Incremental Analyser for Constraint\n             \tLogic Programs},\n  editor    = {R. Kotagiri and J. Zobel},\n  booktitle = {Proceedings of the 20th Australasian Computer Science Conference},\n  series    = {Australian Computer Science Communications},\n  volume    = {19},\n  number    = {1},\n  pages     = {92--101},\n  year      = {1997},\n  abstract  = {The incorporation of global program analysis into compilers \n\t\tfor constraint logic programming (CLP) languages has greatly\n\t\timproved the efficiency of these new languages. We present\n\t\ta global analyser based on abstract interpretation.  The \n\t\tanalyser is incremental, allowing substantial program \n\t\ttransformations by a compiler without requiring redundant\n\t\tre-computation of analysis data. The analyser is also\n\t\tgeneric in that it can perform a large number of different\n\t\tprogram analyses. Furthermore, the analyser has an \n\t\tobject-oriented design, enabling it to be easily adapted to\n\t\tdifferent applications and allowing it to be used with various\n\t\tCLP languages. As an example of this generality, we outline \n\t\tthe analyser's role in two different applications each for a\n\t\tdifferent CLP language: an optimizing compiler for CLP(R) \n\t\tprograms and an application for detecting occur-check problems\n\t\tin Prolog programs.},\n  keywords  = {Constraint programming, Logic programming, Abstract interpretation, Prolog, Static analysis, Program transformation, Compilation, Unification},\n}\n\n
\n
\n\n\n
\n The incorporation of global program analysis into compilers for constraint logic programming (CLP) languages has greatly improved the efficiency of these new languages. We present a global analyser based on abstract interpretation. The analyser is incremental, allowing substantial program transformations by a compiler without requiring redundant re-computation of analysis data. The analyser is also generic in that it can perform a large number of different program analyses. Furthermore, the analyser has an object-oriented design, enabling it to be easily adapted to different applications and allowing it to be used with various CLP languages. As an example of this generality, we outline the analyser's role in two different applications each for a different CLP language: an optimizing compiler for CLP(R) programs and an application for detecting occur-check problems in Prolog programs.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Termination Analysis for Mercury.\n \n \n \n\n\n \n Speirs, C.; Somogyi, Z.; and Søndergaard, H.\n\n\n \n\n\n\n In Van Hentenryck, P., editor(s), Static Analysis: Proceedings of the Fourth International Symposium, volume 1302, of Lecture Notes in Computer Science, pages 157–171, 1997. Springer\n \n\n\n\n
\n\n\n\n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n\n\n\n
\n
@Inproceedings{Spe-Som-Son_SAS97,\n  author    = {Chris Speirs and \n\t\tZoltan Somogyi and \n\t\tHarald S{\\o}ndergaard},\n  title     = {Termination Analysis for {Mercury}},\n  editor    = {P. {Van Hentenryck}},\n  booktitle = {Static Analysis: \n\t\tProceedings of the Fourth International Symposium},\n  series    = {Lecture Notes in Computer Science},\n  volume    = {1302},\n  pages     = {157--171},\n  publisher = {Springer},\n  year      = {1997},\n  doi       = {10.1007/bfb0032740},\n  abstract  = {Since the late eighties, much progress has been made in the \n\t\ttheory of termination analysis for logic programs. However, the\n\t\tpractical significance of much of this work is hard to judge, \n\t\tsince experimental evaluations rarely get published. Here we \n\t\tdescribe and evaluate a termination analyzer for Mercury, a \n\t\tstrongly typed and moded logic-functional programming language.\n\t\tMercury's high degree of referential transparency and the \n\t\tguaranteed availability of reliable mode information simplify \n\t\ttermination analysis. Our analyzer uses a variant of a method \n\t\tdeveloped by Pl{\\"u}mer. It deals with full Mercury, including \n\t\tmodules and I/O. In spite of these obstacles, it produces \n\t\tstate-of-the-art termination information, while having a \n\t\tnegligible impact on the running time of the compiler of which\n\t\tit is part, even for large programs.},\n  keywords  = {Logic programming, Mercury, Termination analysis},\n}\n\n
\n
\n\n\n
\n Since the late eighties, much progress has been made in the theory of termination analysis for logic programs. However, the practical significance of much of this work is hard to judge, since experimental evaluations rarely get published. Here we describe and evaluate a termination analyzer for Mercury, a strongly typed and moded logic-functional programming language. Mercury's high degree of referential transparency and the guaranteed availability of reliable mode information simplify termination analysis. Our analyzer uses a variant of a method developed by Plümer. It deals with full Mercury, including modules and I/O. In spite of these obstacles, it produces state-of-the-art termination information, while having a negligible impact on the running time of the compiler of which it is part, even for large programs.\n
\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 1996\n \n \n (4)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n A Comparison of Three Occur-Check Analysers.\n \n \n \n\n\n \n Crnogorac, L.; Kelly, A.; and Søndergaard, H.\n\n\n \n\n\n\n In Cousot, R.; and Schmidt, D. A., editor(s), Static Analysis: Proceedings of the Third International Symposium, volume 1145, of Lecture Notes in Computer Science, pages 159–173, 1996. Springer\n \n\n\n\n
\n\n\n\n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@Inproceedings{Crn-Kel-Son_SAS96,\n  author    = {Lobel Crnogorac and \n\t\tAndrew Kelly and \n\t\tHarald S{\\o}ndergaard},\n  title     = {A Comparison of Three Occur-Check Analysers},\n  editor    = {R. Cousot and D. A. Schmidt},\n  booktitle = {Static Analysis: Proceedings of the Third International \n\t\tSymposium},\n  series    = {Lecture Notes in Computer Science},\n  volume    = {1145},\n  pages     = {159--173},\n  publisher = {Springer},\n  year      = {1996},\n  doi       = {10.1007/3-540-61739-6_40},\n  abstract  = {A well known problem with many Prolog interpreters and compilers\n\t\tis the lack of occur-check in the implementation of the \n\t\tunification algorithm. This means that such systems are unsound\n\t\twith respect to first-order predicate logic. Static analysis \n\t\toffers an appealing approach to the problem of occur-check \n\t\treduction, that is, the safe omission of occur-checks in \n\t\tunification. We compare, for the first time, three static \n\t\tmethods that have been suggested for occur-check reduction, \n\t\ttwo based on assigning ``modes'' to programs and one which \n\t\tuses abstract interpretation. In each case, the analysis or \n\t\tsome essential part of it had not been implemented so far.\n\t\tOf the mode-based methods, one is due to Chadha and Plaisted\n\t\tand the other is due to Apt and Pellegrini. The method using \n\t\tabstract interpretation is based on earlier work by Plaisted, \n\t\tS{\\o}ndergaard and others who have developed groundness and \n\t\tsharing analyses for logic programs. The conclusion is that a \n\t\ttruly global analysis based on abstract interpretation leads \n\t\tto markedly higher precision and hence fewer occur-checks at \n\t\trun-time. Given the potential run-time speedups, a precise \n\t\tanalysis would be well worth the extra time.},\n  keywords  = {Abstract interpretation, Groundness analysis, Sharing analysis, Unification, Prolog, Compilation},\n}\n\n
\n
\n\n\n
\n A well known problem with many Prolog interpreters and compilers is the lack of occur-check in the implementation of the unification algorithm. This means that such systems are unsound with respect to first-order predicate logic. Static analysis offers an appealing approach to the problem of occur-check reduction, that is, the safe omission of occur-checks in unification. We compare, for the first time, three static methods that have been suggested for occur-check reduction, two based on assigning ``modes'' to programs and one which uses abstract interpretation. In each case, the analysis or some essential part of it had not been implemented so far. Of the mode-based methods, one is due to Chadha and Plaisted and the other is due to Apt and Pellegrini. The method using abstract interpretation is based on earlier work by Plaisted, Søndergaard and others who have developed groundness and sharing analyses for logic programs. The conclusion is that a truly global analysis based on abstract interpretation leads to markedly higher precision and hence fewer occur-checks at run-time. Given the potential run-time speedups, a precise analysis would be well worth the extra time.\n
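To make the problem concrete, here is a minimal Robinson-style unifier (an illustration of the general issue, not code from any of the systems compared). With the occur-check enabled, unifying X with f(X) fails soundly; with it omitted, as in many Prolog implementations, a cyclic binding is created.

# Tiny syntactic unifier: variables are strings starting with an
# upper-case letter; compound terms are tuples (functor, args...).
# Illustration only -- real Prolog systems omit the occur-check for
# speed, which is what occur-check analysis tries to justify.

def is_var(t):
    return isinstance(t, str) and t[:1].isupper()

def walk(t, s):
    while is_var(t) and t in s:
        t = s[t]
    return t

def occurs(v, t, s):
    t = walk(t, s)
    if is_var(t):
        return v == t
    return isinstance(t, tuple) and any(occurs(v, a, s) for a in t[1:])

def unify(t1, t2, s, occur_check=True):
    t1, t2 = walk(t1, s), walk(t2, s)
    if t1 == t2:
        return s
    if is_var(t1):
        if occur_check and occurs(t1, t2, s):
            return None                        # X = f(X): fail soundly
        return {**s, t1: t2}
    if is_var(t2):
        return unify(t2, t1, s, occur_check)
    if isinstance(t1, tuple) and isinstance(t2, tuple) \
            and t1[0] == t2[0] and len(t1) == len(t2):
        for a, b in zip(t1[1:], t2[1:]):
            s = unify(a, b, s, occur_check)
            if s is None:
                return None
        return s
    return None

print(unify('X', ('f', 'X'), {}))                     # None: check fires
print(unify('X', ('f', 'X'), {}, occur_check=False))  # cyclic {'X': ('f','X')}
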
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Low-Contact Learning in a First Year Programming Course.\n \n \n \n\n\n \n Johnston, R.; Moffat, A.; Søndergaard, H.; and Stuckey, P. J.\n\n\n \n\n\n\n In Rosenberg, J., editor(s), Proceedings of the First Australasian Conference on Computer Science Education, pages 19–26, 1996. ACM Press\n \n\n\n\n
\n\n\n\n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n\n\n\n
\n
@Inproceedings{Joh-Mof-Son-Stu_ACSE96,\n  author    = {Roy Johnston and \n\t\tAlistair Moffat and \n\t\tHarald S{\\o}ndergaard and \n\t\tPeter J. Stuckey},\n  title\t    = {Low-Contact Learning in a First Year Programming Course},\n  editor    = {J. Rosenberg},\n  booktitle = {Proceedings of the First Australasian Conference on \n\t\tComputer Science Education},\n  pages     = {19--26},\n  publisher = {ACM Press},\n  year      = {1996},\n  location  = {Sydney, Australia},\n  doi       = {10.1145/369585.369589},\n  abstract  = {Very large class sizes for introductory programming subjects pose\n\t\ta variety of problems, logistic and pedagogic.\n\t\tIn response to these problems, we are experimenting with \n\t\tlow-contact streams in a class of 750 first year students.\n\t\tMotivated and disciplined students are offered the alternative \n\t\tof fewer lectures and tutorials, that is, less direct contact, \n\t\tin return for more (compulsory) homework and increased \n\t\tfeed-back, that is, more indirect contact.\n\t\tWith a home computer and a link to the University, such \n\t\tstudents can take the subject with more flexible timetabling.\n\t\tWe provide material to support the higher degree of self-study \n\t\tand tools for students to monitor their own progress.\n\t\tThis paper describes how the low-contact streams are organised \n\t\tand discusses the advantages and disadvantages of the approach.\n\t\tA thorough evaluation is not possible yet, but our initial \n\t\timpression is that both traditional and self-paced students \n\t\tbenefit---the traditional students are better catered for in \n\t\ta course that is pitched at a slightly lower and less \n\t\tchallenging level, and the self-paced students respond to the \n\t\tchallenge and extend themselves in a manner that might not \n\t\toccur in a large lecture group.},\n  keywords  = {Education},\n}\n\n
\n
\n\n\n
\n Very large class sizes for introductory programming subjects pose a variety of problems, logistic and pedagogic. In response to these problems, we are experimenting with low-contact streams in a class of 750 first year students. Motivated and disciplined students are offered the alternative of fewer lectures and tutorials, that is, less direct contact, in return for more (compulsory) homework and increased feed-back, that is, more indirect contact. With a home computer and a link to the University, such students can take the subject with more flexible timetabling. We provide material to support the higher degree of self-study and tools for students to monitor their own progress. This paper describes how the low-contact streams are organised and discusses the advantages and disadvantages of the approach. A thorough evaluation is not possible yet, but our initial impression is that both traditional and self-paced students benefit—the traditional students are better catered for in a course that is pitched at a slightly lower and less challenging level, and the self-paced students respond to the challenge and extend themselves in a manner that might not occur in a large lecture group.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Two Applications of an Incremental Analysis Engine for (Constraint) Logic Programs (System Description).\n \n \n \n\n\n \n Kelly, A. D.; Marriott, K.; Søndergaard, H.; and Stuckey, P. J.\n\n\n \n\n\n\n In Cousot, R.; and Schmidt, D. A., editor(s), Static Analysis: Proceedings of the Third International Symposium, volume 1145, of Lecture Notes in Computer Science, pages 385–386, 1996. Springer\n \n\n\n\n
\n\n\n\n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@Inproceedings{Kel-Mar-Son-Stu_SAS96,\n  author    = {Andrew D. Kelly and \n\t\tKim Marriott and \n\t\tHarald S{\\o}ndergaard and \n\t\tPeter J. Stuckey},\n  title     = {Two Applications of an Incremental Analysis Engine for\n                (Constraint) Logic Programs (System Description)},\n  editor    = {R. Cousot and D. A. Schmidt},\n  booktitle = {Static Analysis: Proceedings of the Third International \n\t\tSymposium},\n  series    = {Lecture Notes in Computer Science},\n  volume    = {1145},\n  pages     = {385--386},\n  publisher = {Springer},\n  year      = {1996},\n  doi       = {10.1007/3-540-61739-6_55},\n  keywords  = {Constraint programming, Logic programming, Prolog, Static analysis, Program transformation, Compilation},\n}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Immediate Fixpoints and Their Use in Groundness Analysis.\n \n \n \n\n\n \n Søndergaard, H.\n\n\n \n\n\n\n In Chandru, V.; and Vinay, V., editor(s), Foundations of Software Technology and Theoretical Computer Science, volume 1180, of Lecture Notes in Computer Science, pages 359–370, 1996. Springer\n \n\n\n\n
\n\n\n\n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@Inproceedings{Son_FSTTCS96,\n  author    = {Harald S{\\o}ndergaard},\n  title     = {Immediate Fixpoints and Their Use in Groundness Analysis},\n  editor    = {V. Chandru and V. Vinay},\n  booktitle = {Foundations of Software Technology and \n\t\tTheoretical Computer Science},\n  series    = {Lecture Notes in Computer Science},\n  volume    = {1180},\n  pages     = {359--370},\n  publisher = {Springer},\n  year      = {1996},\n  doi       = {10.1007/3-540-62034-6_63},\n  abstract  = {A theorem by Schr{\\"o}der says that for a certain natural \n\t\tclass of functions $F : B \\rightarrow B$ defined on a Boolean \n\t\tlattice $B$, $F(x) = F(F(F(x)))$ for all $x \\in B$.\n\t\tAn immediate corollary is that if such a function is monotonic \n\t\tthen it is also idempotent, that is, $F(x) = F(F(x))$.\n\t\tWe show how this corollary can be extended to recognize cases\n\t\twhere recursive definitions can immediately be replaced by an \n\t\tequivalent closed form, that is, they can be solved without \n\t\tKleene iteration. Our result applies more generally to \n\t\tdistributive lattices. It has applications for example in the \n\t\tabstract interpretation of declarative programs and deductive \n\t\tdatabases. We exemplify this by showing how to accelerate \n\t\tsimple cases of strictness analysis for first-order functional \n\t\tprograms and, perhaps more successfully, groundness analysis \n\t\tfor logic programs.},\n  keywords  = {Fixed points, Logic programming, Groundness analysis, Lattice theory},\n}\n\n
\n
\n\n\n
\n A theorem by Schröder says that for a certain natural class of functions $F : B → B$ defined on a Boolean lattice $B$, $F(x) = F(F(F(x)))$ for all $x ∈ B$. An immediate corollary is that if such a function is monotonic then it is also idempotent, that is, $F(x) = F(F(x))$. We show how this corollary can be extended to recognize cases where recursive definitions can immediately be replaced by an equivalent closed form, that is, they can be solved without Kleene iteration. Our result applies more generally to distributive lattices. It has applications for example in the abstract interpretation of declarative programs and deductive databases. We exemplify this by showing how to accelerate simple cases of strictness analysis for first-order functional programs and, perhaps more successfully, groundness analysis for logic programs.\n
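Schröder's identity can be verified exhaustively on a small Boolean lattice. The sketch below assumes, as one reading of the abstract, that the class in question contains the functions definable by a Boolean expression in one variable; every such function normalises to F(x) = (a ∧ x) ∨ (b ∧ ¬x) with a = F(1) and b = F(0), and is monotonic precisely when b ≤ a.

from itertools import product

# The Boolean lattice of subsets of {1, 2}, coded as 2-bit masks,
# with /\ = &, \/ = |, and complement taken relative to TOP.
ELEMS = range(4)
TOP = 3

def F(a, b, x):
    # Normal form of a one-variable Boolean expression:
    # F(x) = (a /\ x) \/ (b /\ not x), where a = F(TOP), b = F(0).
    return (a & x) | (b & ~x & TOP)

for a, b in product(ELEMS, repeat=2):
    # Schroeder's identity: F(F(F(x))) = F(x) for every x.
    assert all(F(a, b, F(a, b, F(a, b, x))) == F(a, b, x) for x in ELEMS)
    # Corollary: if F is monotonic (here: b <= a), F is idempotent.
    if b & a == b:
        assert all(F(a, b, F(a, b, x)) == F(a, b, x) for x in ELEMS)
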
\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 1995\n \n \n (2)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n Improved Analysis of Logic Programs Using a Differential Approach.\n \n \n \n\n\n \n Garcı́a de la Banda, M.; Marriott, K.; Søndergaard, H.; and Stuckey, P. J.\n\n\n \n\n\n\n Technical Report 95/15, Department of Computer Science, The University of Melbourne, 1995.\n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n\n\n\n
\n
@Techreport{Garcia_TR95,\n  author      = {Maria {Garc{\\'\\i}a de la Banda} and \n\t\tKim Marriott and\n\t\tHarald S{\\o}ndergaard and \n\t\tPeter J. Stuckey},\n  title       = {Improved Analysis of Logic Programs Using a Differential\n\t\tApproach},\n  number      = {95/15},\n  institution = {Department of Computer Science, The University of Melbourne},\n  year        = {1995},\n  abstract    = {Abstract interpretation based program analysis has proven\n\t\tvery useful in compilation of constraint and logic programming\n\t\tlanguages. Unfortunately, existing theoretical frameworks are \n\t\tinherently imprecise. This is because of the way the frameworks\n\t\thandle call and return from an atom evaluation---in effect the \n\t\tsame information may be added twice, leading to a loss of \n\t\tprecision in many description domains. For this reason some \n\t\timplementations use seemingly \\emph{ad hoc} tricks. Here we \n\t\tformalize these tricks and suggest three methods for overcoming\n\t\tthis loss of precision. Experimental and theoretical results \n\t\tindicate that use of these methods leads to more accurate and \n\t\tfaster analyses for little extra implementation effort.},\n  keywords  = {Abstract interpretation, Logic programming},\n}\n\n
\n
\n\n\n
\n Abstract interpretation based program analysis has proven very useful in compilation of constraint and logic programming languages. Unfortunately, existing theoretical frameworks are inherently imprecise. This is because of the way the frameworks handle call and return from an atom evaluation—in effect the same information may be added twice, leading to a loss of precision in many description domains. For this reason some implementations use seemingly ad hoc tricks. Here we formalize these tricks and suggest three methods for overcoming this loss of precision. Experimental and theoretical results indicate that use of these methods leads to more accurate and faster analyses for little extra implementation effort.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n An Optimizing Compiler for CLP(${R}$).\n \n \n \n\n\n \n Kelly, A. D.; Macdonald, A.; Marriott, K.; Søndergaard, H.; Stuckey, P. J.; and Yap, R. H. C.\n\n\n \n\n\n\n In Montanari, U.; and Rossi, F., editor(s), Principles and Practice of Constraint Programming—CP'95, volume 976, of Lecture Notes in Computer Science, pages 222–239, 1995. Springer\n \n\n\n\n
\n\n\n\n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@Inproceedings{Kel-Mac-Mar-Son-Stu-Yap_CP95,\n  author    = {Andrew D. Kelly and \n\t\tAndrew Macdonald and \n\t\tKim Marriott and\n            \tHarald S{\\o}ndergaard and \n\t\tPeter J. Stuckey and \n\t\tRoland H. C. Yap},\n  title     = {An Optimizing Compiler for {CLP(${R}$)}},\n  editor    = {U. Montanari and F. Rossi},\n  booktitle = {Principles and Practice of Constraint Programming---CP'95},\n  series    = {Lecture Notes in Computer Science},\n  volume    = {976},\n  pages     = {222--239},\n  publisher = {Springer},\n  year      = {1995},\n  doi       = {10.1007/3-540-60299-2_14},\n  abstract  = {The considerable expressive power and flexibility gained by \n\t\tcombining constraint programming with logic programming\n\t\tis not without cost. Implementations of constraint logic \n\t\tprogramming (CLP) languages must include expensive constraint \n\t\tsolving algorithms tailored to specific domains, such as trees,\n\t\tBooleans, or real numbers. The performance of many current CLP\n\t\tcompilers and interpreters does not encourage the widespread \n\t\tuse of CLP. We outline an optimizing compiler for \n\t\tCLP(${\\cal R}$), a CLP language which extends Prolog by \n\t\tallowing linear arithmetic constraints. The compiler uses \n\t\tsophisticated global analyses to determine the applicability of\n\t\tdifferent program transformations. Two important components of \n\t\tthe compiler, the \\emph{analyzer} and the \\emph{optimizer}, \n\t\twork in continual interaction in order to apply \n\t\tsemantics-preserving transformations to the source program.\n\t\tA large suite of transformations is planned. Currently the \n\t\tcompiler applies three powerful transformations, namely \n\t\t``solver bypass'', ``dead variable elimination'' and\n\t\t``nofail constraint detection''. We explain these optimizations\n\t\tand their place in the overall compiler design and show how \n\t\tthey lead to performance improvements on a set of benchmark \n\t\tprograms.},\n  keywords  = {Constraint programming, Logic programming, Static analysis, Program transformation, Compilation},\n}\n\n
\n
\n\n\n
\n The considerable expressive power and flexibility gained by combining constraint programming with logic programming is not without cost. Implementations of constraint logic programming (CLP) languages must include expensive constraint solving algorithms tailored to specific domains, such as trees, Booleans, or real numbers. The performance of many current CLP compilers and interpreters does not encourage the widespread use of CLP. We outline an optimizing compiler for CLP(ℛ), a CLP language which extends Prolog by allowing linear arithmetic constraints. The compiler uses sophisticated global analyses to determine the applicability of different program transformations. Two important components of the compiler, the analyzer and the optimizer, work in continual interaction in order to apply semantics-preserving transformations to the source program. A large suite of transformations is planned. Currently the compiler applies three powerful transformations, namely ``solver bypass'', ``dead variable elimination'' and ``nofail constraint detection''. We explain these optimizations and their place in the overall compiler design and show how they lead to performance improvements on a set of benchmark programs.\n
\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 1994\n \n \n (4)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n Boolean Functions for Dependency Analysis: Algebraic Properties and Efficient Representation.\n \n \n \n\n\n \n Armstrong, T.; Marriott, K.; Schachte, P.; and Søndergaard, H.\n\n\n \n\n\n\n In Le Charlier, B., editor(s), Static Analysis: Proceedings of the First International Symposium, volume 864, of Lecture Notes in Computer Science, pages 266–280, 1994. Springer-Verlag\n \n\n\n\n
\n\n\n\n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@Inproceedings{Arm-Mar-Sch-Son_SAS94,\n  author    = {Tania Armstrong and \n\t\tKim Marriott and \n\t\tPeter Schachte and \n\t\tHarald S{\\o}ndergaard},\n  title     = {{Boolean} Functions for Dependency Analysis:\n\t\tAlgebraic Properties and Efficient Representation},\n  editor    = {B. {Le Charlier}},\n  booktitle = {Static Analysis: Proceedings of the First International \n\t\tSymposium},\n  series    = {Lecture Notes in Computer Science},\n  volume    = {864},\n  pages     = {266--280},\n  publisher = {Springer-Verlag},\n  year      = {1994},\n  doi       = {10.1007/3-540-58485-4_46},\n  abstract  = {Many analyses for logic programming languages use Boolean \n\t\tfunctions to express dependencies between variables or argument\n\t\tpositions. Examples include groundness analysis, arguably the \n\t\tmost important analysis for logic programs, finiteness analysis \n\t      \tand functional dependency analysis. We identify two classes of \n\t\tBoolean functions that have been used: positive and definite \n\t\tfunctions, and we systematically investigate these classes and \n\t\ttheir efficient implementation for dependency analyses. We \n\t\tprovide syntactic characterizations and study their algebraic \n\t\tproperties. In particular, we show that both classes are closed\n\t\tunder existential quantification. We investigate representations\n\t\tfor these classes based on: reduced ordered binary decision \n\t\tdiagrams (ROBDDs), disjunctive normal form, conjunctive normal \n\t\tform, Blake canonical form, dual Blake canonical form, and a \n\t\tform specific to definite functions. We give an  empirical \n\t\tcomparison of these different representations for groundness \n\t\tanalysis.},\n  keywords  = {Boolean logic, Abstract domains, Abstract interpretation, Logic programming},\n}\n\n
\n
\n\n\n
\n Many analyses for logic programming languages use Boolean functions to express dependencies between variables or argument positions. Examples include groundness analysis, arguably the most important analysis for logic programs, finiteness analysis and functional dependency analysis. We identify two classes of Boolean functions that have been used: positive and definite functions, and we systematically investigate these classes and their efficient implementation for dependency analyses. We provide syntactic characterizations and study their algebraic properties. In particular, we show that both classes are closed under existential quantification. We investigate representations for these classes based on: reduced ordered binary decision diagrams (ROBDDs), disjunctive normal form, conjunctive normal form, Blake canonical form, dual Blake canonical form, and a form specific to definite functions. We give an empirical comparison of these different representations for groundness analysis.\n
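A model-based reading makes the gap between the two classes easy to test. The sketch below is mine, using the characterisations that a positive function has the everywhere-true assignment among its models, and that a definite function is a positive one whose models, viewed as sets of true variables, are closed under intersection.

from itertools import product

VARS = ('x', 'y')

def models(f):
    # Models as sets of the variables assigned True.
    return {frozenset(v for v, b in zip(VARS, bits) if b)
            for bits in product((False, True), repeat=len(VARS))
            if f(dict(zip(VARS, bits)))}

def is_pos(ms):
    return frozenset(VARS) in ms             # all-true is a model

def is_def(ms):
    return is_pos(ms) and all(m1 & m2 in ms for m1 in ms for m2 in ms)

ms_or  = models(lambda a: a['x'] or a['y'])      # x \/ y
ms_imp = models(lambda a: a['x'] or not a['y'])  # y -> x

assert is_pos(ms_or) and not is_def(ms_or)  # x \/ y: positive, not definite
assert is_def(ms_imp)                       # y -> x: definite
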
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Denotational Abstract Interpretation of Logic Programs.\n \n \n \n\n\n \n Marriott, K.; Søndergaard, H.; and Jones, N. D.\n\n\n \n\n\n\n ACM Transactions on Programming Languages and Systems, 16(3): 607–648. 1994.\n \n\n\n\n
\n\n\n\n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@Article{Mar-Son-Jon_TOPLAS94,\n  author    = {Kim Marriott and\n\t\tHarald S{\\o}ndergaard and \n\t\tNeil D. Jones},\n  title     = {Denotational Abstract Interpretation of Logic Programs},\n  journal   = {ACM Transactions on Programming Languages and Systems},\n  volume    = {16},\n  number    = {3},\n  pages     = {607--648},\n  year      = {1994},\n  doi       = {10.1145/177492.177650},\n  abstract  = {Logic programming languages are based on a principle of \n\t\tseparation of ``logic'' and ``control.'' This means that they\n\t\tcan be given simple model-theoretic semantics without regard to\n\t\tany particular execution mechanism (or proof procedure, viewing\n\t\texecution as theorem proving). While the separation is desirable\n\t\tfrom a semantical point of view, it does, however, make sound, \n\t\tefficient implementation of logic programming languages \n\t\tdifficult. The lack of ``control information'' in programs \n\t\tcalls for complex dataflow analysis techniques to guide \n\t\texecution. Since dataflow analysis furthermore finds extensive \n\t\tuse in error-finding and transformation tools, there is a need \n\t\tfor a simple and powerful theory of dataflow analysis of logic \n\t\tprograms. The present paper offers such a theory, based on \n\t\tF. Nielson's extension of P. and R. Cousot's abstract \n\t\tinterpretation. We present a denotational definition of the \n\t\tsemantics of definite logic programs. This definition is of \n\t\tinterest in its own right because of its compactness.\n\t\tStepwise we develop the definition into a generic dataflow \n\t\tanalysis which encompasses a large class of dataflow analyses\n\t\tbased on the SLD execution model. We exemplify one instance of\n\t\tthe definition by developing a provably correct groundness \n\t\tanalysis to predict how variables may be bound to ground terms \n\t\tduring execution. We also discuss implementation issues and \n\t\trelated work.},\n  keywords  = {Abstract interpretation, Abstract domains, Groundness analysis, Logic programming},\n}\n\n
\n
\n\n\n
\n Logic programming languages are based on a principle of separation of ``logic'' and ``control.'' This means that they can be given simple model-theoretic semantics without regard to any particular execution mechanism (or proof procedure, viewing execution as theorem proving). While the separation is desirable from a semantical point of view, it does, however, make sound, efficient implementation of logic programming languages difficult. The lack of ``control information'' in programs calls for complex dataflow analysis techniques to guide execution. Since dataflow analysis furthermore finds extensive use in error-finding and transformation tools, there is a need for a simple and powerful theory of dataflow analysis of logic programs. The present paper offers such a theory, based on F. Nielson's extension of P. and R. Cousot's abstract interpretation. We present a denotational definition of the semantics of definite logic programs. This definition is of interest in its own right because of its compactness. Stepwise we develop the definition into a generic dataflow analysis which encompasses a large class of dataflow analyses based on the SLD execution model. We exemplify one instance of the definition by developing a provably correct groundness analysis to predict how variables may be bound to ground terms during execution. We also discuss implementation issues and related work.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Optimizing Compilation for CLP(${R}$).\n \n \n \n\n\n \n Marriott, K.; Søndergaard, H.; Stuckey, P. J.; and Yap, R.\n\n\n \n\n\n\n In Gupta, G., editor(s), Proceedings of the 17th Australian Computer Science Conference, volume 16, of Australian Computer Science Communications, pages 551–560, 1994. \n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@Inproceedings{Mar-Son-Stu-Yap_ACSC94,\n  author    = {Kim Marriott and \n\t\tHarald S{\\o}ndergaard and \n\t\tPeter J. Stuckey and \n\t\tRoland Yap},\n  title\t    = {Optimizing Compilation for {CLP(${R}$)}},\n  editor    = {G. Gupta},\n  booktitle = {Proceedings of the 17th Australian Computer Science Conference},\n  series    = {Australian Computer Science Communications},\n  volume    = {16},\n  number    = {1},\n  pages     = {551--560},\n  year\t    = {1994},\n  abstract  = {Constraint Logic Programming (CLP) is a recent innovation in\n\t\tprogramming language design. CLP languages extend logic\n\t\tprogramming by allowing constraints from different domains\n\t\tsuch as real numbers or Boolean functions. This gives\n\t\tconsiderable expressive power and flexibility and CLP\n\t\tprograms have proven to be a high-level programming\n\t\tparadigm for applications based on interactive mathematical\n\t\tmodelling. These advantages, however, are not without cost.\n\t\tImplementations of CLP languages must include expensive\n\t\tconstraint solving algorithms tailored to the specific\n\t\tdomains. Indeed, performance of the current generation of\n\t\tCLP compilers and interpreters is one of the main obstacles\n\t\tto the widespread use of CLP. Here we outline the design\n\t\tof a highly optimizing compiler for CLP($\\Re$), a CLP\n\t\tlanguage which extends Prolog by allowing linear arithmetic\n\t\tconstraints. This compiler is intended to overcome the\n\t\tefficiency problems of the current implementation\n\t\ttechnology. The main innovation in the compiler is a\n\t\tcomprehensive suite of program optimizations and associated\n\t\tglobal analyses which determine applicability of each\n\t\toptimization. We describe these optimizations and report\n\t\tvery promising results from preliminary experiments.},\n  keywords  = {Constraint programming, Logic programming, Compilation, Static analysis},\n}\n\t\t\n
\n
\n\n\n
\n Constraint Logic Programming (CLP) is a recent innovation in programming language design. CLP languages extend logic programming by allowing constraints from different domains such as real numbers or Boolean functions. This gives considerable expressive power and flexibility and CLP programs have proven to be a high-level programming paradigm for applications based on interactive mathematical modelling. These advantages, however, are not without cost. Implementations of CLP languages must include expensive constraint solving algorithms tailored to the specific domains. Indeed, performance of the current generation of CLP compilers and interpreters is one of the main obstacles to the widespread use of CLP. Here we outline the design of a highly optimizing compiler for CLP($\\Re$), a CLP language which extends Prolog by allowing linear arithmetic constraints. This compiler is intended to overcome the efficiency problems of the current implementation technology. The main innovation in the compiler is a comprehensive suite of program optimizations and associated global analyses which determine applicability of each optimization. We describe these optimizations and report very promising results from preliminary experiments.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n PEPM '94: Proceedings of the ACM SIGPLAN Workshop on Partial Evaluation and Semantics-Based Program Manipulation.\n \n \n \n\n\n \n Sestoft, P.; and Søndergaard, H.,\n editors.\n \n\n\n \n\n\n\n 1994.\n Technical Report 94/9, Department of Computer Science, The University of Melbourne\n\n\n\n
@Proceedings{Ses-Son_PEPM94,\n  editor    = {Peter Sestoft and \n\t\tHarald S{\\o}ndergaard},\n  title\t    = {PEPM '94: Proceedings of the ACM SIGPLAN Workshop on Partial \n\t\tEvaluation and Semantics-Based Program Manipulation},\n  location  = {Orlando, Florida},\n  year      = {1994},\n  doi       = {10.1007/BF01019002},\n  note\t    = {Technical Report 94/9, Department of Computer Science, \n\t\tThe University of Melbourne},\n  keywords  = {Partial evaluation, Program transformation, Static analysis},\n}\n\n
1993 (4)
Definiteness Analysis for CLP(R). Baker, N.; and Søndergaard, H. In Gupta, G.; Mohay, G.; and Topor, R., editors, Proceedings of the Sixteenth Australian Computer Science Conference, volume 15 of Australian Computer Science Communications, pages 321–332, 1993.
@inproceedings{Bak-Son_ACSC93,\n  author    = {Naomi Baker and \n\t\tHarald S{\\o}ndergaard},\n  title     = {Definiteness Analysis for {CLP(${R}$)}},\n  editor    = {G. Gupta and G. Mohay and R. Topor},\n  booktitle = {Proceedings of the Sixteenth Australian Computer Science \n\t\tConference},\n  series    = {Australian Computer Science Communications},\n  volume    = {15},\n  number    = {1},\n  pages     = {321--332},\n  year      = {1993},\n  abstract  = {Constraint logic programming (CLP) languages generalise\n\t\tlogic programming languages, amalgamating logic programming\n\t\tand constraint programming.  Combining the best of two\n\t\tworlds, they provide powerful tools for wide classes\n\t\tof problems.  As with logic programming languages, code\n\t\toptimization by compilers is an important issue in the\n\t\timplementation of CLP languages.  A compiler needs\n\t\tsophisticated global information, collected by dataflow\n\t\tanalyses, to generate competitive code.\n\t\tOne kind of useful dataflow information concerns the point\n\t\tat which variables become definite, that is, constrained\n\t\tto take a unique value.  In this paper we present a very\n\t\tprecise dataflow analysis to determine definiteness, and we\n\t\tdiscuss its applications.  By separating the two concerns:\n\t\tcorrectness and implementation techniques, abstract\n\t\tinterpretation enables us to develop a sophisticated\n\t\tdataflow analysis in a straightforward manner, in fact in\n\t\ta framework where the correctness of the analysis is easily\n\t\testablished---a feature which is uncommon when complex\n\t\tanalyses are developed in an ad hoc way.\n\t\tWe use a class of Boolean functions, the positive functions,\n\t\tto represent the definiteness relationship between variables.\n\t\tA Boolean function is interpreted as expressing a relation\n\t\twhich holds not simply at the given point in an evaluation,\n\t\tbut in fact during the rest of the evaluation branch.\n\t\tThe nature of variables in a CLP language makes this\n\t\ttreatment both possible and natural.},\n  keywords  = {Constraint logic programming, Boolean logic, Abstract interpretation},\n}\n\n
Constraint logic programming (CLP) languages generalise logic programming languages, amalgamating logic programming and constraint programming. Combining the best of two worlds, they provide powerful tools for wide classes of problems. As with logic programming languages, code optimization by compilers is an important issue in the implementation of CLP languages. A compiler needs sophisticated global information, collected by dataflow analyses, to generate competitive code. One kind of useful dataflow information concerns the point at which variables become definite, that is, constrained to take a unique value. In this paper we present a very precise dataflow analysis to determine definiteness, and we discuss its applications. By separating the two concerns: correctness and implementation techniques, abstract interpretation enables us to develop a sophisticated dataflow analysis in a straightforward manner, in fact in a framework where the correctness of the analysis is easily established—a feature which is uncommon when complex analyses are developed in an ad hoc way. We use a class of Boolean functions, the positive functions, to represent the definiteness relationship between variables. A Boolean function is interpreted as expressing a relation which holds not simply at the given point in an evaluation, but in fact during the rest of the evaluation branch. The nature of variables in a CLP language makes this treatment both possible and natural.
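As a small illustration (not taken from the paper; the predicate p/3 is hypothetical), a single arithmetic constraint already induces a definiteness description expressible as a positive Boolean function:

    % Constraint: X = Y + Z.  A definiteness description of it is
    %     (Y and Z -> X) and (X and Z -> Y) and (X and Y -> Z)
    % read: once any two of the variables are constrained to unique
    % values, so is the third.
    p(X, Y, Z) :- X = Y + Z.

    % Hence after the goal  Y = 3, Z = 4, p(X, Y, Z)  an analysis can
    % conclude that X is definite from that point onwards.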
On Propagation-Based Analysis of Logic Programs. Marriott, K.; and Søndergaard, H. In Michaylov, S.; and Winsborough, W., editors, Proceedings of the ILPS 93 Workshop on Global Compilation, Vancouver, Canada, pages 47–65, 1993.
@Inproceedings{Mar-Son_ILPSworkshop93,\n  author    = {Kim Marriott and \n\t\tHarald S{\\o}ndergaard},\n  title\t    = {On Propagation-Based Analysis of Logic Programs},\n  editor    = {S. Michaylov and W. Winsborough},\n  booktitle = {Proceedings of the ILPS 93 Workshop on Global Compilation,\n\t\t\tVancouver, Canada},\n  pages     = {47--65},\n  year\t    = {1993},\n  abstract  = {Notions such as ``reexecution'' and ``propagation'' have recently\n\t\tattracted attention in dataflow analysis of logic programs. \n\t\tBoth techniques promise more accurate dataflow analysis without \n\t\trequiring more complex description domains. Propagation, \n\t\thowever, has never been given a formal definition. It has \n\t\ttherefore been difficult to discuss properties such as \n\t\tcorrectness, precision, and termination of propagation. \n\t\tWe suggest a definition of propagation. Comparing \n\t\tpropagation-based analysis with the more conventional\n\t\tapproach based on abstract interpretation, we find that \n\t\tpropagation involves a certain inherent loss of precision \n\t\twhen dataflow analyses are based on description domains\n\t\twhich are not ``downwards closed'' (including mode analysis).\n\t\tIn the archetypical downwards closed case, groundness \n\t\tanalysis, we contrast approaches using Boolean functions as\n\t\tdescriptions with those using propagation or reexecution.},\n  keywords  = {Abstract interpretation, Boolean logic, Logic programming, Mode analysis, Groundness analysis},\n}\n\n
Notions such as "reexecution" and "propagation" have recently attracted attention in dataflow analysis of logic programs. Both techniques promise more accurate dataflow analysis without requiring more complex description domains. Propagation, however, has never been given a formal definition. It has therefore been difficult to discuss properties such as correctness, precision, and termination of propagation. We suggest a definition of propagation. Comparing propagation-based analysis with the more conventional approach based on abstract interpretation, we find that propagation involves a certain inherent loss of precision when dataflow analyses are based on description domains which are not "downwards closed" (including mode analysis). In the archetypical downwards closed case, groundness analysis, we contrast approaches using Boolean functions as descriptions with those using propagation or reexecution.
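The key distinction can be illustrated on two standard properties (an illustration in Prolog terms, not an example from the paper):

    % A description domain is "downwards closed" if descriptions are
    % closed under instantiation of the substitutions they describe.
    %   ground(X): if X is ground under a substitution, it remains
    %              ground under every instance     -- downwards closed.
    %   free(X):   X may be free under a substitution while some
    %              instance binds X                -- not closed.
    % So groundness tolerates propagation without loss, whereas mode
    % (freeness) information is where the inherent imprecision shows up.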
Precise and Efficient Groundness Analysis for Logic Programs. Marriott, K.; and Søndergaard, H. ACM Letters on Programming Languages and Systems, 2(1–4): 181–196. 1993.
@Article{Mar-Son_LOPLAS93,\n  author    = {Kim Marriott and \n\t\tHarald S{\\o}ndergaard},\n  title\t    = {Precise and Efficient Groundness Analysis for Logic Programs},\n  journal   = {ACM Letters on Programming Languages and Systems},\n  volume    = {2},\n  number    = {1--4},\n  pages\t    = {181--196},\n  year\t    = {1993},\n  doi       = {10.1145/176454.176519},\n  abstract  = {We show how precise groundness information can be extracted from\n\t\tlogic programs. The idea is to use abstract interpretation with\n\t\tBoolean functions as ``approximations'' to groundness \n\t\tdependencies between variables. This idea is not new, and \n\t\tdifferent classes of Boolean functions have been used. We argue,\n\t\thowever, that one class, the \\emph{positive} functions, is more \n\t\tsuitable than others. Positive Boolean functions have a certain\n\t\tproperty which we (inspired by A. Langen) call ``condensation.''\n\t\tThis property allows for rapid computation of groundness \n\t\tinformation.},\n  keywords  = {Abstract interpretation, Abstract domains, Logic programming, Boolean logic, Groundness analysis},\n}\n\n
We show how precise groundness information can be extracted from logic programs. The idea is to use abstract interpretation with Boolean functions as "approximations" to groundness dependencies between variables. This idea is not new, and different classes of Boolean functions have been used. We argue, however, that one class, the positive functions, is more suitable than others. Positive Boolean functions have a certain property which we (inspired by A. Langen) call "condensation." This property allows for rapid computation of groundness information.
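A standard example of the idea: the groundness behaviour of append/3 is captured by a single positive Boolean function (the formula is the textbook description, not quoted from the paper):

    append([], Ys, Ys).
    append([X|Xs], Ys, [X|Zs]) :- append(Xs, Ys, Zs).

    % Groundness description of append(Xs, Ys, Zs) on success:
    %     zs <-> (xs and ys)
    % i.e. the third argument is ground exactly when the first two are.
    % Condensation means this one formula can simply be conjoined with
    % any caller's description, rather than re-analysing append/3 for
    % every calling pattern.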
Difference-List Transformation for Prolog. Marriott, K.; and Søndergaard, H. New Generation Computing, 11(2): 125–157. 1993.
@Article{Mar-Son_NGC93,\n  author    = {Kim Marriott and \n\t\tHarald S{\\o}ndergaard},\n  title\t    = {Difference-List Transformation for {Prolog}},\n  journal   = {New Generation Computing},\n  volume    = {11},\n  number    = {2},\n  pages     = {125--157}, \n  year\t    = {1993},\n  doi       = {10.1007/BF03037156},\n  abstract  = {Difference-lists are terms that represent lists.\n\t\tThe use of difference-lists can speed up most list-processing \n\t\tprograms considerably. Prolog programmers routinely use \n\t\t``difference-list versions'' of programs, but very little \n\t\tinvestigation has taken place into difference-list \n\t\ttransformation. Thus, to most programmers it is either \n\t\tunknown that the use of difference-lists is far from safe in \n\t\tall contexts, or else this fact is known but attributed to \n\t\tProlog's infamous ``occur check problem.'' In this paper we \n\t\tstudy the transformation of list-processing programs into \n\t\tprograms that use difference-lists. In particular we are \n\t\tconcerned with finding circumstances under which the \n\t\ttransformation is safe. We show that dataflow analysis can \n\t\tbe used to determine whether the transformation is applicable \n\t\tto a given program, thereby allowing for automatic \n\t\ttransformation. We prove that our transformation preserves \n\t\tstrong operational equivalence.},\n  keywords  = {Program transformation, Static analysis, Logic programming, Prolog},\n}\n\n
Difference-lists are terms that represent lists. The use of difference-lists can speed up most list-processing programs considerably. Prolog programmers routinely use "difference-list versions" of programs, but very little investigation has taken place into difference-list transformation. Thus, to most programmers it is either unknown that the use of difference-lists is far from safe in all contexts, or else this fact is known but attributed to Prolog's infamous "occur check problem." In this paper we study the transformation of list-processing programs into programs that use difference-lists. In particular we are concerned with finding circumstances under which the transformation is safe. We show that dataflow analysis can be used to determine whether the transformation is applicable to a given program, thereby allowing for automatic transformation. We prove that our transformation preserves strong operational equivalence.
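The transformation in miniature (a textbook sketch, not lifted from the paper):

    % Plain lists: concatenation costs time linear in the first list.
    append([], Ys, Ys).
    append([X|Xs], Ys, [X|Zs]) :- append(Xs, Ys, Zs).

    % Difference-lists: a list is a pair Front-Back with an unbound
    % tail, and concatenation is a single unification.
    append_dl(A-B, B-C, A-C).

    % ?- append_dl([1,2|T1]-T1, [3|T2]-T2, L-[]).
    %    L = [1, 2, 3]
    % The step is unsafe when the tail variables are already bound or
    % aliased -- precisely the situations the paper's dataflow analysis
    % must exclude before transforming automatically.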
1992 (2)
Bottom-Up Dataflow Analysis of Normal Logic Programs. Marriott, K.; and Søndergaard, H. Journal of Logic Programming, 13(2 & 3): 181–204. 1992.
@Article{Mar-Son_JLP92,\n  author    = {Kim Marriott and \n\t\tHarald S{\\o}ndergaard},\n  title\t    = {Bottom-Up Dataflow Analysis of Normal Logic Programs},\n  journal   = {Journal of Logic Programming},\n  volume    = {13},\n  number    = {2 \\& 3},\n  pages\t    = {181--204}, \n  year\t    = {1992},\n  doi       = {10.1016/0743-1066(92)90031-W},\n  abstract  = {A theory of semantics-based dataflow analysis using a notion of\n\t\t``insertion'' is presented. This notion relaxes the Galois \n\t\tconnections used in P. and R. Cousot's theory of abstract \n\t\tinterpretation. The aim is to obtain a firm basis for the \n\t\tdevelopment of dataflow analyses of normal logic programs.\n\t\tA dataflow analysis is viewed as a non-standard semantics that\n\t\tapproximates the standard semantics by manipulating descriptions\n\t\tof data objects rather than the objects themselves. A Kleene \n\t\tlogic-based semantics for normal logic programs is defined, \n\t\tsimilar to Fitting's $\\Phi_P$ semantics. This provides the \n\t\tneeded semantic base for ``bottom-up'' dataflow analyses. \n\t\tSuch analyses give information about the success and failure \n\t\tsets of a program. A major application of bottom-up analysis \n\t\tis therefore type inference. We detail a dataflow analysis \n\t\tusing descriptions similar to Sato and Tamaki's depth $k$ \n\t\tabstractions and another using Marriott, Naish and Lassez's\n\t\t``singleton'' abstractions. We show that both are sound with \n\t\trespect to our semantics and outline various uses of the \n\t\tanalyses. Finally we justify our choice of semantics by showing\n\t\tthat it is the most abstract of a number of possible semantics.\n\t\tThis means that every analysis based on our semantics is \n\t\tcorrect with respect to these other semantics, including \n\t\tKunen's semantics, SLDNF resolution, and the common (sound) \n\t\tProlog semantics.},\n  keywords  = {Abstract interpretation, Logic programming, Many-valued logic, Depth k analysis, Semantics},\n}\n\n
A theory of semantics-based dataflow analysis using a notion of "insertion" is presented. This notion relaxes the Galois connections used in P. and R. Cousot's theory of abstract interpretation. The aim is to obtain a firm basis for the development of dataflow analyses of normal logic programs. A dataflow analysis is viewed as a non-standard semantics that approximates the standard semantics by manipulating descriptions of data objects rather than the objects themselves. A Kleene logic-based semantics for normal logic programs is defined, similar to Fitting's Φ_P semantics. This provides the needed semantic base for "bottom-up" dataflow analyses. Such analyses give information about the success and failure sets of a program. A major application of bottom-up analysis is therefore type inference. We detail a dataflow analysis using descriptions similar to Sato and Tamaki's depth k abstractions and another using Marriott, Naish and Lassez's "singleton" abstractions. We show that both are sound with respect to our semantics and outline various uses of the analyses. Finally we justify our choice of semantics by showing that it is the most abstract of a number of possible semantics. This means that every analysis based on our semantics is correct with respect to these other semantics, including Kunen's semantics, SLDNF resolution, and the common (sound) Prolog semantics.
Non-Determinism in Functional Languages. Søndergaard, H.; and Sestoft, P. The Computer Journal, 35(5): 514–523. 1992.
@Article{Son-Ses_CJ92,\n  author    = {Harald S{\\o}ndergaard and \n\t\tPeter Sestoft},\n  title     = {Non-Determinism in Functional Languages},\n  journal   = {The Computer Journal},\n  volume    = {35},\n  number    = {5},\n  pages\t    = {514--523},\n  year\t    = {1992},\n  doi       = {10.1093/comjnl/35.5.514},\n  abstract  = {The introduction of a non-deterministic operator in even a\n\t\tvery simple functional programming language gives rise to\n\t\ta plethora of semantic questions. These questions are not \n\t\tonly concerned with the choice operator itself. A\n\t\tsurprisingly large number of different parameter passing\n\t\tmechanisms are made possible by the introduction of bounded\n\t\tnon-determinism. The diversity of semantic possibilities\n\t\tis examined systematically using denotational definitions\n\t\tbased on mathematical structures called powerdomains. This\n\t\tresults in an improved understanding of the different kinds\n\t\tof non-determinism and the properties of different kinds of\n\t\tnon-deterministic languages.},\n  keywords  = {Programming language concepts, Nondeterminism, Functional programming, Semantics},\n}\n\n
The introduction of a non-deterministic operator in even a very simple functional programming language gives rise to a plethora of semantic questions. These questions are not only concerned with the choice operator itself. A surprisingly large number of different parameter passing mechanisms are made possible by the introduction of bounded non-determinism. The diversity of semantic possibilities is examined systematically using denotational definitions based on mathematical structures called powerdomains. This results in an improved understanding of the different kinds of non-determinism and the properties of different kinds of non-deterministic languages.
1990 (5)
A General Theory for Abstraction. Cremonini, R.; Marriott, K.; and Søndergaard, H. In Tsang, C. P., editor, AI '90: Proceedings of the Fourth Australian Joint Conference on Artificial Intelligence, pages 121–134, 1990. World Scientific Publ.
@Inproceedings{Cre-Mar-Son_AI90,\n  author    = {Roberto Cremonini and \n\t\tKim Marriott and \n\t\tHarald S{\\o}ndergaard},\n  title\t    = {A General Theory for Abstraction},\n  editor    = {C. P. Tsang},\n  booktitle = {AI '90: Proceedings of the Fourth Australian Joint Conference on \n\t\t\tArtificial Intelligence}, \n  pages     = {121--134},\n  publisher = {World Scientific Publ.},\n  year\t    = {1990},\n  abstract  = {Abstraction is the process of mapping one representation of \n\t\ta problem into another representation so as to simplify \n\t\treasoning while preserving the essence of the problem.\n\t\tIt has been investigated in many different areas of computer\n\t\tscience. In artificial intelligence, abstraction is used in\n\t\tautomated theorem proving, problem solving, planning, common\n\t\tsense reasoning and qualitative reasoning. In dataflow \n\t\tanalysis of programs, it has been formalized in the theory \n\t\tof abstract interpretation. However, the possible \n\t\tconnections between research on abstraction in artificial \n\t\tintelligence and dataflow analysis have gone unnoticed until\n\t\tnow. This paper investigates these connections and\n\t\tprovides a general theory of abstraction for artificial \n\t\tintelligence. In particular, we present a method for \n\t\tbuilding an ``optimal'' abstraction and give general \n\t\tsufficient conditions for the abstraction to be ``safe.''\n\t\tThe usefulness and generality of our theory is illustrated \n\t\twith a number of example applications and by comparisons \n\t\twith related work.},\n  keywords  = {Abstract interpretation, Artificial intelligence},\n}\n\n
Abstraction is the process of mapping one representation of a problem into another representation so as to simplify reasoning while preserving the essence of the problem. It has been investigated in many different areas of computer science. In artificial intelligence, abstraction is used in automated theorem proving, problem solving, planning, common sense reasoning and qualitative reasoning. In dataflow analysis of programs, it has been formalized in the theory of abstract interpretation. However, the possible connections between research on abstraction in artificial intelligence and dataflow analysis have gone unnoticed until now. This paper investigates these connections and provides a general theory of abstraction for artificial intelligence. In particular, we present a method for building an "optimal" abstraction and give general sufficient conditions for the abstraction to be "safe." The usefulness and generality of our theory is illustrated with a number of example applications and by comparisons with related work.
A Characterization of Non-floundering Logic Programs. Marriott, K.; Søndergaard, H.; and Dart, P. In Debray, S.; and Hermenegildo, M., editors, Logic Programming: Proceedings of the 1990 North American Conference, pages 661–680, 1990. MIT Press.
@Inproceedings{Mar-Son-Dar_NACLP90,\n  author    = {Kim Marriott and \n\t\tHarald S{\\o}ndergaard and \n\t\tPhilip Dart},\n  title\t    = {A Characterization of Non-floundering Logic Programs},\n  editor    = {S. Debray and M. Hermenegildo},\n  booktitle = {Logic Programming: \n\t\tProceedings of the 1990 North American Conference},\n  pages\t    = {661--680},\n  publisher = {MIT Press}, \n  year\t    = {1990},\n  abstract  = {SLDNF resolution provides an operational semantics which\n\t\t(unlike many Prolog implementations of negation) is both \n\t\tcomputationally tractable and sound with respect to a \n\t\tdeclarative semantics. However, SLDNF is not complete, and \n\t\tit achieves soundness only by discarding certain queries to \n\t\tprograms because they ``flounder.''\n\t\tWe present dataflow analyses that, for a given normal logic \n\t\tprogram, detect (1) whether naive Prolog-style handling of \n\t\tnegation is sound and (2) whether SLDNF resolution avoids \n\t\tfloundering. The analyses are presented in a \n\t\tlanguage-independent framework of abstract interpretation based\n\t\ton denotational definitions. To our knowledge they are more \n\t\tprecise than any of their kind. In particular we obtain a \n\t\tcharacterization of a class of non-floundering programs larger\n\t\tthan any other that we know of.},\n  keywords  = {Floundering analysis, Logic programming, Prolog},\n}\n\n
SLDNF resolution provides an operational semantics which (unlike many Prolog implementations of negation) is both computationally tractable and sound with respect to a declarative semantics. However, SLDNF is not complete, and it achieves soundness only by discarding certain queries to programs because they "flounder." We present dataflow analyses that, for a given normal logic program, detect (1) whether naive Prolog-style handling of negation is sound and (2) whether SLDNF resolution avoids floundering. The analyses are presented in a language-independent framework of abstract interpretation based on denotational definitions. To our knowledge they are more precise than any of their kind. In particular we obtain a characterization of a class of non-floundering programs larger than any other that we know of.
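A two-clause program exhibits both issues (an illustration; the example is not from the paper):

    p(1).
    q(2).

    % SLDNF may select a negated goal only when it is ground.
    % ?- \+ p(X), q(X).   % flounders: \+ p(X) is selected with X unbound.
    %                     % Naive Prolog simply fails here, even though
    %                     % X = 2 is a correct answer to the query.
    % ?- q(X), \+ p(X).   % safe: X is bound to 2 before the negation
    %                     % is executed, and the query succeeds.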
Abstract Interpretation of Logic Programs: The Denotational Approach. Marriott, K.; and Søndergaard, H. In Bossi, A., editor, Proceedings of the Fifth Italian Conference on Logic Programming, pages 399–425, 1990. Gruppo Ricercatori ed Utenti di Logic Programming, Padova, Italy. Invited paper.
@Inproceedings{Mar-Son_GULP90,\n  author    = {Kim Marriott and \n\t\tHarald S{\\o}ndergaard},\n  title\t    = {Abstract Interpretation of Logic Programs:\n\t\t\tThe Denotational Approach},\n  editor    = {A. Bossi},\n  booktitle = {Proceedings of the Fifth Italian Conference on Logic Programming},\n  pages     = {399--425},\n  organization = {Gruppo Ricercatori ed Utenti di Logic Programming,\n\t\t\tPadova, Italy}, \n  year\t    = {1990},\n  note\t    = {Invited paper},\n  abstract  = {Logic programming languages are based on a principle of \n\t\tseparation of ``logic'' and ``control.'' This means that \n\t\tthey can be given simple model-theoretic semantics\n\t\twithout regard to any particular execution mechanism.\n\t\tWhile this is desirable from a semantical point of view, \n\t\tit does make sound, efficient implementation of logic \n\t\tprogramming languages difficult. The lack of ``control \n\t\tinformation'' in programs calls for complex dataflow\n\t\tanalyses to guide execution. Since dataflow analysis also \n\t\tfinds extensive use in error-finding and transformation \n\t\ttools, there is a need for a simple and powerful theory\n\t\tof dataflow analysis of logic programs.\n\t\tWe offer such a theory, inspired by F. Nielson's extension\n\t\tof P. and R. Cousot's \\emph{abstract interpretation}.\n\t\tWe then present a denotational definition of the semantics \n \t\tof definite logic programs. This definition is of interest \n\t\tin its own right because of its novel application of \n\t\t\\emph{parametric substitutions} which makes it very compact\n\t\tand yet transparent. Stepwise we develop the definition \n\t\tinto a generic dataflow analysis which encompasses a large \n\t\tclass of dataflow analyses based on the SLD execution model.\n\t\tWe exemplify one instance of the definition by developing a \n\t\tprovably correct groundness analysis to predict how \n\t\tvariables may be bound to ground terms during execution.\n\t\tWe also discuss related work and other applications.},\n  keywords  = {Abstract interpretation, Abstract domains, Groundness analysis, Logic programming},\n}\n\n
Logic programming languages are based on a principle of separation of "logic" and "control." This means that they can be given simple model-theoretic semantics without regard to any particular execution mechanism. While this is desirable from a semantical point of view, it does make sound, efficient implementation of logic programming languages difficult. The lack of "control information" in programs calls for complex dataflow analyses to guide execution. Since dataflow analysis also finds extensive use in error-finding and transformation tools, there is a need for a simple and powerful theory of dataflow analysis of logic programs. We offer such a theory, inspired by F. Nielson's extension of P. and R. Cousot's abstract interpretation. We then present a denotational definition of the semantics of definite logic programs. This definition is of interest in its own right because of its novel application of parametric substitutions which makes it very compact and yet transparent. Stepwise we develop the definition into a generic dataflow analysis which encompasses a large class of dataflow analyses based on the SLD execution model. We exemplify one instance of the definition by developing a provably correct groundness analysis to predict how variables may be bound to ground terms during execution. We also discuss related work and other applications.
Analysis of Constraint Logic Programs. Marriott, K.; and Søndergaard, H. In Debray, S.; and Hermenegildo, M., editors, Logic Programming: Proceedings of the 1990 North American Conference, pages 531–547, 1990. MIT Press.
@Inproceedings{Mar-Son_NACLP90,\n  author    = {Kim Marriott and \n\t\tHarald S{\\o}ndergaard},\n  title\t    = {Analysis of Constraint Logic Programs},\n  editor    = {S. Debray and M. Hermenegildo},\n  booktitle = {Logic Programming: \n\t\tProceedings of the 1990 North American Conference},\n  pages\t    = {531--547},\n  publisher = {MIT Press}, \n  year\t    = {1990},\n  abstract  = {Increasingly, compilers for logic programs are making use \n\t\tof information from dataflow analyses to aid in the generation\n\t\tof efficient target code. However, generating efficient target\n\t\tcode from constraint logic programs is even more difficult than\n\t\tfrom logic programs. Thus we feel that good compilation of\n\t\tconstraint logic programs will require sophisticated analysis \n\t\tof the source code. Here we present a simple framework for\n\t\tthe abstract interpretation of constraint logic programs\n\t\twhich is intended to serve as the basis for developing such \n\t\tanalyses. We show the framework's practical usefulness by \n\t\tsketching two dataflow analyses that are based on it\n\t\tand indicating how they can be used to improve the code \n\t\tgenerated by a compiler.},\n  keywords  = {Constraint programming, Logic programming, Abstract interpretation, Semantics, Compilation},\n}\n\n
Increasingly, compilers for logic programs are making use of information from dataflow analyses to aid in the generation of efficient target code. However, generating efficient target code from constraint logic programs is even more difficult than from logic programs. Thus we feel that good compilation of constraint logic programs will require sophisticated analysis of the source code. Here we present a simple framework for the abstract interpretation of constraint logic programs which is intended to serve as the basis for developing such analyses. We show the framework's practical usefulness by sketching two dataflow analyses that are based on it and indicating how they can be used to improve the code generated by a compiler.
Referential Transparency, Definiteness and Unfoldability. Søndergaard, H.; and Sestoft, P. Acta Informatica, 27(6): 505–517. 1990. Reviewed in Computing Reviews 32(3): 144–145, 1991.
@Article{Son-Ses_ACTA90,\n  author    = {Harald S{\\o}ndergaard and \n\t\tPeter Sestoft},\n  title\t    = {Referential Transparency, Definiteness and Unfoldability},\n  journal   = {Acta Informatica},\n  volume    = {27},\n  number    = {6},\n  pages\t    = {505--517},\n  year\t    = {1990},\n  note\t    = {Reviewed in \\emph{Computing Reviews} {\\bf 32}(3): 144--145, 1991},\n  doi       = {10.1007/BF00277387},\n  abstract  = {The term ``referential transparency'' is frequently used\n\t\tto indicate that a programming language has certain useful\n\t\tproperties. We observe, however, that the formal and\n\t\tinformal definitions given in the literature are not\n\t\tequivalent and we investigate their relationships.\n\t\tTo this end, we study the definitions in the context of\n\t\ta simple expression language and show that in the presence\n\t\tof non-determinism, the differences are manifest. We \n\t\tpropose a definition of ``referential transparency'', based\n\t\ton Quine's, as well as of the related notions: definiteness\n\t\tand unfoldability. We demonstrate that these notions are\n\t\tuseful to characterize programming languages.},\n  keywords  = {Programming language concepts, Nondeterminism, Semantics},\n}\n\n
The term "referential transparency" is frequently used to indicate that a programming language has certain useful properties. We observe, however, that the formal and informal definitions given in the literature are not equivalent and we investigate their relationships. To this end, we study the definitions in the context of a simple expression language and show that in the presence of non-determinism, the differences are manifest. We propose a definition of "referential transparency", based on Quine's, as well as of the related notions: definiteness and unfoldability. We demonstrate that these notions are useful to characterize programming languages.
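The interplay of the notions can be reproduced in Prolog, although the paper works with a functional expression language (so this is an analogy, with the hypothetical choice/1 playing the role of a bounded-choice operator):

    choice(1).
    choice(2).

    % "double(E) = E + E" with the argument evaluated once:
    % ?- choice(X), Y is X + X.        % Y = 2 ; Y = 4   (always even)

    % Naive unfolding duplicates the non-deterministic expression:
    % ?- choice(X1), choice(X2), Y is X1 + X2.
    %                                  % Y = 2 ; 3 ; 3 ; 4
    % The unfolded version has answers the original lacks: once
    % definiteness fails, unfoldability fails with it.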
1989 (5)
Mix: A Self-Applicable Partial Evaluator for Experiments in Compiler Generation. Jones, N. D.; Sestoft, P.; and Søndergaard, H. Lisp and Symbolic Computation, 2(1): 9–50. 1989.
@Article{Jon-Ses-Son_LASC89,\n  author    = {Neil D. Jones and \n\t\tPeter Sestoft and \n\t\tHarald S{\\o}ndergaard},\n  title     = {Mix: A Self-Applicable Partial Evaluator for Experiments in\n\t\tCompiler Generation},\n  journal   = {Lisp and Symbolic Computation},\n  volume    = {2},\n  number    = {1},\n  pages     = {9--50},\n  year      = {1989},\n  doi       = {10.1007/BF01806312},\n  abstract  = {The program transformation principle called partial \n\t\tevaluation has interesting applications in compilation\n\t\tand compiler generation. Self-applicable partial evaluators\n\t\tmay be used for transforming interpreters into corresponding\n\t\tcompilers and even for the generation of compiler generators.\n\t\tThis is useful because interpreters are significantly easier\n\t\tto write than compilers, but run much slower than compiled\n\t\tcode. A major difficulty in writing compilers (and compiler\n\t\tgenerators) is the thinking in terms of distinct binding\n\t\ttimes: run time and compile time (and compiler generation\n\t\ttime). The paper gives an introduction to partial\n\t\tevaluation and describes a fully automatic though \n\t\texperimental partial evaluator, called mix, able to generate\n\t\tstand-alone compilers as well as a compiler generator.\n\t\tMix partially evaluates programs written in Mixwell,\n\t\tessentially a first-order subset of statically scoped pure\n\t\tLisp. For compiler generation purposes it is necessary\n\t\tthat the partial evaluator be self-applicable. Even though\n\t\tthe potential utility of a self-applicable partial evaluator\n\t\thas been recognized since 1971, a 1984 version of mix\n\t\tappears to be the first successful implementation.\n\t\tThe overall structure of mix and the basic ideas behind its\n\t\tway of working are sketched. Finally, some results of using\n\t\ta version of mix are reported.},\n  keywords  = {Partial evaluation, Program transformation, Compilation},\n}\n\n
The program transformation principle called partial evaluation has interesting applications in compilation and compiler generation. Self-applicable partial evaluators may be used for transforming interpreters into corresponding compilers and even for the generation of compiler generators. This is useful because interpreters are significantly easier to write than compilers, but run much slower than compiled code. A major difficulty in writing compilers (and compiler generators) is the thinking in terms of distinct binding times: run time and compile time (and compiler generation time). The paper gives an introduction to partial evaluation and describes a fully automatic though experimental partial evaluator, called mix, able to generate stand-alone compilers as well as a compiler generator. Mix partially evaluates programs written in Mixwell, essentially a first-order subset of statically scoped pure Lisp. For compiler generation purposes it is necessary that the partial evaluator be self-applicable. Even though the potential utility of a self-applicable partial evaluator has been recognized since 1971, a 1984 version of mix appears to be the first successful implementation. The overall structure of mix and the basic ideas behind its way of working are sketched. Finally, some results of using a version of mix are reported.
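The compiler-generation claims are the three Futamura projections. Writing [[p]] for the function computed by program p, a partial evaluator mix satisfies [[mix]](p, s) = p_s where [[p_s]](d) = [[p]](s, d); instantiating p gives (standard background, stated here in LaTeX rather than quoted from the paper):

    \begin{align*}
      \mathit{target}   &= [\![\mathit{mix}]\!](\mathit{int}, \mathit{source}) \\
      \mathit{compiler} &= [\![\mathit{mix}]\!](\mathit{mix}, \mathit{int}) \\
      \mathit{cogen}    &= [\![\mathit{mix}]\!](\mathit{mix}, \mathit{mix})
    \end{align*}

The second and third equations require running mix on its own text, which is why self-applicability is the crucial property and why the 1984 mix was a milestone.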
Semantics-Based Dataflow Analysis of Logic Programs. Marriott, K.; and Søndergaard, H. In Ritter, G. X., editor, Information Processing 89, pages 601–606, 1989. North-Holland.
@Inproceedings{Mar-Son_IFIP89,\n  author    = {Kim Marriott and \n\t\tHarald S{\\o}ndergaard},\n  title     = {Semantics-Based Dataflow Analysis of Logic Programs},\n  editor    = {G. X. Ritter},\n  booktitle = {Information Processing 89},\n  pages     = {601--606},\n  publisher = {North-Holland},\n  year      = {1989},\n  abstract  = {The increased acceptance of Prolog has motivated widespread \n\t \tinterest in the semantics-based dataflow analysis of logic \n\t\tprograms and a number of different approaches have been \n\t\tsuggested. However, the relationships between these \n\t\tapproaches are not clear. The present paper provides a \n\t\tunifying introduction to the approaches by giving novel \n\t\tdenotational semantic definitions which capture their \n\t\tessence. In addition, the wide range of analysis tools \n\t\tsupported by semantics-based dataflow analysis are discussed.},\n  keywords  = {Logic programming, Prolog, Static analysis, Semantics},\n}\n\n
The increased acceptance of Prolog has motivated widespread interest in the semantics-based dataflow analysis of logic programs and a number of different approaches have been suggested. However, the relationships between these approaches are not clear. The present paper provides a unifying introduction to the approaches by giving novel denotational semantic definitions which capture their essence. In addition, the wide range of analysis tools supported by semantics-based dataflow analysis are discussed.
On Prolog and the Occur Check Problem. Marriott, K.; and Søndergaard, H. SIGPLAN Notices, 24(5): 76–82. 1989.
@Article{Mar-Son_occur89,\n  author    = {Kim Marriott and \n\t\tHarald S{\\o}ndergaard},\n  title     = {On {Prolog} and the Occur Check Problem},\n  journal   = {SIGPLAN Notices},\n  volume    = {24},\n  number    = {5},\n  pages     = {76--82},\n  year      = {1989},\n  doi       = {10.1145/66068.66075},\n  abstract  = {It is well known that omission of the occur check in \n\t\tunification leads to unsound Prolog systems. Nevertheless,\n\t \tmost Prolog systems omit the occur check because this\n\t\tmakes unification much faster and unsoundness allegedly\n\t\tseldom manifests itself. We revisit the occur check\n\t\tproblem and point to two aspects that have previously \n\t\treceived no attention. Firstly, ``unification without the\n\t\toccur check'' is ambiguous, and in practice, Prolog systems\n\t\tvary markedly in their reaction to programs having occur\n\t\tcheck problems. Secondly, even very simple program\n\t\ttransformations are unsafe for pure Prolog when the occur\n\t\tcheck is omitted. We conclude that the occur check problem\n\t\tis important, and in particular, that the current effort to\n\t\tstandardize Prolog should address it.},\n  keywords  = {Logic programming, Prolog, Unification},\n}\n\n
It is well known that omission of the occur check in unification leads to unsound Prolog systems. Nevertheless, most Prolog systems omit the occur check because this makes unification much faster and unsoundness allegedly seldom manifests itself. We revisit the occur check problem and point to two aspects that have previously received no attention. Firstly, "unification without the occur check" is ambiguous, and in practice, Prolog systems vary markedly in their reaction to programs having occur check problems. Secondly, even very simple program transformations are unsafe for pure Prolog when the occur check is omitted. We conclude that the occur check problem is important, and in particular, that the current effort to standardize Prolog should address it.
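The classic two-line demonstration (folklore, not quoted from the paper):

    less(X, s(X)).          % every number is less than its successor

    % ?- X = f(X).
    % With the occur check this unification fails; without it, systems
    % variously build a cyclic term, loop while printing the answer, or
    % crash -- the very ambiguity the paper documents.
    % ?- less(s(Y), Y).
    % Requires Y = s(s(Y)); without the occur check the query succeeds,
    % "proving" that s(Y) < Y for some Y.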
Notes for a Tutorial on Abstract Interpretation of Logic Programs. Marriott, K.; and Søndergaard, H. 1989. Tutorial Notes for the 1989 North American Conference on Logic Programming. Association for Logic Programming, 1989.
@Unpublished{Mar-Son_tutorial89,\n  author    = {Kim Marriott and \n\t\tHarald S{\\o}ndergaard},\n  title     = {Notes for a Tutorial on Abstract Interpretation of Logic \n\t\tPrograms},\n  year      = {1989},\n  note      = {Tutorial Notes for the 1989 North American Conference on \n\t\tLogic Programming. Association for Logic Programming, 1989},\n  keywords  = {Abstract interpretation, Abstract domains, Logic programming, Unification, Semantics, Groundness analysis},\n}\n\n
Semantics-Based Analysis and Transformation of Logic Programs. Søndergaard, H. Ph.D. Thesis, University of Copenhagen, Denmark, 1989. Also available as: Technical Report 89/21, Department of Computer Science, The University of Melbourne, 1990; DIKU Report 89/22, Department of Computer Science, University of Copenhagen, 1990; and Technical Report #12, Key Center for Knowledge Based Systems, RMIT and the University of Melbourne, 1990.
@Phdthesis{Son_phd89,\n  author    = {Harald S{\\o}ndergaard},\n  title\t    = {Semantics-Based Analysis and Transformation of Logic Programs},\n  school    = {University of Copenhagen, Denmark},\n  year\t    = {1989},\n  note\t    = {Also available as: Technical Report 89/21,\n\t\tDepartment of Computer Science, \n\t\tThe University of Melbourne, 1990,\n\t\tDIKU Report 89/22, Department of Computer Science, \n\t\tUniversity of Copenhagen, 1990,\n\t\tand Technical Report \#12,\n\t\tKey Center for Knowledge Based Systems,\n\t\tRMIT and the University of Melbourne, 1990},\n  abstract  = {Dataflow analysis is an essential component of many programming \n\t\ttools. One use of dataflow information is to identify errors in\n\t\ta program, as done by program ``debuggers'' and type checkers.\n\t\tAnother is in compilers and other program transformers, where \n\t\tthe analysis may guide various optimisations.\n\n\t\tThe correctness of a programming tool's dataflow analysis \n\t\tcomponent is usually of paramount importance. The theory of \n\t\tabstract interpretation, originally developed by P. and R. \n\t\tCousot, aims at providing a framework for the development of \n\t\tcorrect analysis tools. In this theory, dataflow analysis is \n\t\tviewed as ``non-standard'' semantics, and abstract \n\t\tinterpretation prescribes certain relations between standard\n\t\tand non-standard semantics, in order to guarantee the \n\t\tcorrectness of the non-standard semantics with respect to the \n\t\tstandard semantics.\n\n\t\tThe increasing acceptance of Prolog as a practical programming \n\t\tlanguage has motivated widespread interest in dataflow analysis\n\t\tof logic programs and especially in abstract interpretation.\n\t\tLogic programming languages are attractive from a semantical \n\t\tpoint of view, but dataflow analysis of logic programs is more \n\t\tcomplex than that of more traditional programming languages, \n\t\tsince dataflow is bi-directional (owing to unification) and \n\t\tcontrol flow is in terms of backtracking.\n\n\t\tThe present thesis is concerned with semantics-based dataflow \n\t\tanalysis of logic programs. We first set up a theory for \n\t\tdataflow analysis which is basically that of abstract \n\t\tinterpretation as introduced by P. and R. Cousot. We do, \n\t\thowever, relax the classical theory of abstract interpretation\n\t\tsomewhat, by giving up the demand for (unique) \\emph{best} \n\t\tapproximations.\n\n\t\tTwo different kinds of analysis of logic programs are then \n\t\tidentified: a \\emph{bottom-up} analysis yields an approximation\n\t\tto the success set (and possibly the failure set) of a program,\n\t\twhereas a \\emph{top-down} analysis yields an approximation to \n\t\tthe call patterns that would appear during an execution based \n\t\ton SLD resolution. We investigate some of the uses of the two \n\t\tkinds of analysis. In the bottom-up case, we pay special \n\t\tattention to (bottom-up) type inference and its use in program \n\t\tspecialisation. In the top-down case, we present a generic \n\t\t``dataflow semantics'' that encompasses many useful dataflow \n\t\tanalyses. As an instance of the generic semantics, a groundness\n\t\tanalysis is detailed, and a series of other applications are \n\t\tmentioned.\n\n\t\tWe finally present a transformation technique, based on \n\t\ttop-down analysis, for introducing difference-lists in a \n\t\tlist-manipulating program, without changing the program's \n\t\tsemantics. This may lead to substantial improvement in a \n\t\tprogram's execution time.},\n  keywords  = {Abstract interpretation, Abstract domains, Logic programming, Semantics, Many-valued logic, Unification, Groundness analysis, Program transformation},\n}\n\n
Dataflow analysis is an essential component of many programming tools. One use of dataflow information is to identify errors in a program, as done by program "debuggers" and type checkers. Another is in compilers and other program transformers, where the analysis may guide various optimisations.

The correctness of a programming tool's dataflow analysis component is usually of paramount importance. The theory of abstract interpretation, originally developed by P. and R. Cousot, aims at providing a framework for the development of correct analysis tools. In this theory, dataflow analysis is viewed as "non-standard" semantics, and abstract interpretation prescribes certain relations between standard and non-standard semantics, in order to guarantee the correctness of the non-standard semantics with respect to the standard semantics.

The increasing acceptance of Prolog as a practical programming language has motivated widespread interest in dataflow analysis of logic programs and especially in abstract interpretation. Logic programming languages are attractive from a semantical point of view, but dataflow analysis of logic programs is more complex than that of more traditional programming languages, since dataflow is bi-directional (owing to unification) and control flow is in terms of backtracking.

The present thesis is concerned with semantics-based dataflow analysis of logic programs. We first set up a theory for dataflow analysis which is basically that of abstract interpretation as introduced by P. and R. Cousot. We do, however, relax the classical theory of abstract interpretation somewhat, by giving up the demand for (unique) best approximations.

Two different kinds of analysis of logic programs are then identified: a bottom-up analysis yields an approximation to the success set (and possibly the failure set) of a program, whereas a top-down analysis yields an approximation to the call patterns that would appear during an execution based on SLD resolution. We investigate some of the uses of the two kinds of analysis. In the bottom-up case, we pay special attention to (bottom-up) type inference and its use in program specialisation. In the top-down case, we present a generic "dataflow semantics" that encompasses many useful dataflow analyses. As an instance of the generic semantics, a groundness analysis is detailed, and a series of other applications are mentioned.

We finally present a transformation technique, based on top-down analysis, for introducing difference-lists in a list-manipulating program, without changing the program's semantics. This may lead to substantial improvement in a program's execution time.
1988 (4)
Prolog Program Transformation by Introduction of Difference Lists. Marriott, K.; and Søndergaard, H. In Proceedings of the International Computer Science Conference '88, pages 206–213, 1988. IEEE Computer Society.
@Inproceedings{Mar-Son_ICSC88,\n  author    = {Kim Marriott and \n\t\tHarald S{\\o}ndergaard},\n  title     = {Prolog Program Transformation by Introduction of \n\t\tDifference Lists},\n  booktitle = {Proceedings of the International Computer Science \n\t\tConference '88},\n  pages     = {206--213},\n  publisher = {IEEE Computer Society},\n  location  = {Hong Kong},\n  year      = {1988},\n  keywords  = {Program transformation, Logic programming, Prolog},\n}\n\n
Bottom-Up Abstract Interpretation of Logic Programs. Marriott, K.; and Søndergaard, H. In Kowalski, R. A.; and Bowen, K. A., editors, Logic Programming: Proceedings of the Fifth International Conference and Symposium, pages 733–748, 1988. MIT Press.
@Inproceedings{Mar-Son_ICSLP88,\n  author    = {Kim Marriott and \n\t\tHarald S{\\o}ndergaard},\n  title\t    = {Bottom-Up Abstract Interpretation of Logic Programs},\n  editor    = {R. A. Kowalski and K. A. Bowen},\n  booktitle = {Logic Programming: Proceedings of the Fifth International \n\t\tConference and Symposium},\n  pages\t    = {733--748},\n  publisher = {MIT Press}, \n  year\t    = {1988},\n  abstract  = {Abstract interpretation is useful in the design and \n\t\tverification of dataflow analyses. Given a programming\n\t\tlanguage and a fixpoint characterization of its semantics,\n\t\tabstract interpretation enables the explicit formulation\n\t\tof the relation between a dataflow analysis and the \n\t\tsemantics. A key notion in this is that of a ``collecting''\n\t\tsemantics---an extension of the semantics that attaches\n\t\tinformation about run-time behaviour to program points.\n\t \tDifferent collecting semantics lead to different types of\n\t\tdataflow analysis. We present a novel collecting semantics \n\t\tfor normal logic programs. It is based on a Fitting-style \n\t\tbottom-up semantics and forms a firm semantic base for\n\t\tbottom-up dataflow analyses. One such analysis, using\n\t\tdescriptions similar to Sato and Tamaki's depth k\n\t\tabstractions, is detailed and shown to be sound with \n\t\trespect to the collecting semantics. Its use in program\n\t\tspecialization and termination analysis is demonstrated.},\n  keywords  = {Abstract interpretation, Logic programming, Many-valued logic, Depth k analysis, Program transformation, Semantics, Termination analysis},\n}\n\n
Abstract interpretation is useful in the design and verification of dataflow analyses. Given a programming language and a fixpoint characterization of its semantics, abstract interpretation enables the explicit formulation of the relation between a dataflow analysis and the semantics. A key notion in this is that of a "collecting" semantics—an extension of the semantics that attaches information about run-time behaviour to program points. Different collecting semantics lead to different types of dataflow analysis. We present a novel collecting semantics for normal logic programs. It is based on a Fitting-style bottom-up semantics and forms a firm semantic base for bottom-up dataflow analyses. One such analysis, using descriptions similar to Sato and Tamaki's depth k abstractions, is detailed and shown to be sound with respect to the collecting semantics. Its use in program specialization and termination analysis is demonstrated.
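Depth-k abstraction in miniature (an illustration, not the paper's own example): subterms below depth k are replaced by fresh variables, so infinitely many concrete terms collapse onto finitely many descriptions:

    % Depth-2 abstractions of concrete terms:
    %   f(g(a), h(b, c))   ~~>   f(g(_), h(_, _))
    %   [1, 2, 3 | Xs]     ~~>   [1, _ | _]
    % For the program   nat(0).   nat(s(N)) :- nat(N).
    % the abstracted success set is finite:
    %   { nat(0), nat(s(_)) }
    % already enough for type-style specialization and for recognising
    % some definitely non-terminating calls.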
On Describing Success Patterns of Logic Programs. Marriott, K.; and Søndergaard, H. Technical Report 88/12, Department of Computer Science, The University of Melbourne, 1988.
@Techreport{Mar-Son_success_patterns88,\n  author    = {Kim Marriott and \n\t\tHarald S{\\o}ndergaard},\n  title     = {On Describing Success Patterns of Logic Programs},\n  number    = {88/12},\n  institution = {Department of Computer Science, The University of Melbourne},\n  year      = {1988},\n  abstract  = {Sato and Tamaki's depth $k$ abstractions are reviewed, and their\n\t\tuse as (finite) descriptions of (infinite) sets of expressions\n\t\t(terms and atomic formulas) is discussed. This leads to an\n\t\tinvestigation of other classes of expressions. A certain\n\t\tproperty of a class is identified as being sufficient for the\n\t\texistence of a Galois insertion from the class to the set of\n\t\tall expressions, and thus for abstract interpretation to be\n\t\tapplicable. Sato and Tamaki's class does not have the property,\n\t\tbut it is shown how it can be extended to a class having the\n\t\tproperty while still remaining finite.},\n  keywords  = {Abstract interpretation, Abstract domains, Depth k analysis},\n}\n\n
Sato and Tamaki's depth k abstractions are reviewed, and their use as (finite) descriptions of (infinite) sets of expressions (terms and atomic formulas) is discussed. This leads to an investigation of other classes of expressions. A certain property of a class is identified as being sufficient for the existence of a Galois insertion from the class to the set of all expressions, and thus for abstract interpretation to be applicable. Sato and Tamaki's class does not have the property, but it is shown how it can be extended to a class having the property while still remaining finite.
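For reference, the property in question is the existence of a Galois insertion between sets of expressions (ordered by inclusion) and the class of descriptions; in the usual formulation (standard background, not quoted from the report):

    \[
      \alpha : C \to A, \qquad \gamma : A \to C, \qquad
      \alpha(c) \sqsubseteq a \;\Longleftrightarrow\; c \subseteq \gamma(a),
      \qquad \alpha(\gamma(a)) = a .
    \]

The last equation is what distinguishes an insertion from a mere Galois connection: no two distinct descriptions may denote the same set of expressions. The report's extension of the depth-k class restores exactly this while keeping the class finite.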
A Bibliography on Partial Evaluation. Sestoft, P.; and Søndergaard, H. SIGPLAN Notices, 23(2): 19–27. 1988.
@Article{Ses-Son_pebibl88,\n  author    = {Peter Sestoft and \n\t\tHarald S{\\o}ndergaard},\n  title     = {A Bibliography on Partial Evaluation},\n  journal   = {SIGPLAN Notices},\n  volume    = {23},\n  number    = {2},\n  pages     = {19--27},\n  year      = {1988},\n  doi       = {10.1145/43908.43910},\n  abstract  = {This note gives a short introduction to partial evaluation \n\t\tand an extensive, annotated bibliography to the field. \n\t\tThe bibliography is restricted to cover public-domain \n\t\tliterature in English.},\n  keywords  = {Partial evaluation},\n}\n\n
This note gives a short introduction to partial evaluation and an extensive, annotated bibliography to the field. The bibliography is restricted to cover public-domain literature in English.
1987 (2)
Mix: A Self-Applicable Partial Evaluator for Experiments in Compiler Generation—Extended Abstract.
Jones, N. D.; Sestoft, P.; and Søndergaard, H.
In Main, M.; et al., editor(s), Mathematical Foundations of Programming Language Semantics, volume 298 of Lecture Notes in Computer Science, pages 386–413. Springer-Verlag, 1987.
@Incollection{Jon-Ses-Son_mix-abstract87,
  author    = {Neil D. Jones and Peter Sestoft and Harald S{\o}ndergaard},
  title     = {Mix: A Self-Applicable Partial Evaluator for Experiments in
               Compiler Generation---Extended Abstract},
  editor    = {M. Main et al.},
  booktitle = {Mathematical Foundations of Programming Language Semantics},
  series    = {Lecture Notes in Computer Science},
  volume    = {298},
  publisher = {Springer-Verlag},
  pages     = {386--413},
  year      = {1987},
  doi       = {10.1007/3-540-19020-1_21},
  keywords  = {Partial evaluation, Program transformation, Compilation},
}
A Semantics-Based Framework for the Abstract Interpretation of Prolog.
Jones, N. D.; and Søndergaard, H.
In Abramsky, S.; and Hankin, C., editor(s), Abstract Interpretation of Declarative Languages, chapter 6, pages 123–142. Ellis Horwood, 1987.
@Incollection{Jon-Son_absint87,
  author    = {Neil D. Jones and Harald S{\o}ndergaard},
  title     = {A Semantics-Based Framework for the Abstract Interpretation
               of {Prolog}},
  editor    = {S. Abramsky and C. Hankin},
  booktitle = {Abstract Interpretation of Declarative Languages},
  chapter   = {6},
  pages     = {123--142},
  publisher = {Ellis Horwood},
  year      = {1987},
  keywords  = {Abstract interpretation, Logic programming, Prolog,
               Groundness analysis, Sharing analysis},
}
1986 (1)
An Application of Abstract Interpretation of Logic Programs: Occur Check Reduction.
Søndergaard, H.
In Robinet, B.; and Wilhelm, R., editor(s), Proceedings of the European Symposium on Programming—ESOP 86, volume 213 of Lecture Notes in Computer Science, pages 327–338. Springer-Verlag, 1986.
@Inproceedings{Son_ESOP86,
  author    = {Harald S{\o}ndergaard},
  title     = {An Application of Abstract Interpretation of Logic Programs:
               Occur Check Reduction},
  editor    = {B. Robinet and R. Wilhelm},
  booktitle = {Proceedings of the European Symposium on Programming---ESOP 86},
  series    = {Lecture Notes in Computer Science},
  volume    = {213},
  pages     = {327--338},
  publisher = {Springer-Verlag},
  year      = {1986},
  doi       = {10.1007/3-540-16442-1_25},
  abstract  = {The occur check in Robinson unification is superfluous in most
               unifications that take place in practice. The present paper is
               concerned with the problem of determining circumstances under
               which the occur check may safely be dispensed with. The method
               given draws on one outlined by David Plaisted. The framework,
               however, differs in that we systematically apply the abstract
               interpretation principle to logic programs. The aim is to give
               a clear presentation and to facilitate justification of the
               soundness of the method.},
  keywords  = {Abstract interpretation, Logic programming, Prolog, Unification},
}
The occur check in Robinson unification is superfluous in most unifications that take place in practice. The present paper is concerned with the problem of determining circumstances under which the occur check may safely be dispensed with. The method given draws on one outlined by David Plaisted. The framework, however, differs in that we systematically apply the abstract interpretation principle to logic programs. The aim is to give a clear presentation and to facilitate justification of the soundness of the method.
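The phenomenon the paper targets shows up in any small unifier (an illustrative Python sketch; the paper's analysis is not reproduced here). Without the occur check, unifying X with f(X) "succeeds" with a circular binding; with the check it correctly fails, and the point of the analysis is to identify the unifications where skipping the check is nevertheless safe.

# Illustrative only. Variables are capitalised strings; compound terms
# are tuples ("f", arg1, ..., argn); anything else is a constant.
def is_var(t):
    return isinstance(t, str) and t[:1].isupper()

def walk(t, s):                       # chase bindings in substitution s
    while is_var(t) and t in s:
        t = s[t]
    return t

def occurs(v, t, s):                  # does variable v occur in term t?
    t = walk(t, s)
    if t == v:
        return True
    return isinstance(t, tuple) and any(occurs(v, a, s) for a in t[1:])

def unify(a, b, s, occur_check=True):
    a, b = walk(a, s), walk(b, s)
    if a == b:
        return s
    if is_var(a):
        if occur_check and occurs(a, b, s):
            return None               # X against f(X): fail
        return {**s, a: b}
    if is_var(b):
        return unify(b, a, s, occur_check)
    if isinstance(a, tuple) and isinstance(b, tuple) \
            and a[0] == b[0] and len(a) == len(b):
        for x, y in zip(a[1:], b[1:]):
            s = unify(x, y, s, occur_check)
            if s is None:
                return None
        return s
    return None

print(unify("X", ("f", "X"), {}))                     # None: occur check fires
print(unify("X", ("f", "X"), {}, occur_check=False))  # {'X': ('f', 'X')}: circular!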
1985 (2)
An Experiment in Partial Evaluation: The Generation of a Compiler Generator.
Jones, N. D.; Sestoft, P.; and Søndergaard, H.
In Jouannaud, J.-P., editor(s), Rewriting Techniques and Applications, volume 202 of Lecture Notes in Computer Science, pages 124–140. Springer-Verlag, 1985.
@Incollection{Jon-Ses-Son_RTA85,
  author    = {Neil D. Jones and Peter Sestoft and Harald S{\o}ndergaard},
  title     = {An Experiment in Partial Evaluation:
               The Generation of a Compiler Generator},
  editor    = {J.-P. Jouannaud},
  booktitle = {Rewriting Techniques and Applications},
  series    = {Lecture Notes in Computer Science},
  volume    = {202},
  pages     = {124--140},
  publisher = {Springer-Verlag},
  year      = {1985},
  doi       = {10.1007/3-540-15976-2_6},
  keywords  = {Partial evaluation, Program transformation, Compilation},
}
An Experiment in Partial Evaluation: The Generation of a Compiler Generator (Extended Abstract).
Jones, N. D.; Sestoft, P.; and Søndergaard, H.
SIGPLAN Notices, 20(8): 82–87, 1985.
@Article{Jon-Ses-Son_SIGPLANN85,
  author    = {Neil D. Jones and Peter Sestoft and Harald S{\o}ndergaard},
  title     = {An Experiment in Partial Evaluation: The Generation of a
               Compiler Generator (Extended Abstract)},
  journal   = {SIGPLAN Notices},
  volume    = {20},
  number    = {8},
  pages     = {82--87},
  year      = {1985},
  doi       = {10.1145/988346.988358},
  keywords  = {Partial evaluation, Program transformation, Compilation},
}