@Proceedings{Bow-Son_LOPSTR24,
  editor = {Juliana Bowles and Harald S{\o}ndergaard},
  title = {Logic-Based Program Synthesis and Transformation:
    Proceedings of the 34th International Symposium},
  series = {Lecture Notes in Computer Science},
  volume = {14919},
  publisher = {Springer},
  year = {2024},
  doi = {10.1007/978-3-031-71294-4},
  keywords = {Program transformation, Program synthesis, Program analysis,
    Inversion, Specification, Logic-based methods, Logic programming},
}

@Article{Kaf-Gan-Sch-Son-Stu_SoSyM24,
  author = {Bishoksan Kafle and Graeme Gange and Peter Schachte and
    Harald S{\o}ndergaard and Peter J. Stuckey},
  title = {A Lightweight Approach to Nontermination Inference Using
    {Constrained} {Horn} {Clauses}},
  journal = {Software and Systems Modeling},
  volume = {23},
  pages = {319--342},
  year = {2024},
  doi = {10.1007/s10270-024-01161-5},
  abstract = {Nontermination is an unwanted program property for some
    software systems, and a safety property for other systems. In either
    case, automated discovery of preconditions for nontermination is of
    interest. We introduce \textsc{NtHorn}, a fast lightweight
    nontermination analyser, which is able to deduce non-trivial
    sufficient conditions for nontermination. Using Constrained Horn
    Clauses (CHCs) as a vehicle, we show how established techniques for
    CHC program transformation and abstract interpretation can be
    exploited for the purpose of nontermination analysis. \textsc{NtHorn}
    is comparable in effectiveness to the state-of-the-art nontermination
    analysis tools, as measured on standard competition benchmark suites
    (consisting of integer manipulating programs), while typically
    solving problems faster by one order of magnitude.},
}

@InProceedings{Ses-Son_PEPM24,
  author = {Peter Sestoft and Harald S{\o}ndergaard},
  title = {The Genesis of {Mix}: Early Days of Self-Applicable
    Partial Evaluation (Invited Contribution)},
  editor = {G. Keller and M. Wang},
  booktitle = {PEPM 2024: Proceedings of the 2024 ACM SIGPLAN International
    Workshop on Partial Evaluation and Program Manipulation},
  pages = {1--13},
  year = {2024},
  doi = {10.1145/3635800.3637445},
  abstract = {Forty years ago development started on Mix, a partial
    evaluator designed specifically for the purpose of self-application.
    The effort, led by Neil D. Jones at the University of Copenhagen,
    eventually demonstrated that non-trivial compilers could be generated
    automatically by applying a partial evaluator to itself. The
    possibility, in theory, of such self-application had been known for
    more than a decade, but remained unrealized by the start of 1984.
    We describe the genesis of Mix, including the research environment,
    the challenges, and the main insights that led to success. We
    emphasize the critical role played by program annotation as a
    pre-processing step, a process that became automated in the form of
    binding-time analysis.},
  keywords = {Partial evaluation, Binding-time analysis, Compilation, History},
}

@InProceedings{Far-Jef-Son_ITiCSE22,
  author = {Matthew Farrugia-Roberts and Bryn Jeffries and
    Harald S{\o}ndergaard},
  title = {Programming to Learn:
    Logic and Computation from a Programming Perspective},
  booktitle = {Proceedings of the 27th Annual Conference on Innovation and
    Technology in Computer Science Education},
  pages = {311--317},
  publisher = {Association for Computing Machinery},
  address = {New York, NY},
  year = {2022},
  doi = {10.1145/3502718.3524814},
  abstract = {Programming problems are commonly used as a learning and
    assessment activity for learning to program. We believe that
    programming problems can be effective for broader learning goals.
    In our large-enrolment course, we have designed special programming
    problems relevant to logic, discrete mathematics, and the theory of
    computation, and we have used them for formative and summative
    assessment. In this report, we reflect on our experience. We aim to
    leverage our students' programming backgrounds by offering a
    code-based formalism for our mathematical syllabus. We find we can
    translate many traditional questions into programming problems of a
    special kind---calling for `programs' as simple as a single
    expression, such as a formula or automaton represented in code.
    A web-based platform enables self-paced learning with rapid
    contextual corrective feedback, and helps us scale summative
    assessment to the size of our cohort. We identify several barriers
    arising with our approach and discuss how we have attempted to
    negate them. We highlight the potential of programming problems as
    a digital learning activity even beyond a logic and computation
    course.},
  keywords = {Education, Web-based learning},
}

@InProceedings{Far-Jef-Son_TFPIE21,
  author = {Matthew Farrugia-Roberts and Bryn Jeffries and
    Harald S{\o}ndergaard},
  title = {Teaching Simple Constructive Proofs with {Haskell} Programs},
  editor = {P. Achten and E. Machkasova},
  booktitle = {Trends in Functional Programming in Education (TFPIE)},
  series = {Electronic Proceedings in Theoretical Computer Science},
  volume = {363},
  pages = {54--73},
  year = {2022},
  url_Paper = {https://arxiv.org/abs/2208.04699v1},
  doi = {10.4204/EPTCS.363.4},
  abstract = {In recent years we have explored using Haskell alongside a
    traditional mathematical formalism in our large-enrolment university
    course on topics including logic and formal languages, aiming to
    offer our students a programming perspective on these mathematical
    topics. We have found it possible to offer almost all formative and
    summative assessment through an interactive learning platform, using
    Haskell as a lingua franca for digital exercises across our broad
    syllabus. Among the hardest exercises to convert into this format
    are traditional written proofs conveying constructive arguments.
    In this paper we reflect on the digitisation of this kind of
    exercise. We share many examples of Haskell exercises designed to
    target similar skills to written proof exercises across topics in
    propositional logic and formal languages, discussing various aspects
    of the design of such exercises. We also catalogue a sample of
    student responses to such exercises. This discussion contributes to
    our broader exploration of programming problems as a flexible
    digital medium for learning and assessment.},
  keywords = {Education, Web-based learning, Haskell},
}

@InProceedings{Son_LOPSTR21,
  author = {Harald S{\o}ndergaard},
  title = {String Abstract Domains and Their Combination},
  editor = {E. {De Angelis} and W. Vanhoof},
  booktitle = {Logic-Based Program Synthesis and Transformation},
  series = {Lecture Notes in Computer Science},
  volume = {13290},
  pages = {1--15},
  publisher = {Springer},
  year = {2022},
  doi = {10.1007/978-3-030-98869-2_1},
  abstract = {We survey recent developments in string static analysis, with
    an emphasis on how string abstract domains can be combined. The paper
    formed the basis for an invited presentation given at LOPSTR 2021
    and PPDP 2021.},
  keywords = {Abstract domains, String analysis},
}

@Article{Gan-Ma-Nav-Sch-Son-Stu_TOPLAS21,
  author = {Graeme Gange and Zequn Ma and Jorge A. Navas and
    Peter Schachte and Harald S{\o}ndergaard and Peter J. Stuckey},
  title = {A Fresh Look at {Zones} and {Octagons}},
  journal = {ACM Transactions on Programming Languages and Systems},
  volume = {43},
  number = {3},
  pages = {11:1--11:51},
  year = {2021},
  doi = {10.1145/3457885},
  abstract = {Zones and Octagons are popular abstract domains for static
    program analysis. They enable the automated discovery of simple
    numerical relations that hold between pairs of program variables.
    Both domains are well understood mathematically but the detailed
    implementation of static analyses based on these domains poses many
    interesting algorithmic challenges. In this paper we study the two
    abstract domains, their implementation and use. Utilizing improved
    data structures and algorithms for the manipulation of graphs that
    represent difference-bound constraints, we present fast
    implementations of both abstract domains, built around a common
    infrastructure. We compare the performance of these implementations
    against alternative approaches offering the same analysis precision.
    We quantify the differences in performance by measuring their speed
    and precision on standard benchmarks. We also assess, in the context
    of software verification, the extent to which the improved precision
    translates to better verification outcomes. Experiments demonstrate
    that our new implementations improve the state of the art for both
    Zones and Octagons significantly.},
  keywords = {Abstract domains, Abstract interpretation, Static analysis},
}

@InProceedings{Gan-Nav-Sch-Son-Stu_SAS21,
  author = {Graeme Gange and Jorge A. Navas and Peter Schachte and
    Harald S{\o}ndergaard and Peter J. Stuckey},
  title = {Disjunctive Interval Analysis},
  editor = {C. Dr{\u{a}}goi and S. Mukherjee and K. Namjoshi},
  booktitle = {Static Analysis},
  series = {Lecture Notes in Computer Science},
  volume = {12913},
  pages = {144--165},
  publisher = {Springer},
  year = {2021},
  doi = {10.1007/978-3-030-88806-0_7},
  abstract = {We revisit disjunctive interval analysis based on the Boxes
    abstract domain. We propose the use of what we call range decision
    diagrams (RDDs) to implement Boxes, and we provide algorithms for
    the necessary RDD operations. RDDs tend to be more compact than the
    linear decision diagrams (LDDs) that have traditionally been used
    for Boxes. Representing information more directly, RDDs also allow
    for the implementation of more accurate abstract operations. This
    comes at no cost in terms of analysis efficiency, whether LDDs
    utilise dynamic variable ordering or not. RDD and LDD
    implementations are available in the Crab analyzer, and our
    experiments confirm that RDDs are well suited for disjunctive
    interval analysis.},
  keywords = {Abstract domains, Abstract interpretation, Static analysis,
    Interval analysis},
}

@InProceedings{Kaf-Gan-Sch-Son-Stu_SEFM21,
  author = {Bishoksan Kafle and Graeme Gange and Peter Schachte and
    Harald S{\o}ndergaard and Peter J. Stuckey},
  title = {Lightweight Non-Termination Inference with {CHCs}},
  editor = {R. Calinescu and C. P{\u{a}}s{\u{a}}reanu},
  booktitle = {Software Engineering and Formal Methods},
  series = {Lecture Notes in Computer Science},
  volume = {13085},
  pages = {383--402},
  publisher = {Springer},
  year = {2021},
  doi = {10.1007/978-3-030-92124-8_22},
  abstract = {Non-termination is an unwanted program property (considered
    a bug) for some software systems, and a safety property for other
    systems. In either case, automated discovery of preconditions for
    non-termination is of interest. We introduce \textsc{NtHorn}, a fast
    lightweight non-termination analyser, able to deduce non-trivial
    sufficient conditions for non-termination. Using Constrained Horn
    Clauses (CHCs) as a vehicle, we show how established techniques for
    CHC program transformation and abstract interpretation can be
    exploited for the purpose of non-termination analysis.
    \textsc{NtHorn} is comparable in power to the state-of-the-art
    non-termination analysis tools, as measured on standard competition
    benchmark suites (consisting of integer manipulating programs),
    while typically solving problems an order of magnitude faster.},
  keywords = {Program verification, Program transformation,
    Partial evaluation, Nontermination analysis, Horn clauses},
}

@Article{Kaf-Gan-Stu-Sch-Son_TPLP21,
  author = {Bishoksan Kafle and Graeme Gange and Peter J. Stuckey and
    Peter Schachte and Harald S{\o}ndergaard},
  title = {Transformation-Enabled Precondition Inference},
  journal = {Theory and Practice of Logic Programming},
  volume = {21},
  pages = {700--716},
  year = {2021},
  doi = {10.1017/S1471068421000272},
  abstract = {Precondition inference is a non-trivial problem with
    important applications in program analysis and verification. We
    present a novel iterative method for automatically deriving
    preconditions for the safety and unsafety of programs. Each
    iteration maintains over-approximations of the set of safe and
    unsafe initial states, which are used to partition the program's
    initial states into those known to be safe, known to be unsafe and
    unknown. We then construct revised programs with those unknown
    initial states and iterate the procedure until the approximations
    are disjoint or some termination criteria are met. An experimental
    evaluation of the method on a set of software verification
    benchmarks shows that it can infer precise preconditions (sometimes
    optimal) that are not possible using previous methods.},
  keywords = {Program verification, Program transformation},
}

@InProceedings{Ama-Gan-Sch-Son-Stu_DIPL20,
  author = {Roberto Amadini and Graeme Gange and Peter Schachte and
    Harald S{\o}ndergaard and Peter J. Stuckey},
  title = {Abstract Interpretation, Symbolic Execution and Constraints},
  editor = {F. S. {de Boer} and J. Mauro},
  booktitle = {Recent Developments in the Design and Implementation of
    Programming Languages},
  series = {OpenAccess Series in Informatics},
  volume = {86},
  pages = {7:1--7:19},
  publisher = {Schloss Dagstuhl-Leibniz-Zentrum f{\"u}r Informatik},
  year = {2020},
  doi = {10.4230/OASIcs.Gabbrielli.2020.7},
  url_Paper = {https://drops.dagstuhl.de/opus/volltexte/2020/13229/pdf/OASIcs-Gabbrielli-7.pdf},
  abstract = {Abstract interpretation is a static analysis framework for
    sound over-approximation of all possible runtime states of a
    program. Symbolic execution is a framework for reachability analysis
    which tries to explore all possible execution paths of a program.
    A shared feature between abstract interpretation and symbolic
    execution is that each---implicitly or explicitly---maintains
    constraints during execution, in the form of invariants or path
    conditions. We investigate the relations between the worlds of
    abstract interpretation, symbolic execution and constraint solving,
    to expose potential synergies.},
  keywords = {Constraint programming, Symbolic execution,
    Abstract interpretation},
}

@InProceedings{Ama-Gan-Sch-Son-Stu_ECAI20,
  author = {Roberto Amadini and Graeme Gange and Peter Schachte and
    Harald S{\o}ndergaard and Peter J. Stuckey},
  title = {String Constraint Solving: Past, Present and Future},
  editor = {G. {De Giacomo} and others},
  booktitle = {Proceedings of the 24th European Conference on
    Artificial Intelligence},
  pages = {2875--2876},
  publisher = {IOS Press},
  year = {2020},
  doi = {10.3233/FAIA200431},
  url_Paper = {https://ebooks.iospress.nl/pdf/doi/10.3233/FAIA200431},
  abstract = {String constraint solving is an important emerging field,
    given the ubiquity of strings across fields such as formal analysis,
    automated testing, database query processing, and cybersecurity.
    This paper highlights the current state of the art in string
    constraint solving, and identifies future challenges in this field.},
  keywords = {String solvers},
}

@InProceedings{Ama-Gan-Sch-Son-Stu_LOPSTR20,
  author = {Roberto Amadini and Graeme Gange and Peter Schachte and
    Harald S{\o}ndergaard and Peter J. Stuckey},
  title = {Algorithm Selection for Dynamic Symbolic Execution:
    A Preliminary Study},
  editor = {M. Fern{\'a}ndez},
  booktitle = {Logic-Based Program Synthesis and Transformation},
  series = {Lecture Notes in Computer Science},
  volume = {12561},
  pages = {192--209},
  publisher = {Springer},
  year = {2020},
  doi = {10.1007/978-3-030-68446-4_10},
  abstract = {Given a portfolio of algorithms, the goal of Algorithm
    Selection (\textsf{AS}) is to select the best algorithm(s) for a
    new, unseen problem instance. Dynamic Symbolic Execution
    (\textsf{DSE}) brings together concrete and symbolic execution to
    maximise program coverage. \textsf{DSE} uses a constraint solver to
    solve the path conditions and generate new inputs to explore. In
    this paper we join these lines of research by introducing a model
    that combines \textsf{DSE} and \textsf{AS} approaches. The proposed
    \textsf{AS/DSE} model is a generic and flexible framework enabling
    the \textsf{DSE} engine to solve the path conditions it collects
    with a portfolio of different solvers, by exploiting and extending
    the well-known \textsf{AS} techniques that have been developed over
    the last decade. In this way, one can increase the coverage and
    sometimes even outperform the aggregate coverage achievable by
    running simultaneously all the solvers of the portfolio.},
  keywords = {Symbolic execution},
}

@InProceedings{Ama-And-Gan-Sch-Son-Stu_CPAIOR19,
  author = {Roberto Amadini and Mak Andrlon and Graeme Gange and
    Peter Schachte and Harald S{\o}ndergaard and Peter J. Stuckey},
  title = {Constraint Programming for Dynamic Symbolic Execution of
    {JavaScript}},
  editor = {L.-M. Rousseau and K. Stergiou},
  booktitle = {Integration of Constraint Programming, Artificial
    Intelligence, and Operations Research: Proceedings of the 16th
    International Conference (CPAIOR 2019)},
  series = {Lecture Notes in Computer Science},
  volume = {11494},
  pages = {1--19},
  publisher = {Springer},
  year = {2019},
  doi = {10.1007/978-3-030-19212-9_1},
  url_Paper = {https://minerva-access.unimelb.edu.au/rest/bitstreams/b999c802-90f6-5ce4-8b79-c298bfc62afa/retrieve},
  abstract = {Dynamic Symbolic Execution (DSE) combines concrete and
    symbolic execution, usually for the purpose of generating good test
    suites automatically. It relies on constraint solvers to solve path
    conditions and to generate new inputs to explore. DSE tools usually
    make use of SMT solvers for constraint solving. In this paper, we
    show that constraint programming (CP) is a powerful alternative or
    complementary technique for DSE. Specifically, we apply CP
    techniques for DSE of JavaScript, the de facto standard for web
    programming. We capture the JavaScript semantics with MiniZinc and
    integrate this approach into a tool we call \textsc{Aratha}. We use
    \textsc{G-Strings}, a CP solver equipped with string variables, for
    solving path conditions, and we compare the performance of this
    approach against state-of-the-art SMT solvers. Experimental results,
    in terms of both speed and coverage, show the benefits of our
    approach, thus opening new research vistas for using CP techniques
    in the service of program analysis.},
  keywords = {Symbolic execution, Constraint programming, String solvers,
    JavaScript},
}

@InProceedings{And-Sch-Son-Stu_ARITH19,
  author = {Mak Andrlon and Peter Schachte and Harald S{\o}ndergaard and
    Peter J. Stuckey},
  title = {Optimal Bounds for Floating-Point Addition in Constant Time},
  editor = {N. Takagi and S. Boldo and M. Langhammer},
  booktitle = {Proceedings of the 26th IEEE Symposium on Computer
    Arithmetic (ARITH 2019)},
  pages = {159--166},
  publisher = {IEEE Conf. Publ.},
  year = {2019},
  doi = {10.1109/arith.2019.00038},
  abstract = {Reasoning about floating-point numbers is notoriously
    difficult, owing to the lack of convenient algebraic properties such
    as associativity. This poses a substantial challenge for program
    analysis and verification tools which rely on precise floating-point
    constraint solving. Currently, interval methods in this domain often
    exhibit slow convergence even on simple examples. We present a new
    theorem supporting efficient computation of exact bounds of the
    intersection of a rectangle with the preimage of an interval under
    floating-point addition, in any radix or rounding mode. We thus give
    an efficient method of deducing optimal bounds on the components of
    an addition, solving the convergence problem.},
  keywords = {Constraint solving, Machine arithmetic},
}

@InProceedings{Gan-Nav-Sch-Son-Stu_APLAS19,
  author = {Graeme Gange and Jorge A. Navas and Peter Schachte and
    Harald S{\o}ndergaard and Peter J. Stuckey},
  title = {Dissecting Widening: Separating Termination from Information},
  editor = {A. W. Lin},
  booktitle = {Proceedings of the 17th Asian Symposium on Programming
    Languages and Systems},
  series = {Lecture Notes in Computer Science},
  volume = {11893},
  pages = {95--114},
  publisher = {Springer},
  year = {2019},
  doi = {10.1007/978-3-030-34175-6_6},
  abstract = {Widening ensures or accelerates convergence of a program
    analysis, and sometimes contributes a guarantee of soundness that
    would otherwise be absent. In this paper we propose a generalised
    view of widening, in which widening operates on values that are not
    necessarily elements of the given abstract domain, although they
    must be in a correspondence, the details of which we spell out. We
    show that the new view generalizes the traditional view, and that at
    least three distinct advantages flow from the generalization. First,
    it gives a handle on ``compositional safety'', the problem of
    creating widening operators for product domains. Second, it adds a
    degree of flexibility, allowing us to define variants of widening,
    such as delayed widening, without resorting to intrusive surgery on
    an underlying fixpoint engine. Third, it adds a degree of
    robustness, by making it difficult for an analysis implementor to
    make certain subtle (syntactic vs semantic) category mistakes. The
    paper supports these claims with examples. Our proposal has been
    implemented in a state-of-the-art abstract interpreter, and we
    briefly report on the changes that the revised view necessitated.},
  keywords = {Fixed points, Abstract domains, Abstract interpretation,
    Widening},
}

@Article{Wan-Son-Stu_JAR19,
  author = {Wenxi Wang and Harald S{\o}ndergaard and Peter J. Stuckey},
  title = {Wombit: A Portfolio Bit-Vector Solver Using Word-Level
    Propagation},
  journal = {Journal of Automated Reasoning},
  volume = {63},
  number = {3},
  pages = {723--762},
  year = {2019},
  doi = {10.1007/s10817-018-9493-1},
  abstract = {We develop an idea originally proposed by Michel and Van
    Hentenryck of how to perform bit-vector constraint propagation on
    the word level. Most operations are propagated in constant time,
    assuming the bit-vector fits in a machine word. In contrast,
    bit-vector SMT solvers usually solve bit-vector problems by
    (ultimately) bit-blasting, that is, mapping the resulting operations
    to conjunctive normal form clauses, and using SAT technology to
    solve them. Bit-blasting generates intermediate variables which can
    be an advantage, as these can be searched on and learnt about. As
    each approach has advantages, it makes sense to try to combine them.
    In this paper, we describe an approach to bit-vector solving using
    word-level propagation with learning. We have designed alternative
    word-level propagators to Michel and Van Hentenryck's, and evaluated
    different variants of the approach. We have also experimented with
    different approaches to learning and back-jumping in the solver.
    Based on the insights gained, we have built a portfolio solver,
    Wombit, which essentially extends the STP bit-vector solver. Using
    machine learning techniques, the solver makes a judicious up-front
    decision about whether to use word-level propagation or fall back
    on bit-blasting.},
  keywords = {SMT solving, Constraint propagation, Program verification},
}

@InProceedings{Ala-Mil-Son_ASWEC18,
  author = {Eman Alatawi and Tim Miller and Harald S{\o}ndergaard},
  title = {Symbolic Execution with Invariant Inlay:
    Evaluating the Potential},
  booktitle = {Proceedings of the 25th Australasian Software Engineering
    Conference (ASWEC 2018)},
  pages = {26--30},
  publisher = {IEEE Computer Society},
  year = {2018},
  doi = {10.1109/ASWEC.2018.00012},
  url_Paper = {https://minerva-access.unimelb.edu.au/rest/bitstreams/38cf593c-2b7f-5e30-aa42-13b74180ed5d/retrieve},
  abstract = {Dynamic symbolic execution (DSE) is a non-standard execution
    mechanism which, loosely, executes a program symbolically and,
    simultaneously, on concrete input. DSE is attractive because of
    several uses in software engineering, including the generation of
    test data suites with large coverage relative to test suite size.
    However, DSE struggles in the face of execution path explosion, and
    is often unable to cover certain kinds of difficult-to-reach program
    points. Invariant inlay is a technique that aims to improve a DSE
    tool by interspersing code with invariants, generated automatically
    using off-the-shelf tools for static program analysis. To capitalise
    fully on a static analyzer, invariant inlay first applies certain
    testability transformations to the program source. In this paper we
    describe how we have evaluated the idea experimentally, in order to
    determine its usefulness for programs with complex control flow.},
  keywords = {Symbolic execution, Static analysis},
}

@Article{Ama-Gan-Gau-Jor-Sch-Son-Stu-Zha_FI18,
  author = {Roberto Amadini and Graeme Gange and Fran{\c{c}}ois Gauthier
    and Alexander Jordan and Peter Schachte and Harald S{\o}ndergaard
    and Peter J. Stuckey and Chenyi Zhang},
  title = {Reference Abstract Domains and Applications to String Analysis},
  journal = {Fundamenta Informaticae},
  volume = {158},
  number = {4},
  pages = {297--326},
  year = {2018},
  doi = {10.3233/FI-2018-1650},
  url_Paper = {https://minerva-access.unimelb.edu.au/rest/bitstreams/c17b2e2c-f7ea-57c4-8d54-926629bfa3f7/retrieve},
  abstract = {Abstract interpretation is a well established theory that
    supports reasoning about the run-time behaviour of programs. It
    achieves tractable reasoning by considering abstractions of run-time
    states, rather than the states themselves. The chosen set of
    abstractions is referred to as the abstract domain. We develop a
    novel framework for combining (a possibly large number of) abstract
    domains. It achieves the effect of the so-called reduced product
    without requiring a quadratic number of functions to translate
    information among abstract domains. A central notion is a reference
    domain, a medium for information exchange. Our approach suggests a
    novel and simpler way to manage the integration of large numbers of
    abstract domains. We instantiate our framework in the context of
    string analysis. Browser-embedded dynamic programming languages such
    as JavaScript and PHP encourage the use of strings as a universal
    data type for both code and data values. The ensuing vulnerabilities
    have made string analysis a focus of much recent research. String
    analysis tends to combine many elementary string abstract domains,
    each designed to capture a specific aspect of strings. For this
    instance the set of regular languages, while too expensive to use
    directly for analysis, provides an attractive reference domain,
    enabling the efficient simulation of reduced products of multiple
    string abstract domains.},
  keywords = {String analysis, Abstract interpretation},
}

@Article{Kaf-Gal-Gan-Sch-Son-Stu_TPLP18,
  author = {Bishoksan Kafle and John Gallagher and Graeme Gange and
    Peter Schachte and Harald S{\o}ndergaard and Peter J. Stuckey},
  title = {An Iterative Approach to Precondition Inference Using
    Constrained {Horn} Clauses},
  journal = {Theory and Practice of Logic Programming},
  volume = {18},
  pages = {553--570},
  year = {2018},
  doi = {10.1017/S1471068418000091},
  abstract = {We present a method for automatic inference of conditions on
    the initial states of a program that guarantee that the safety
    assertions in the program are not violated. Constrained Horn clauses
    (CHCs) are used to model the program and assertions in a uniform
    way, and we use standard abstract interpretations to derive an
    over-approximation of the set of unsafe initial states. The
    precondition then is the constraint corresponding to the complement
    of that set, under-approximating the set of safe initial states.
    This idea of complementation is not new, but previous attempts to
    exploit it have suffered from loss of precision. Here we develop an
    iterative specialisation algorithm to give more precise, and in some
    cases optimal safety conditions. The algorithm combines existing
    transformations, namely constraint specialisation, partial
    evaluation and a trace elimination transformation. The last two of
    these transformations perform polyvariant specialisation, leading to
    disjunctive constraints which improve precision. The algorithm is
    implemented and tested on a benchmark suite of programs from the
    literature in precondition inference and software verification
    competitions.},
  keywords = {Program verification, Horn clauses, Program transformation,
    Partial evaluation},
}

@InProceedings{Ala-Son-Mil_ASE17,
  author = {Eman Alatawi and Harald S{\o}ndergaard and Tim Miller},
  title = {Leveraging Abstract Interpretation for Efficient Dynamic
    Symbolic Execution},
  editor = {G. Rosu and M. {Di Penta} and T. N. Nguyen},
  booktitle = {Proceedings of the 32nd ACM/IEEE International Conference
    on Automated Software Engineering},
  pages = {619--624},
  publisher = {IEEE Computer Society},
  year = {2017},
  doi = {10.1109/ASE.2017.8115672},
  url_Paper = {https://minerva-access.unimelb.edu.au/rest/bitstreams/7d0c6934-364c-5596-b09d-d0e55f344c13/retrieve},
  abstract = {Dynamic Symbolic Execution (DSE) is a technique to
    automatically generate test inputs by executing the program
    simultaneously with concrete and symbolic values. A key challenge in
    DSE is scalability, as executing all feasible program paths is not
    possible, owing to the possibly exponential or infinite number of
    program paths. Loops, in particular those where the number of
    iterations depends on an input of the program, are a source of path
    explosion. They cause problems because DSE maintains symbolic values
    that capture only the data dependencies on symbolic inputs. This
    ignores control dependencies, including loop dependencies that
    depend indirectly on the inputs. We propose a method to increase the
    coverage achieved by DSE in the presence of input-data dependent
    loops and loop dependent branches. We combine DSE with abstract
    interpretation to find indirect control dependencies, including loop
    and branch indirect dependencies. Preliminary results show that this
    yields better coverage, in considerably less time, compared to
    standard DSE.},
  keywords = {Symbolic execution, Static analysis, Abstract interpretation},
}

@InProceedings{Ama-Jor-Gan-Gau-Sch-Son-Stu-Zha_TACAS17,
  author = {Roberto Amadini and Alexander Jordan and Graeme Gange and
    Fran{\c{c}}ois Gauthier and Peter Schachte and
    Harald S{\o}ndergaard and Peter J. Stuckey and Chenyi Zhang},
  title = {Combining String Abstract Domains for {JavaScript} Analysis:
    An Evaluation},
  editor = {A. Legay and T. Margaria},
  booktitle = {TACAS 2017: Proceedings of the 23rd International
    Conference on Tools and Algorithms for the Construction and
    Analysis of Systems},
  series = {Lecture Notes in Computer Science},
  volume = {10205},
  pages = {41--57},
  publisher = {Springer},
  year = {2017},
  doi = {10.1007/978-3-662-54577-5_3},
  abstract = {Strings play a central role in JavaScript and similar
    scripting languages. Owing to dynamic features such as the eval
    function and dynamic property access, precise string analysis is a
    prerequisite for automated reasoning about practically any kind of
    runtime property. Although the literature presents a considerable
    number of abstract domains for capturing and representing specific
    aspects of strings, we are not aware of tools that allow flexible
    combination of string abstract domains. Indeed, support for string
    analysis is often confined to a single, dedicated string domain. In
    this paper we describe a framework that allows us to combine
    multiple string abstract domains for the analysis of JavaScript
    programs. It is implemented as an extension of SAFE, an open-source
    static analysis tool. We investigate different combinations of
    abstract domains that capture various aspects of strings. Our
    evaluation suggests that a combination of a few simple abstract
    domains suffices to outperform the precision of state-of-the-art
    static analysis tools for JavaScript.},
  keywords = {String solvers, Abstract domains, Abstract interpretation,
    JavaScript},
}

@InProceedings{Kaf-Gan-Sch-Son-Stu_SAT17,
  author = {Bishoksan Kafle and Graeme Gange and Peter Schachte and
    Harald S{\o}ndergaard and Peter J. Stuckey},
  title = {A {Benders} Decomposition Approach to Deciding Modular
    Linear Integer Arithmetic},
  editor = {S. Gaspers and T. Walsh},
  booktitle = {Theory and Applications of Satisfiability Testing (SAT 2017)},
  series = {Lecture Notes in Computer Science},
  volume = {10491},
  pages = {380--397},
  publisher = {Springer International Publishing},
  year = {2017},
  doi = {10.1007/978-3-319-66263-3_24},
  abstract = {Verification tasks frequently require deciding systems of
    linear constraints over modular (machine) arithmetic. Existing
    approaches for reasoning over modular arithmetic use bit-vector
    solvers, or else approximate machine integers with mathematical
    integers and use arithmetic solvers. Neither is ideal; the first is
    sound but inefficient, and the second is efficient but unsound. We
    describe a linear encoding which correctly describes modular
    arithmetic semantics, yielding an optimistic but sound approach. Our
    method abstracts the problem with linear arithmetic, but
    progressively refines the abstraction when modular semantics is
    violated. This preserves soundness while exploiting the mostly
    integer nature of the constraint problem. We present a prototype
    implementation, which gives encouraging experimental results.},
  keywords = {Program verification, Constraint solving, Machine arithmetic},
}

@InProceedings{Men-Son-Ven_AAEE17,
  author = {Antonette Mendoza and Harald S{\o}ndergaard and Anne Venables},
  title = {Making Sense of Learning Management System's Quiz Analytics
    in Understanding Students' Learning Difficulties},
  booktitle = {Proceedings of the 28th Annual Conference of the
    Australasian Association for Engineering Education (AAEE2017)},
  pages = {112--119},
  location = {Manly, New South Wales},
  publisher = {Australasian Association for Engineering Education},
  year = {2017},
  keywords = {Education},
}

@InProceedings{Ala-Mil-Son_MET16,
  author = {Eman Alatawi and Tim Miller and Harald S{\o}ndergaard},
  title = {Generating Source Inputs for Metamorphic Testing
    Using Dynamic Symbolic Execution},
  booktitle = {MET'16: Proceedings of the First International Workshop on
    Metamorphic Testing},
  pages = {19--25},
  publisher = {ACM},
  year = {2016},
  doi = {10.1145/2896971.2896980},
  isbn = {978-1-4503-4163-9},
  abstract = {Metamorphic testing uses domain-specific properties about a
    program's intended behaviour to alleviate the oracle problem. From a
    given set of source test inputs, a set of follow-up test inputs are
    generated which have some relation to the source inputs, and their
    outputs are compared to outputs from the source tests, using
    metamorphic relations. We evaluate the use of an automated test
    input generation technique called dynamic symbolic execution (DSE)
    to generate the source test inputs for metamorphic testing. We
    investigate whether DSE increases source-code coverage and fault
    finding effectiveness of metamorphic testing compared to the use of
    random testing, and whether the use of metamorphic relations as a
    supportive technique improves the test inputs generated by DSE. Our
    results show that DSE improves the coverage and fault detection rate
    of metamorphic testing compared to random testing using
    significantly smaller test suites, and the use of metamorphic
    relations increases code coverage of both DSE and random tests
    considerably, but the improvement in the fault detection rate may be
    marginal and depends on the metamorphic relations used.},
  keywords = {Symbolic execution, Testing},
}

@InProceedings{Gan-Nav-Sch-Son-Stu_SAS16,
  author = {Graeme Gange and Jorge Navas and Peter Schachte and
    Harald S{\o}ndergaard and Peter J. Stuckey},
  title = {Exploiting Sparsity in Difference-Bound Matrices},
  editor = {X. Rival},
  booktitle = {Static Analysis:
    Proceedings of the 23rd International Symposium},
  series = {Lecture Notes in Computer Science},
  volume = {9837},
  pages = {189--211},
  publisher = {Springer},
  year = {2016},
  doi = {10.1007/978-3-662-53413-7},
  abstract = {Relational numeric abstract domains are very important in
    program analysis. Common domains, such as Zones and Octagons, are
    usually conceptualised with weighted digraphs and implemented using
    difference-bound matrices (DBMs). Unfortunately, though conceptually
    simple, direct implementations of graph-based domains tend to
    perform poorly in practice, and are impractical for analyzing large
    code-bases. We propose new DBM algorithms that exploit sparsity and
    closed operands. In particular, a new representation which we call
    split normal form reduces graph density on typical abstract states.
    We compare the resulting implementation with several existing
    DBM-based abstract domains, and show that we can substantially
    reduce the time to perform full DBM analysis, without sacrificing
    precision.},
  keywords = {Abstract domains, Abstract interpretation, Static analysis},
}

@Article{Gan-Nav-Sch-Son-Stu_TCS16,
  author = {Graeme Gange and Jorge Navas and Peter Schachte and
    Harald S{\o}ndergaard and Peter J. Stuckey},
  title = {A Complete Refinement Procedure for Regular Separability
    of Context-Free Languages},
  journal = {Theoretical Computer Science},
  volume = {625},
  pages = {1--24},
  month = {April},
  year = {2016},
  doi = {10.1016/j.tcs.2016.01.026},
  abstract = {Often, when analyzing the behaviour of systems modelled as
    context-free languages, we wish to know if two languages overlap.
    To this end, we present a class of semi-decision procedures for
    regular separability of context-free languages, based on
    counter-example guided abstraction refinement. We propose two
    effective instances of this approach, one that is complete but
    relatively expensive, and one that is inexpensive and sound, but for
    which we do not have a completeness proof. The complete method will
    prove disjointness whenever the input languages are regularly
    separable. Both methods will terminate whenever the input languages
    overlap. We provide an experimental evaluation of these procedures,
    and demonstrate their practicality on a range of verification and
    language-theoretic instances.},
  keywords = {String analysis, Formal languages, Program verification},
}

@InProceedings{Gan-Nav-Sch-Son-Stu_VMCAI15,
  author = {Graeme Gange and Jorge Navas and Peter Schachte and
    Harald S{\o}ndergaard and Peter J. Stuckey},
  title = {An Abstract Domain of Uninterpreted Functions},
  editor = {B. Jobstmann and K. R. M. Leino},
  booktitle = {Verification, Model Checking and Abstract Interpretation:
    Proceedings of the 17th International Conference},
  series = {Lecture Notes in Computer Science},
  volume = {9583},
  pages = {85--103},
  publisher = {Springer},
  year = {2016},
  isbn = {978-3-662-49121-8},
  doi = {10.1007/978-3-662-49122-5},
  abstract = {We revisit relational static analysis of numeric variables.
    Such analyses face two difficulties. First, even inexpensive
    relational domains scale too poorly to be practical for large
    code-bases. Second, to remain tractable they have extremely coarse
    handling of non-linear relations. In this paper, we introduce the
    subterm domain, a weakly relational abstract domain for inferring
    equivalences amongst sub-expressions, based on the theory of
    uninterpreted functions. This provides an extremely cheap approach
    for enriching non-relational domains with relational information,
    and enhances precision of both relational and non-relational domains
    in the presence of non-linear operations. We evaluate the idea in
    the context of the software verification tool SeaHorn.},
  keywords = {Abstract domains, Abstract interpretation, Static analysis,
    Program verification},
}

@InProceedings{Lin-Mil-Son_APSEC16,
  author = {Yude Lin and Tim Miller and Harald S{\o}ndergaard},
  title = {Compositional Symbolic Execution: Incremental Solving Revisited},
  editor = {A. Potanin and G. Murphy},
  booktitle = {APSEC'16: Proceedings of the 23rd Asia-Pacific Software
    Engineering Conference},
  pages = {273--280},
  location = {Hamilton, New Zealand},
  year = {2016},
  doi = {10.1109/APSEC.2016.046},
  abstract = {Symbolic execution can automatically explore different
    execution paths in a system under test and generate tests to
    precisely cover them. It has two main advantages---being automatic
    and thorough within a theory---and has many successful applications.
    The current bottleneck of symbolic execution is its computational
    cost for complex systems. Compositional Symbolic Execution (CSE)
    introduces a summarisation module to eliminate the redundancy in the
    exploration of repeatedly encountered code. In our previous work, we
    generalised the summarisation to arbitrary code fragments instead of
    functions. In this paper, we transplant this idea onto LLVM with
    many additional features, one of them being the use of incremental
    solving. We show that the combination of CSE and incremental solving
    is mutually beneficial. The obvious weakness of CSE is the lack of
    context during summarisation. We discuss the use of assumption-based
    features, available in modern constraint solvers, as a way to
    overcome this problem.},
  keywords = {Symbolic execution, LLVM},
}

@InProceedings{Wan-Son-Stu_CPAIOR16,
  author = {Wenxi Wang and Harald S{\o}ndergaard and Peter J. Stuckey},
  title = {A Bit-Vector Solver with Word-Level Propagation},
  editor = {C.-G. Quimper},
  booktitle = {Integration of AI and OR Techniques in Constraint
    Programming: Proceedings of the 13th International Conference},
  series = {Lecture Notes in Computer Science},
  volume = {9676},
  pages = {374--391},
  publisher = {Springer International Publishing},
  year = {2016},
  isbn = {978-3-319-33953-5},
  doi = {10.1007/978-3-319-33954-2_27},
  abstract = {Reasoning with bit-vectors arises in a variety of
    applications in verification and cryptography. Michel and Van
    Hentenryck have proposed an interesting approach to bit-vector
    constraint propagation on the word level. Each of the operations
    except comparison is constant time, assuming the bit-vector fits in
    a machine word. In contrast, bit-vector SMT solvers usually solve
    bit-vector problems by bit-blasting, that is, mapping the resulting
    operations to conjunctive normal form clauses, and using SAT
    technology to solve them. This also means generating intermediate
    variables which can be an advantage, as these can be searched on and
    learnt about. Since each approach has advantages, it is important to
    see whether we can benefit from these advantages by using a
    word-level propagation approach with learning. In this paper we
    describe an approach to bit-vector solving using word-level
    propagation with learning. We provide alternative word-level
    propagators to Michel and Van Hentenryck's, and give the first
    empirical evaluation of their approach that we are aware of. We show
    that, with careful engineering, a word-level propagation based
    approach can compete with (or complement) bit-blasting.},
  keywords = {SMT solving, Constraint propagation, Program verification},
}

@InProceedings{Ala-Mil-Son_ASWEC15,
  author = {Eman Alatawi and Tim Miller and Harald S{\o}ndergaard},
  title = {Using Metamorphic Testing to Improve Dynamic Symbolic Execution},
  booktitle = {Proceedings of the 24th Australasian Software Engineering
    Conference (ASWEC 2015)},
  pages = {38--47},
  publisher = {IEEE Computer Society},
  year = {2015},
  doi = {10.1109/ASWEC.2015.16},
  abstract = {Dynamic symbolic execution (DSE) is an approach for
    automatically generating test inputs from source code using
    constraint information. It is used in \emph{fuzzing}: the execution
    of tests while monitoring for generic properties such as buffer
    overflows and other security violations. Limitations of DSE for
    fuzzing are two-fold: (1) only generic properties are checked: many
    deviations from specified behaviour are not found; and (2) many
    programs are not entirely amenable to DSE because they give rise to
    hard constraints, so that some parts of a program remain uncovered.
    In this paper, we discuss how to mitigate these problems using
    \emph{metamorphic testing} (MT). Metamorphic testing uses
    domain-specific properties about program behaviour, relating pairs
    of inputs to pairs of outputs. From a given test suite,
    \emph{follow-up test inputs} are generated, and their outputs are
    compared to outputs from the original tests, using
    \emph{metamorphic relations}. Our hypothesis is that using
    metamorphic testing increases the ability of a DSE test suite to
    find faults, and that the follow-up tests execute some
    previously-uncovered segments. We have experimented with seven small
    but non-trivial libraries, comparing DSE test suites with DSE+MT
    test suites, demonstrating that DSE+MT test suites improve coverage
    marginally, but find more faults.},
  keywords = {Symbolic execution, Testing},
}

@InProceedings{Cor-Gan-Nav-Sch-Son-Stu_LOPSTR14,
  author = {J. Robert M. Cornish and Graeme Gange and Jorge Navas and
    Peter Schachte and Harald S{\o}ndergaard and Peter J. Stuckey},
  title = {Analyzing Array Manipulating Programs by Program Transformation},
  editor = {M. Proietti and H. Seki},
  booktitle = {Logic-Based Program Synthesis and Transformation:
    Proceedings of the 24th International Symposium},
  series = {Lecture Notes in Computer Science},
  volume = {8981},
  pages = {3--20},
  publisher = {Springer International},
  year = {2015},
  doi = {10.1007/978-3-319-17822-6_1},
  abstract = {We explore a transformational approach to the problem of
    verifying simple array-manipulating programs. Traditionally,
    verification of such programs requires intricate analysis machinery
    to reason with universally quantified statements about symbolic
    array segments, such as ``every data item stored in the segment A[i]
    to A[j] is equal to the corresponding item stored in the segment
    B[i] to B[j].'' We define a simple abstract machine which allows for
    set-valued variables and we show how to translate programs with
    array operations to array-free code for this machine. For the
    purpose of program analysis, the translated program remains faithful
    to the semantics of array manipulation. Based on our implementation
    in LLVM, we evaluate the approach with respect to its ability to
    extract useful invariants and the cost in terms of code size.},
  keywords = {Program transformation, Static analysis, Array analysis},
}

@InProceedings{Dav-Pea-Stu-Son_AAMAS15,
  author = {Toby O. Davies and Adrian R. Pearce and Peter J. Stuckey and
    Harald S{\o}ndergaard},
  title = {Optimisation and Relaxation for Planning in the Situation
    Calculus},
  editor = {R. H. Bordini and E. Elkind and G. Weiss and P. Yolum},
  booktitle = {Proceedings of the 14th International Conference on
    Autonomous Agents and Multiagent Systems (AAMAS 2015)},
  pages = {1141--1149},
  publisher = {IFAAMAS},
  year = {2015},
  url_Paper = {http://www.aamas2015.com/en/AAMAS_2015_USB/aamas/p1141.pdf},
  abstract = {The situation calculus can express rich agent behaviours and
    goals and facilitates the reduction of complex planning problems to
    theorem proving. However, in many planning problems, solution
    quality is critically important, and the achievable quality is not
    necessarily known in advance. Existing \textsc{Golog}
    implementations merely search for a \emph{legal} plan, typically
    relying on depth-first search to find an execution. We illustrate
    where existing strategies will not terminate when quality is
    considered, and to overcome this limitation we formally introduce
    the notion of \emph{cost} to simplify the search for a solution. The
    main contribution is a new class of relaxations of the planning
    problem, termed \emph{precondition relaxations}, based on Lagrangian
    relaxation. We show how this facilitates optimisation of a
    restricted class of \textsc{Golog} programs for which plan existence
    (under a cost budget) is decidable. It allows for tractably
    computing relaxations to the planning problem and leads to a
    general, \emph{blackbox}, approach to optimally solving multi-agent
    planning problems without explicit reference to the semantics of
    interleaved concurrency.},
  keywords = {Planning, Artificial intelligence},
}

@InProceedings{Gan-Nav-Sch-Son-Stu_NFM15,
  author = {Graeme Gange and Jorge Navas and Peter Schachte and
    Harald S{\o}ndergaard and Peter J. Stuckey},
  title = {A Tool for Intersecting Context-Free Grammars and Its
    Applications},
  editor = {K. Havelund and G. Holzmann and R. Joshi},
  booktitle = {NASA Formal Methods:
    Proceedings of the Seventh International Symposium},
  series = {Lecture Notes in Computer Science},
  volume = {9058},
  pages = {422--428},
  publisher = {Springer},
  year = {2015},
  doi = {10.1007/978-3-319-17524-9},
  abstract = {This paper describes a tool for intersecting context-free
    grammars. Since this problem is undecidable the tool follows a
    refinement-based approach and implements a novel refinement which is
    complete for regularly separable grammars. We show its effectiveness
    for safety verification of recursive multi-threaded programs.},
  keywords = {String analysis, Formal languages, Program verification},
}

@Article{Gan-Nav-Sch-Son-Stu_TOPLAS15,
  author = {Graeme Gange and Jorge Navas and Peter Schachte and
    Harald S{\o}ndergaard and Peter J. Stuckey},
  title = {Interval Analysis and Machine Arithmetic:
    Why Signedness Ignorance Is Bliss},
  journal = {ACM Transactions on Programming Languages and Systems},
  volume = {37},
  number = {1},
  pages = {1:1--1:35},
  month = {January},
  year = {2015},
  doi = {10.1145/2651360},
  abstract = {The most commonly used integer types have fixed bit-width,
    making it possible for computations to ``wrap around'', and many
    programs depend on this behaviour. Yet much work to date on program
    analysis and verification of integer computations treats integers as
    having infinite precision, and most analyses that do respect fixed
    width lose precision when overflow is possible. We present a novel
    integer interval abstract domain that correctly handles wrap-around.
    The analysis is signedness agnostic. By treating integers as strings
    of bits, only considering signedness for operations that treat them
    differently, we produce precise, correct results at a modest cost in
    execution time.},
  keywords = {Abstract domains, Abstract interpretation, Interval analysis,
    Machine arithmetic, C analysis, LLVM},
}

@Article{Gan-Nav-Sch-Son-Stu_TPLP15,
  author = {Graeme Gange and Jorge Navas and Peter Schachte and
    Harald S{\o}ndergaard and Peter J. Stuckey},
  title = {Horn Clauses as an Intermediate Representation for
    Program Analysis and Transformation},
  journal = {Theory and Practice of Logic Programming},
  volume = {15},
  number = {4--5},
  pages = {526--542},
  year = {2015},
  doi = {10.1017/S1471068415000204},
  abstract = {Many recent analyses for conventional imperative programs
    begin by transforming programs into logic programs, capitalising on
    existing LP analyses and simple LP semantics. We propose using logic
    programs as an intermediate program representation throughout the
    compilation process. With restrictions ensuring determinism and
    single-modedness, a logic program can easily be transformed to
    machine language or other low-level language, while maintaining the
    simple semantics that makes it suitable as a language for program
    analysis and transformation. We present a simple LP language that
    enforces determinism and single-modedness, and show that it makes a
    convenient program representation for analysis and transformation.},
  keywords = {Static analysis, Intermediate representations, Horn clauses,
    Logic programming},
}

@InProceedings{Lin-Mil-Son_ASWEC15,
  author = {Yude Lin and Tim Miller and Harald S{\o}ndergaard},
  title = {Compositional Symbolic Execution Using Fine-Grained Summaries},
  booktitle = {Proceedings of the 24th Australasian Software Engineering
    Conference (ASWEC 2015)},
  pages = {213--222},
  publisher = {IEEE Computer Society},
  year = {2015},
  doi = {10.1109/ASWEC.2015.32},
  url_Paper = {http://ieeexplore.ieee.org/xpl/login.jsp?tp=&arnumber=7365810},
  abstract = {Compositional symbolic execution has been proposed as a way
    to increase the efficiency of symbolic execution. Essentially, when
    a function is symbolically executed, a \emph{summary} of the path
    that was executed is stored. This summary records the precondition
    and postcondition of the path, and on subsequent calls that satisfy
    that precondition, the corresponding postcondition can be returned
    instead of executing the function again. However, using functions as
    the unit of summarisation leaves the symbolic execution tool at the
    mercy of a program designer, essentially resulting in an arbitrary
    summarisation strategy. In this paper, we explore the use of
    \emph{fine-grained} summaries, in which blocks \emph{within}
    functions are summarised. We propose three types of summarisation
    and demonstrate how to generate these. At such a fine-grained level,
    symbolic execution of a path effectively becomes the concatenation
    of the summaries along that path. Using a prototype symbolic
    execution tool, we perform a preliminary experimental evaluation of
    our summary approaches, demonstrating that they can improve the
    speed of symbolic execution by reducing the number of calls sent to
    the underlying constraint solver.},
  keywords = {Symbolic execution},
}

@inproceedings{Dav-Pea-Stu-Son_ICAPS14,\n author = {Toby O. Davies and \n\t\tAdrian R. Pearce and\n Peter J. Stuckey and \n\t\tHarald S{\\o}ndergaard},\n title = {Fragment-Based Planning Using Column Generation},\n editor = {S. Chien and M. Do and A. Fern and W. Ruml},\n booktitle = {Proceedings of the 24th International Conference\n on Automated Planning and Scheduling},\n pages = {83--91},\n publisher = {AAAI},\n year = {2014},\n url_Paper = {https://www.aaai.org/ocs/index.php/ICAPS/ICAPS14/paper/view/7907/8015},\n doi = {10.1609/icaps.v24i1.13628},\n abstract = {We introduce a novel algorithm for temporal planning in \n\t\t\\textsc{Golog} using shared resources, and describe the \n\t\tBulk Freight Rail Scheduling Problem, a motivating example\n\t\tof such a temporal domain. We use the framework of column \n\t\tgeneration to tackle complex resource constrained temporal \n\t\tplanning problems that are beyond the scope of current planning\n\t\ttechnology by combining: the global view of a linear programming\n\t\trelaxation of the problem; the strength of search in finding \n\t\taction sequences; and the domain knowledge that can be encoded \n\t\tin a \\textsc{Golog} program. We show that our approach \n\t\tsignificantly outperforms state-of-the-art temporal planning \n\t\tand constraint programming approaches in this domain, in \n\t\taddition to existing temporal \\textsc{Golog} implementations.\n\t\tWe also apply our algorithm to a temporal variant of \n\t\tblocks-world where our decomposition speeds proof of optimality\n\t\tsignificantly compared to other anytime algorithms. We discuss \n\t\tthe potential of the underlying algorithm being applicable to\n\t\tSTRIPS planning, with further work.},\n keywords = {Planning, Artificial intelligence},\n}\n\n\n
@article{Gan-Hor-Nai-Son_TCAD14,\n author = {Graeme Gange and \n\t\tBenjamin Horsfall and \n\t\tLee Naish and\n Harald S{\\o}ndergaard},\n title = {Four-Valued Reasoning and Cyclic Circuits},\n journal = {IEEE Transactions on Computer-Aided Design of Integrated\n Circuits and Systems},\n volume = {33},\n number = {7},\n pages = {1003--1016},\n month = {jul},\n year = {2014},\n url_Paper = {http://dx.doi.org/10.1109/TCAD.2014.2304176},\n doi = {10.1109/TCAD.2014.2304176},\n abstract = {Allowing cycles in a logic circuit can be advantageous,\n\t\tfor example, by reducing the number of gates required to \n\t\timplement a given Boolean function, or set of functions.\n\t\tHowever, a cyclic circuit may easily be ill-behaved.\n\t\tFor instance it may have some output wire oscillate \n\t\tinstead of reaching a steady state. Propositional \n\t\tthree-valued logic has long been used in tests for\n\t\twell-behaviour of cyclic circuits: a symbolic evaluation \n\t\tmethod known as ternary analysis provides one criterion \n\t\tfor well-behaviour under certain assumptions about wire \n\t\tand gate delay. We revisit ternary analysis and argue \n\t\tfor the use of four truth values. The fourth truth \n\t\tvalue allows for the distinction of ``undefined'' and\n\t\t``underspecified'' behaviour. Ability to under-specify \n\t\tbehaviour is useful, because, in a quest for smaller \n\t\tcircuits, an implementor can capitalize on degrees of \n\t\tfreedom offered in the specification. Moreover, a fourth\n\t\ttruth value is attractive because, rather than complicating\n\t\t(ternary) circuit analysis, it introduces a pleasant \n\t\tsymmetry, in the form of contra-duality, as well as \n\t\tproviding a convenient framework for manipulating\n\t\tspecifications. We use this symmetry to provide fixed \n\t\tpoint results that clarify how two-, three-, and four-valued\n\t\tanalyses are related, and to explain some observations about\n\t\tternary analysis.},\n keywords = {Fixed points, Many-valued logic, Logic circuits, Circuit synthesis},\n}\n\n\n
@misc{Gan-Nav-Sch-Son-Stu_ArXiv14,\n author = {Graeme Gange and \n\t\tJorge Navas and \n\t\tPeter Schachte and\n Harald S{\\o}ndergaard and \n\t\tPeter J. Stuckey},\n title = {A Partial-Order Approach to Array Content Analysis},\n note = {arXiv preprint {https://arxiv.org/abs/1408.1754}},\n year = {2014},\n doi = {10.48550/arXiv.1408.1754},\n abstract = {We present a parametric abstract domain for array content \n\t\tanalysis. The method maintains invariants for contiguous \n\t\tregions of the array, similar to the methods of Gopan, Reps \n\t\tand Sagiv, and of Halbwachs and Peron. However, it introduces \n\t\ta novel concept of an array content graph, avoiding the need \n\t\tfor an up-front factorial partitioning step. The resulting \n\t\tanalysis can be used with arbitrary numeric relational \n\t\tabstract domains; we evaluate the domain on a range of \n\t\tarray manipulating program fragments.},\n keywords = {Abstract domains, Array analysis},\n}\n\n\n
@article{Gan-Son-Stu_TODAES14,\n author = {Graeme Gange and \n\t\tHarald S{\\o}ndergaard and \n\t\tPeter J. Stuckey},\n title = {Synthesizing Optimal Switching Lattices},\n journal = {ACM Transactions on Design Automation of Electronic Systems},\n volume = {20},\n number = {1},\n pages = {6:1--6:14},\n month = {nov},\n year = {2014},\n doi = {10.1145/2661632},\n abstract = {The use of nanoscale technologies to create electronic devices \n\t\thas revived interest in the use of regular structures for \n\t\tdefining complex logic functions. One such structure is the \n\t\tswitching lattice, a two-dimensional lattice of four-terminal \n\t\tswitches. We show how to directly construct switching lattices \n\t\tof polynomial size from arbitrary logic functions; we also show\n\t\thow to synthesize minimal-sized lattices by translating the \n\t\tproblem to the satisfiability problem for a restricted class of\n\t\tquantified Boolean formulas. The synthesis method is an anytime\n\t\talgorithm which uses modern SAT solving technology and \n\t\tdichotomic search. It improves considerably on an earlier \n\t\tproposal for creating switching lattices for arbitrary logic \n\t\tfunctions.},\n keywords = {Boolean logic, Logic circuits, Circuit synthesis},\n}\n\n\n
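The anytime loop mentioned in the abstract above combines a solver oracle with dichotomic (binary) search over lattice sizes. A generic sketch; `feasible` is a hypothetical stand-in for the QBF/SAT query "some lattice of this size implements the target function", not the paper's actual interface:

def minimise(feasible, lo, hi):
    # Dichotomic search for the least size in [lo, hi] accepted by the
    # oracle; tracking the best size found so far makes this an anytime
    # algorithm (interrupting it still yields a valid, if non-minimal, size).
    best = None
    while lo <= hi:
        mid = (lo + hi) // 2
        if feasible(mid):          # oracle call, e.g. a SAT/QBF solver
            best, hi = mid, mid - 1
        else:
            lo = mid + 1
    return best

print(minimise(lambda n: n >= 7, 1, 64))   # 7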
@article{Nai-Son_TPLP14,\n author = {Lee Naish and \n\t\tHarald S{\\o}ndergaard},\n title = {Truth versus Information in Logic Programming},\n journal = {Theory and Practice of Logic Programming},\n volume = {14},\n number = {6},\n pages = {803--840},\n month = {nov},\n year = {2014},\n url_Paper = {http://journals.cambridge.org/repo_A89XyMV9},\n doi = {10.1017/S1471068413000069},\n abstract = {The semantics of logic programs was originally described in \n\t\tterms of two-valued logic. Soon, however, it was realised that\n\t\tthree-valued logic had some natural advantages, as it provides \n\t\tdistinct values not only for truth and falsehood, but also for \n\t\t``undefined''. The three-valued semantics proposed by Fitting \n\t\tand by Kunen are closely related to what is computed by a \n\t\tlogic program, the third truth value being associated with \n\t\tnon-termination. A different three-valued semantics, proposed \n\t\tby Naish, shared much with those of Fitting and Kunen but \n\t\tincorporated allowances for programmer intent, the third truth \n\t\tvalue being associated with underspecification. Naish used an \n\t\t(apparently) novel ``arrow'' operator to relate the intended\n\t\tmeaning of left and right sides of predicate definitions. In \n\t\tthis paper we suggest that the additional truth values of \n\t\tFitting/Kunen and Naish are best viewed as duals. We use \n\t\tBelnap's four-valued logic, also used elsewhere by Fitting, \n\t\tto unify the two three-valued approaches. The truth values are \n\t\tarranged in a bilattice which supports the classical ordering \n\t\ton truth values as well as the ``information ordering''. We \n\t\tnote that the ``arrow'' operator of Naish (and our four-valued\n\t\textension) is essentially the information ordering, whereas the\n\t\tclassical arrow denotes the truth ordering. This allows us to \n\t\tshed new light on many aspects of logic programming, including \n\t\tprogram analysis, type and mode systems, declarative debugging \n\t\tand the relationships between specifications and programs, and \n\t\tsuccessive execution states of a program.},\n keywords = {Logic programming, Semantics, Many-valued logic},\n}\n\n\n
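The bilattice structure described above fits in a few lines of code. A sketch using the standard pair encoding of Belnap's values (the encoding is our choice, not the paper's notation): each value is a pair of flags (can-be-true, can-be-false):

# None = (0,0), False = (0,1), True = (1,0), Both = (1,1)
NONE, FALSE, TRUE, BOTH = (0, 0), (0, 1), (1, 0), (1, 1)

def leq_truth(a, b):
    # classical/truth ordering: False at the bottom, True at the top,
    # with None and Both incomparable in between
    return a[0] <= b[0] and a[1] >= b[1]

def leq_info(a, b):
    # information ordering: None at the bottom, Both at the top,
    # with False and True incomparable in between
    return a[0] <= b[0] and a[1] <= b[1]

print(leq_truth(FALSE, TRUE), leq_info(NONE, BOTH))   # True True
print(leq_truth(NONE, BOTH), leq_info(FALSE, TRUE))   # False False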
@Inproceedings{Dav-Sch-Som-Son_MSPC13,\n author = {Matthew Davis and \n\t\tPeter Schachte and \n\t\tZoltan Somogyi and\n\t\tHarald S{\\o}ndergaard},\n title = {A Low Overhead Method for Recovering Unused Memory \n\t\tInside Regions},\n booktitle = {Proceedings of the 2013 ACM Workshop on Memory Systems \n\t\tPerformance and Correctness (MSPC 2013)},\n publisher = {ACM Press},\n year = {2013},\n doi = {10.1145/2492408.2492415},\n abstract = {Automating memory management improves both resource safety\n\t\tand programmer productivity. One approach, region-based\n\t\tmemory management (RBMM), applies compile-time reasoning\n\t\tto identify points in a program at which memory can be \n\t\tsafely reclaimed. The main advantage of RBMM over \n\t\ttraditional garbage collection (GC) is the avoidance of \n\t\texpensive runtime analysis, which makes reclaiming memory\n\t\tmuch faster. On the other hand, GC requires no static \n\t\tanalysis, and, operating at runtime, can have \n\t\tsignificantly more accurate information about object \n\t\tlifetimes. In this paper we propose a hybrid system\n\t\tthat seeks to combine the advantages of both methods\n\t\twhile avoiding the overheads that previous hybrid systems\n\t\tincurred. Our system can also reclaim array segments \n\t\twhose elements are no longer reachable.},\n keywords = {Memory management, Compilation, Go},\n}\n\n\n
@InProceedings{Gan-Nav-Sch-Son-Stu_SAS13,\n author = {Graeme Gange and \n\t\tJorge A. Navas and \n\t\tPeter Schachte and\n\t\tHarald S{\\o}ndergaard and \n\t\tPeter J. Stuckey},\n title = {Abstract Interpretation over Non-Lattice Abstract Domains},\n editor = {F. Logozzo and M. F{\\"a}hndrich},\n booktitle = {Static Analysis},\n series = {Lecture Notes in Computer Science},\n volume = {7935},\n pages = {6--24},\n publisher = {Springer},\n year = {2013},\n doi = {10.1007/978-3-642-38856-9_3},\n abstract = {The classical theoretical framework for static analysis of\n\t\tprograms is abstract interpretation. Much of the power and \n\t\telegance of that framework rests on the assumption that an \n\t\tabstract domain is a lattice. Nonetheless, and for good reason,\n\t\tthe literature on program analysis provides many examples of \n\t\tnon-lattice domains, including non-convex numeric domains. \n\t\tThe lack of domain structure, however, has negative \n\t\tconsequences, both for the precision of program analysis and \n\t\tfor the termination of standard Kleene iteration. In this paper \n\t\twe explore these consequences and present general remedies.},\n keywords = {Fixed points, Abstract domains, Lattice theory},\n}\n\n\n
@Article{Gan-Nav-Sch-Son-Stu_TPLP13,\n author = {Graeme Gange and \n\t\tJorge A. Navas and \n\t\tPeter Schachte and\n\t\tHarald S{\\o}ndergaard and \n\t\tPeter J. Stuckey},\n title = {Failure Tabled Constraint Logic Programming by Interpolation},\n journal = {Theory and Practice of Logic Programming},\n volume = {13},\n number = {4--5},\n pages = {593--607},\n year = {2013},\n doi = {10.1017/S1471068413000379},\n abstract = {We present a new execution strategy for constraint logic\n\t\tprograms called \\emph{Failure Tabled CLP}. Similarly to\n\t\t\\emph{Tabled CLP} our strategy records certain derivations\n\t\tin order to prune further derivations. However, our method\n\t\tonly learns from \\emph{failed derivations}. This allows us\n\t\tto compute \\emph{interpolants} rather than \n\t\t\\emph{constraint projection} for generation of \n\t\t\\emph{reuse conditions}. As a result, our technique can\n\t\tbe used where projection is too expensive or does not \n\t\texist. Our experiments indicate that Failure Tabling can\n\t\tspeed up the execution of programs with many redundant \n\t\tfailed derivations as well as achieve termination in the\n\t\tpresence of infinite executions.},\n keywords = {Logic programming, Interpolants},\n}\n\n\n
@InProceedings{Gan-Nav-Stu-Son-Sch_TACAS13,\n author = {Graeme Gange and \n\t\tJorge A. Navas and \n\t\tPeter J. Stuckey and \n\t\tHarald S{\\o}ndergaard and \n\t\tPeter Schachte},\n title = {Unbounded Model-Checking with Interpolation for Regular\n Language Constraints},\n editor = {N. Piterman and S. Smolka},\n booktitle = {TACAS 2013: Proceedings of the 19th International\n\t\tConference on Tools and Algorithms for the Construction \n\t\tand Analysis of Systems},\n series = {Lecture Notes in Computer Science},\n volume = {7795},\n pages = {277--291},\n publisher = {Springer},\n year = {2013},\n doi = {10.1007/978-3-642-36742-7_20},\n abstract = {We present a decision procedure for the problem of, \n\t\tgiven a set of regular expressions $R_1, \\ldots, R_n$,\n\t\tdetermining whether $R = R_1 \\cap \\cdots \\cap R_n$ is \n\t\tempty. Our solver, \\textsc{revenant}, finitely unrolls \n\t\tautomata for $R_1, \\ldots, R_n$, encoding each as a \n\t\tset of propositional constraints. If a SAT solver \n\t\tdetermines satisfiability then $R$ is non-empty. Otherwise\n\t\tour solver uses unbounded model checking techniques \n\t\tto extract an interpolant from the bounded proof. \n\t\tThis interpolant serves as an overapproximation of $R$. \n\t\tIf the solver reaches a fixed-point with the constraints\n\t\tremaining unsatisfiable, it has proven $R$ to be empty.\n\t\tOtherwise, it increases the unrolling depth and repeats.\n\t\tWe compare \\textsc{revenant} with other state-of-the-art \n\t\tstring solvers. Evaluation suggests that it behaves\n\t\tbetter for constraints that express the intersection of\n\t\tsets of regular languages, a case of interest in the \n\t\tcontext of verification.},\n keywords = {Static analysis, Formal languages, Model checking, Interpolants, String constraints},\n}\n\n\n
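For orientation, the classical baseline such solvers compete with is explicit product-automaton search. A sketch of that baseline (our own, with hypothetical helper names; revenant itself works by SAT unrolling plus interpolation, as the abstract says):

from collections import deque

def product_nonempty(dfas):
    # Each DFA is (start, accepting, delta), where delta maps
    # (state, symbol) -> state and may be partial.
    syms = {s for _, _, d in dfas for (_, s) in d}
    start = tuple(q0 for q0, _, _ in dfas)
    seen, work = {start}, deque([start])
    while work:
        qs = work.popleft()
        if all(q in acc for q, (_, acc, _) in zip(qs, dfas)):
            return True                    # some word is in every language
        for s in syms:
            nxt = tuple(d.get((q, s)) for q, (_, _, d) in zip(qs, dfas))
            if None not in nxt and nxt not in seen:
                seen.add(nxt)
                work.append(nxt)
    return False                           # the intersection is empty

even_a = (0, {0}, {(0, 'a'): 1, (1, 'a'): 0})   # even number of a's
one_a  = (0, {1}, {(0, 'a'): 1, (1, 'a'): 1})   # at least one a
print(product_nonempty([even_a, one_a]))        # True, witnessed by "aa"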
@InProceedings{Gan-Son-Stu-Sch_CADE13,\n author = {Graeme Gange and \n\t\tHarald S{\\o}ndergaard and \n\t\tPeter J. Stuckey and \n\t\tPeter Schachte},\n title = {Solving Difference Constraints over Modular Arithmetic},\n editor = {M. P. Bonacina},\n booktitle = {CADE 2013: Proceedings of the 24th International\n\t\tConference on Automated Deduction},\n series = {Lecture Notes in Artificial Intelligence},\n volume = {7898},\n pages = {215--230},\n publisher = {Springer},\n year = {2013},\n doi = {10.1007/978-3-642-38574-2_15},\n abstract = {Difference logic is commonly used in program verification \n\t\tand analysis. In the context of fixed-precision integers,\n\t\tas used in assembly languages for example, the use of \n\t\tclassical difference logic is unsound. We study the problem\n\t\tof deciding difference constraints in the context of modular\n\t\tarithmetic and show that it is strongly NP-complete. We \n\t\tdiscuss the applicability of the Bellman-Ford algorithm and \n\t\trelated shortest-distance algorithms to the context of modular\n\t\tarithmetic. We explore two approaches, namely a complete \n\t\tmethod implemented using SMT technology and an incomplete \n\t\tfixpoint-based method, and the two are experimentally \n\t\tevaluated. The incomplete method performs considerably \n\t\tfaster while maintaining acceptable accuracy on a range\n\t\tof instances.},\n keywords = {Constraint solving, Program verification, Static analysis, Machine arithmetic, Fixed points},\n}\n\n\n
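Why classical difference logic is unsound here can be seen in one small experiment (ours, not the paper's method): the set {x - y <= -3, y - x <= -3} sums to 0 <= -6, a negative cycle, so Bellman-Ford-style reasoning declares it unsatisfiable over the integers; over 4-bit wrapped arithmetic it is satisfiable:

W = 4
MOD = 1 << W

def signed(r):
    # read a 4-bit residue as a signed (two's complement) value
    r %= MOD
    return r - MOD if r >= MOD // 2 else r

sols = [(x, y) for x in range(MOD) for y in range(MOD)
        if signed(x - y) <= -3 and signed(y - x) <= -3]
print(sols[:3])   # non-empty, e.g. (0, 8): wrap-around defeats the cycle test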
@Inproceedings{Dav-Sch-Som-Son_MSPC12,\n author = {Matthew Davis and \n\t\tPeter Schachte and \n\t\tZoltan Somogyi and\n\t\tHarald S{\\o}ndergaard},\n title = {Towards Region-Based Memory Management for {Go}},\n booktitle = {Proceedings of the 2012 ACM Workshop on Memory Systems \n\t\tPerformance and Correctness (MSPC 2012)},\n pages = {58--67},\n publisher = {ACM Press},\n year = {2012},\n doi = {10.1145/2247684.2247695},\n abstract = {Region-based memory management aims to lower the cost of \n\t\tdeallocation through bulk processing: instead of \n\t\trecovering the memory of each object separately, it\n\t\trecovers the memory of a region containing many objects.\n\t\tIt relies on static analysis to determine the set of \n\t\tmemory regions needed by a program, the program points at\n\t\twhich each region should be created and removed, and, for\n\t\teach memory allocation, the region that should supply the\n\t\tmemory. The concurrent language Go has features that pose\n\t\tinteresting challenges for this analysis. We present a \n\t\tnovel design for region-based memory management for Go,\n\t\tcombining static analysis, to guide region creation, and\n\t\tlightweight runtime bookkeeping, to help control \n\t\treclamation. The main advantage of our approach is that\n\t\tit greatly limits the amount of re-work that must be done\n\t\tafter each change to the program source code, making our\n\t\tapproach more practical than existing RBMM systems. Our\n\t\tprototype implementation covers most of the sequential\n\t\tfragment of Go, and preliminary results are encouraging.},\n keywords = {Memory management, Go},\n}\n\n\n
@InProceedings{Nai-Son-Hor_CATS12,\n author = {Lee Naish and \n\t\tHarald S{\\o}ndergaard and \n\t\tBenjamin Horsfall},\n title = {Logic Programming: From Underspecification to Undefinedness},\n editor = {J. Mestre},\n booktitle = {Theory of Computing 2012},\n series = {Conferences in Research and Practice in Information Technology},\n volume = {128},\n pages = {49--58},\n year = {2012},\n url = {http://crpit.com/Vol128.html},\n abstract = {The semantics of logic programs was originally described in \n\t\tterms of two-valued logic. Soon, however, it was realised that \n\t\tthree-valued logic had some natural advantages, as it provides \n\t\tdistinct values not only for truth and falsehood, but also for\n\t\t``undefined''. The three-valued semantics proposed by Fitting and\n\t\tby Kunen are closely related to what is computed by a logic \n\t\tprogram, the third truth value being associated with \n\t\tnon-termination. A different three-valued semantics, proposed \n\t\tby Naish, shared much with those of Fitting and Kunen but \n\t\tincorporated allowances for programmer intent, the third truth \n\t\tvalue being associated with underspecification. Naish used an \n\t\t(apparently) novel ``arrow'' operator to relate the intended \n\t\tmeaning of left and right sides of predicate definitions. In \n\t\tthis paper we suggest that the additional truth values of\n\t\tFitting/Kunen and Naish are best viewed as duals. We use \n\t\tFitting's later four-valued approach to unify the two \n\t\tthree-valued approaches. The additional truth value has very \n\t\tlittle effect on the Fitting three-valued semantics, though it\n\t\tcan be useful when finding approximations to this semantics for\n\t\tprogram analysis. For the Naish semantics, the extra truth \n\t\tvalue allows intended interpretations to be more expressive, \n\t\tallowing us to verify and debug a larger class of programs. \n\t\tWe also explain that the ``arrow'' operator of Naish (and our \n\t\tfour-valued extension) is essentially the information ordering.\n\t\tThis sheds new light on the relationships between \n\t\tspecifications and programs, and successive execution states \n\t\tof a program.},\n keywords = {Logic programming, Semantics, Many-valued logic},\n}\n\n\n
@InProceedings{Nav-Sch-Son-Stu_APLAS12,\n author = {Jorge A. Navas and \n\t\tPeter Schachte and \n\t\tHarald S{\\o}ndergaard and\n\t\tPeter J. Stuckey},\n title = {Signedness-Agnostic Program Analysis: \n\t\tPrecise Integer Bounds for Low-Level Code},\n editor = {R. Jhala and A. Igarashi},\n booktitle = {APLAS 2012: Proceedings of the 10th Asian Symposium \n \ton Programming Languages and Systems},\n series = {Lecture Notes in Computer Science},\n volume = {7705},\n pages = {115--130},\n publisher = {Springer},\n year = {2012},\n doi = {10.1007/978-3-642-35182-2_9},\n abstract = {Many compilers target common back-ends, thereby \n\t\tavoiding the need to implement the same analyses \n\t\tfor many different source languages. This has led\n\t\tto interest in static analysis of LLVM code. \n\t\tIn LLVM (and similar languages) most signedness \n\t\tinformation associated with variables has been \n\t\tcompiled away. Current analyses of LLVM code tend\n\t\tto assume that either all values are signed or all\n\t\tare unsigned (except where the code specifies the \n\t\tsignedness). We show how program analysis can \n\t\tsimultaneously consider each bit-string to be both \n\t\tsigned and unsigned, thus improving precision, \n\t\tand we implement the idea for the specific case of \n\t\tinteger bounds analysis. Experimental evaluation\n\t\tshows that this provides higher precision at little\n\t\textra cost. Our approach turns out to be beneficial\n\t\teven when all signedness information is available,\n\t\tsuch as when analysing C or Java code.},\n keywords = {Abstract domains, Abstract interpretation, Interval analysis, Machine arithmetic, C analysis, LLVM},\n}\n\n\n
@article{Son-Mul_CSE12,\n author = {Harald S{\\o}ndergaard and \n\t\tRaoul Mulder},\n title = {Collaborative Learning through Formative Peer Review:\n\t\tPedagogy, Programs and Potential},\n journal = {Computer Science Education},\n volume = {22},\n number = {4},\n pages = {343--367},\n year = {2012},\n doi = {10.1080/08993408.2012.728041},\n abstract = {We examine student peer review, with an emphasis on \n\t\tformative practice and collaborative learning, rather \n\t\tthan peer grading. Opportunities to engage students in \n\t\tsuch formative peer assessment are growing, as a range \n\t\tof online tools become available to manage and simplify \n\t\tthe process of administering student peer review. \n\t\tWe consider whether pedagogical requirements for student\n\t\tpeer review are likely to be discipline-specific, taking\n\t\tcomputer science and software engineering as an example.\n\t\tWe then summarise what attributes are important for a \n\t\tmodern generic peer review tool, and classify tools \n\t\taccording to four prevalent emphases, using currently \n\t\tavailable, mature tools to illustrate each. We conclude \n\t\tby identifying some gaps in current understanding of \n\t\tformative peer review, and discuss how online tools for \n\t\tstudent peer review can help create opportunities to \n\t\tanswer some of these questions.},\n keywords = {Education, Peer review},\n}\n\n\n
@article{Hen-Sch-Son-Whi_CJTCS10,\n author = {Kevin Henshall and \n\t\tPeter Schachte and \n\t\tHarald S{\\o}ndergaard and\n\t\tLeigh Whiting},\n title = {An Algorithm for Affine Approximation of Binary Decision \n\t\tDiagrams},\n journal = {Chicago Journal of Theoretical Computer Science},\n volume = {2010},\n number = {11},\n publisher = {University of Chicago},\n month = {jun},\n year = {2010},\n doi = {10.4086/cjtcs.2010.011},\n abstract = {This paper is concerned with the problem of Boolean approximation\n\t\tin the following sense: given a Boolean function class and an\n\t\tarbitrary Boolean function, what is the function's best proxy in\n\t\tthe class? Specifically, what is its strongest logical \n\t\tconsequence (or \\emph{envelope}) in the class of \\emph{affine} \n\t\tBoolean functions. We prove various properties of affine Boolean\n\t\tfunctions and their representation as ROBDDs. Using these \n\t\tproperties, we develop an ROBDD algorithm to find the affine \n\t\tenvelope of a Boolean function.},\n keywords = {Boolean logic, Boolean approximation, Knowledge compilation},\n}\n\n\n
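The affine class above has a compact semantic characterization that yields a brute-force reference test (ours; the paper's contribution is doing this symbolically on ROBDDs): f is affine iff f(x) xor f(y) xor f(z) = f(x xor y xor z) for all assignments x, y, z:

def is_affine(f, n):
    # f maps an n-bit integer (a truth assignment) to 0 or 1
    pts = range(1 << n)
    return all(f(x) ^ f(y) ^ f(z) == f(x ^ y ^ z)
               for x in pts for y in pts for z in pts)

xor2 = lambda v: (v & 1) ^ ((v >> 1) & 1)       # x0 xor x1: affine
and2 = lambda v: (v & 1) & ((v >> 1) & 1)       # x0 and x1: not affine
print(is_affine(xor2, 2), is_affine(and2, 2))   # True False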
@Inproceedings{Kin-Son_VMCAI10,\n author = {Andy King and \n\t\tHarald S{\\o}ndergaard},\n title = {Automatic Abstraction for Congruences},\n editor = {G. Barthe and M. Hermenegildo},\n booktitle = {Verification, Model Checking and Abstract Interpretation},\n series = {Lecture Notes in Computer Science},\n volume = {5944},\n pages = {197--213},\n publisher = {Springer},\n year = {2010},\n doi = {10.1007/978-3-642-11319-2_16},\n abstract = {One approach to verifying bit-twiddling algorithms is to derive \n\t\tinvariants between the bits that constitute the variables of a \n\t\tprogram. Such invariants can often be described with systems\n\t\tof congruences where in each equation \n\t\t$\\vec{c} \\cdot \\vec{x} = d \\mod m$, $m$ is a power of two, \n\t\t$\\vec{c}$ is a vector of integer coefficients, and $\\vec{x}$ is\n\t\ta vector of propositional variables (bits). Because of the \n\t\tlow-level nature of these invariants and the large number of\n\t\tbits that are involved, it is important that the transfer \n\t\tfunctions can be derived automatically. We address this problem,\n\t\tshowing how an analysis for bit-level congruence relationships \n\t\tcan be decoupled into two parts:\n\t\t(1) a SAT-based abstraction (compilation) step which can be \n\t\tautomated, and\n\t\t(2) an interpretation step that requires no SAT-solving.\n\t\tWe exploit triangular matrix forms to derive transfer functions\n\t\tefficiently, even in the presence of large numbers of bits. \n\t\tFinally we propose program transformations that improve the \n\t\tanalysis results.},\n keywords = {Congruence analysis, Abstract interpretation, Abstract domains, Boolean logic},\n}\n\n\n
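The invariants in question are concrete enough to check directly. A toy illustration (ours, with a hypothetical one-instruction block; the paper derives transfer functions for such congruences via SAT rather than by enumeration):

W = 4
MOD = 1 << W

def block(x):
    # the basic block under analysis: y := 2*x + 1 on 4-bit words
    return (2 * x + 1) % MOD

# candidate invariant: y - 2x = 1 (mod 2^4), an instance of
# c . x = d mod m with m a power of two
print(all((block(x) - 2 * x) % MOD == 1 for x in range(MOD)))   # True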
@Article{Sch-Son-Whi-Hen_AIJ10,\n author = {Peter Schachte and \n\t\tHarald S{\\o}ndergaard and \n\t\tLeigh Whiting and\n\t\tKevin Henshall},\n title = {Information Loss in Knowledge Compilation: \n\t\tA Comparison of {Boolean} Envelopes},\n journal = {Artificial Intelligence},\n volume = {174},\n number = {9--10},\n pages = {585--596},\n year = {2010},\n doi = {10.1016/j.artint.2010.03.003},\n abstract = {Since Selman and Kautz's seminal work on the use of Horn \n\t\tapproximation to speed up the querying of knowledge bases, \n\t\tthere has been great interest in Boolean approximation for \n\t\tAI applications. There are several Boolean classes with \n\t\tdesirable computational properties similar to those of the\n\t\tHorn class. The class of affine Boolean functions, for example,\n\t\thas been proposed as an interesting alternative to Horn for \n\t\tknowledge compilation. To investigate the trade-offs between\n\t\tprecision and efficiency in knowledge compilation, we compare, \n\t\tanalytically and empirically, four well-known Boolean classes,\n\t\tand their combinations, for ability to preserve information.\n\t\tWe note that traditional evaluation which explores unit-clause\n\t\tconsequences of random hard 3-CNF formulas does not tell the \n\t\tfull story, and we complement that evaluation with experiments \n\t\tbased on a variety of assumptions about queries and the \n\t\tunderlying knowledge base.},\n keywords = {Boolean logic, Boolean approximation, Knowledge compilation, Artificial intelligence},\n}\n\n\n
@Inproceedings{Cha-Ste-Son-Had_REES09,\n author = {Rosemary Chang and\n\t\tLinda Stern and\n\t\tHarald S{\\o}ndergaard and\n\t\tRoger Hadgraft},\n title = {Places for Learning Engineering: \n\t\tA Preliminary Report on Informal Learning Spaces},\n booktitle = {Proceedings of the 2009 Research in Engineering Education \n\t\tSymposium},\n location = {Palm Cove, Queensland},\n year = {2009},\n keywords = {Education},\n}\n\n\n
@Inproceedings{Han-Sch-Son_RV09,\n author = {Trevor Hansen and \n\t\tPeter Schachte and \n\t\tHarald S{\\o}ndergaard},\n title = {State Joining and Splitting for the Symbolic Execution \n\t\tof Binaries},\n editor = {S. Bensalem and D. A. Peled},\n booktitle = {Runtime Verification},\n series = {Lecture Notes in Computer Science},\n volume = {5779},\n pages = {76--92},\n publisher = {Springer},\n year = {2009},\n doi = {10.1007/978-3-642-04694-0_6},\n abstract = {Symbolic execution can be used to explore the possible run-time \n\t\tstates of a program. It makes use of a concept of ``state'' \n\t\twhere a variable's value has been replaced by an expression \n\t\tthat gives the value as a function of program input. \n\t\tAdditionally, a state can be equipped with a summary of \n\t\tcontrol-flow history: a ``path constraint'' keeps track of the\n\t\tclass of inputs that would have caused the same flow of control.\n\t\tBut even simple programs can have trillions of paths, so a \n\t\tpath-by-path analysis is impractical. We investigate a \n\t\t``state joining'' approach to making symbolic execution more \n\t\tpractical and describe the challenges of applying state joining\n\t\tto the analysis of unmodified Linux x86 executables. The results\n\t\tso far are mixed, with good results for some code. On other\n\t\texamples, state joining produces cumbersome constraints that \n\t\tare more expensive to solve than those generated by normal \n\t\tsymbolic execution.},\n keywords = {Symbolic execution},\n}\n\n\n
@InProceedings{Hen-Sch-Son-Whi_CATS09,\n author = {Kevin Henshall and \n\t\tPeter Schachte and \n\t\tHarald S{\\o}ndergaard and \n\t\tLeigh Whiting},\n title = {Boolean Affine Approximation with Binary Decision Diagrams},\n editor = {R. Downey and P. Manyem},\n booktitle = {Theory of Computing 2009},\n series = {Conferences in Research and Practice in Information Technology},\n volume = {94},\n pages = {121--129},\n year = {2009},\n url = {http://crpit.com/Vol94.html},\n abstract ={Selman and Kautz's work on knowledge compilation has\n\t\testablished how approximation (strengthening and/or\n\t\tweakening) of a propositional knowledge-base can be used to\n\t\tspeed up query processing, at the expense of completeness.\n\t\tIn the classical approach, the knowledge-base is assumed\n\t\tto be presented as a propositional formula in conjunctive\n\t\tnormal form (CNF), and Horn functions are used to over-\n\t\tand under-approximate it (in the hope that many queries\n\t\tcan be answered efficiently using the approximations only).\n\t\tHowever, other representations are possible, and functions\n\t\tother than Horn can be used for approximations, as long\n\t\tas they have deduction-computational properties similar to\n\t\tthose of the Horn functions. Zanuttini has suggested that\n\t\tthe class of affine Boolean functions would be especially\n\t\tuseful in knowledge compilation and has presented various\n\t\taffine approximation algorithms. Since CNF is awkward\n\t\tfor presenting affine functions, Zanuttini considers both\n\t\ta sets-of-models representation and the use of modulo 2\n\t\tcongruence equations. Here we consider the use of reduced\n\t\tordered binary decision diagrams (ROBDDs), a representation\n\t\twhich is more compact than the sets of models and which\n\t\t(unlike modulo 2 congruences) can express any source\n\t\tknowledge-base. We present an ROBDD algorithm to find\n\t\tstrongest affine upper-approximations of a Boolean function\n\t\tand we argue its correctness.},\n keywords = {Boolean logic, Boolean approximation, Knowledge compilation, ROBDDs},\n}\n\n\n
@InProceedings{Son_ITiCSE09,\n author = {Harald S{\\o}ndergaard},\n title = {Learning From and With Peers:\n\t\tThe Different Roles of Student Peer Reviewing},\n booktitle = {Proceedings of the 14th Annual SIGCSE/SIGCUE Conference on\n\t\tInnovation and Technology in Computer Science Education},\n pages = {31--35},\n publisher = {Association for Computing Machinery},\n address = {New York, NY},\n year = {2009},\n location = {Paris, France},\n doi = {10.1145/3263191},\n abstract = {There are many different approaches to student peer assessment.\n\t\tIn this paper I lay out the pedagogical philosophy behind my \n\t\town use of student peer reviews. These should not only be \n\t\tseen as adding to the amount of formative feedback in a class,\n\t\tnor are they only about the development of certain higher-order\n\t\tcognitive skills. Properly aligned with an overall assessment\n\t\tstrategy, peer reviewing can help build a stronger learning \n\t\tcommunity. I describe such a strategy and my experience using\n\t\tPRAZE, an online tool for student peer reviewing, as well as \n\t\tstudents' response to the tool and its use.},\n keywords = {Education, Peer review},\n}\n\n\n
@Inproceedings{Kin-Son_CAV08,\n author = {Andy King and \n\t\tHarald S{\\o}ndergaard},\n title = {Inferring Congruence Equations with {SAT}},\n editor = {A. Gupta and S. Malik},\n booktitle = {Computer Aided Verification},\n series = {Lecture Notes in Computer Science},\n volume = {5123},\n pages = {281--293},\n publisher = {Springer},\n year = {2008},\n doi = {10.1007/978-3-540-70545-1_26},\n abstract = {This paper proposes a new approach for deriving invariants \n\t\tthat are systems of congruence equations where the modulo\n\t\tis a power of 2. The technique is an amalgam of SAT-solving,\n\t\twhere a propositional formula is used to encode the semantics \n\t\tof a basic block, and abstraction, where the solutions to the\n\t\tformula are systematically combined and summarised as a system\n\t\tof congruence equations. The resulting technique is more\n\t\tprecise than existing congruence analyses since a single optimal\n\t\ttransfer function is derived for a basic block as a whole.},\n keywords = {Congruence analysis, Abstract interpretation, Abstract domains, Boolean logic},\n}\n\n\n
@Article{Her-Sch-Son_IJFCS07,\n author = {Brian Herlihy and \n\t\tPeter Schachte and \n\t\tHarald S{\\o}ndergaard},\n title = {Un-{Kleene} {Boolean} Equation Solving},\n journal = {International Journal on Foundations of Computer Science},\n volume = {18},\n number = {2},\n pages = {227--250},\n year = {2007},\n doi = {10.1142/S0129054107004668},\n abstract = {We present a new method for finding closed forms of recursive \n\t\tBoolean function definitions. Traditionally, these closed forms\n\t\tare found by Kleene iteration: iterative approximation until a \n\t\tfixed point is reached. Conceptually, our new method replaces \n\t\teach $k$-ary function by $2^k$ Boolean constants defined by \n\t\tmutual recursion. The introduction of an exponential number of\n\t\tconstants is mitigated by the simplicity of their definitions \n\t\tand by the use of a novel variant of ROBDDs to avoid repeated \n\t\tcomputation. Experiments suggest that this approach is \n\t\tsignificantly faster than Kleene iteration for examples that \n\t\trequire many Kleene iteration steps.},\n keywords = {Fixed points, ROBDDs},\n}\n\n\n
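The central move of the abstract above can be miniaturised. A toy rendering (ours; no ROBDDs, and the constants' recursions here happen to be independent rather than mutual): for a unary f defined by f(x) = x or f(x), introduce the constants f0 = f(False) and f1 = f(True), solve each small recursion, and recover the closed form by Shannon expansion:

def solve_const(step):
    # least solution of v = step(v) over False < True, by iteration
    v = False
    while step(v) != v:
        v = step(v)
    return v

f0 = solve_const(lambda v: False or v)    # body instantiated at x = False
f1 = solve_const(lambda v: True or v)     # body instantiated at x = True
f = lambda x: f1 if x else f0             # Shannon expansion: closed form
print(f(False), f(True))                  # False True, i.e. f(x) = x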
@Inproceedings{Sch-Son_SARA07,\n author = {Peter Schachte and \n\t\tHarald S{\\o}ndergaard},\n title = {Boolean Approximation Revisited},\n editor = {I. Miguel and W. Ruml},\n booktitle = {Abstraction, Reformulation and Approximation:\n \tProceedings of {SARA} 2007},\n series = {Lecture Notes in Artificial Intelligence},\n volume = {4612},\n pages = {329--343},\n publisher = {Springer},\n year = {2007},\n doi = {10.1007/978-3-540-73580-9_26},\n abstract = {Most work to date on Boolean approximation assumes that\n\t\tBoolean functions are represented by formulas in\n\t\tconjunctive normal form. That assumption is appropriate \n\t\tfor the classical applications of Boolean approximation \n\t\tbut potentially limits wider use. We revisit, in a \n\t\tlattice-theoretic setting, so-called envelopes and cores\n\t\tin propositional logic, identifying them with upper and\n\t\tlower closure operators, respectively. This leads to \n\t\trecursive representation-independent characterisations\n\t\tof Boolean approximation for a large class of classes.\n\t\tWe show that Boolean development can be applied in a \n\t\trepresentation-independent setting to develop approximation\n\t\talgorithms for a broad range of Boolean classes, including\n\t\tHorn and Krom functions.},\n keywords = {Boolean logic, Boolean approximation, Lattice theory},\n}\n\n\n
@Proceedings{Son-Had_AAEE07,\n editor = {Harald S{\\o}ndergaard and \n\t\tRoger Hadgraft},\n title = {Proceedings of the 18th Conference of the \n\t\tAustralasian Association for Engineering Education},\n publisher = {Department of Computer Science and Software Engineering,\n\t\tThe University of Melbourne, Vic 3010, Australia},\n month = {dec},\n year = {2007},\n isbn = {978-0-9757172-1-9},\n note = {Published on CD-ROM and online},\n keywords = {Education},\n}\n\n\n
@InProceedings{Her-Sch-Son_CATS06,\n author = {Brian Herlihy and \n\t\tPeter Schachte and \n\t\tHarald S{\\o}ndergaard},\n title = {Boolean Equation Solving as Graph Traversal},\n editor = {J. Gudmundsson and B. Jay},\n booktitle = {Theory of Computing 2006},\n series = {Conferences in Research and Practice in Information Technology},\n volume = {51},\n pages = {123--132},\n year = {2006},\n url = {http://crpit.com/Vol51.html},\n abstract = {We present a new method for finding closed forms of recursive \n\t\tBoolean function definitions. Traditionally, these closed forms\n\t\tare found by iteratively approximating until a fixed point is \n\t\treached. Conceptually, our new method replaces each $k$-ary \n\t\tfunction by $2^k$ Boolean variables defined by mutual recursion.\n\t\tThe introduction of an exponential number of variables is \n\t\tmitigated by the simplicity of their definitions and by the use\n\t\tof a novel variant of ROBDDs to avoid repeated computation.\n\t\tExperimental evaluation suggests that this approach is \n\t\tsignificantly faster than Kleene iteration for examples that \n\t\twould require many Kleene iteration steps.},\n keywords = {Fixed points, ROBDDs},\n}\n\n\n
@Inproceedings{Sch-Son_VMCAI06,\n author = {Peter Schachte and \n\t\tHarald S{\\o}ndergaard},\n title = {Closure Operators for {ROBDDs}},\n editor = {E. A. Emerson and K. S. Namjoshi},\n booktitle = {Proceedings of the Seventh International Conference on\n \tVerification, Model Checking and Abstract Interpretation},\n series = {Lecture Notes in Computer Science},\n volume = {3855},\n pages = {1--16},\n publisher = {Springer},\n year = {2006},\n doi = {10.1007/11609773_1},\n abstract = {Program analysis commonly makes use of Boolean functions to \n\t\texpress information about run-time states. Many important \n\t\tclasses of Boolean functions used this way, such as the\n\t\tmonotone functions and the Boolean Horn functions, have\n\t\tsimple semantic characterisations. They also have well-known \n\t\tsyntactic characterisations in terms of Boolean formulae, say,\n\t\tin conjunctive normal form. Here we are concerned with \n\t\tcharacterisations using binary decision diagrams. Over the \n\t\tlast decade, ROBDDs have become popular as representations of \n\t\tBoolean functions, mainly for their algorithmic properties.\n\t\tAssuming ROBDDs as representation, we address the following \n\t\tproblems: Given a function $\\psi$ and a class of functions \n\t\t$\\class$, how to find the strongest $\\varphi \\in \\class$ \n\t\tentailed by $\\psi$ (when such a $\\varphi$ is known to exist)?\n\t\tHow to find the weakest $\\varphi \\in \\class$ that entails \n\t\t$\\psi$? How to determine that a function $\\psi$ belongs to a \n\t\tclass $\\class$? Answers are important, not only for several \n\t\tprogram analyses, but for other areas of computer science, \n\t\twhere Boolean approximation is used. We give, for many commonly\n\t\tused classes $\\class$ of Boolean functions, algorithms to \n\t\tapproximate functions represented as ROBDDs, in the sense \n\t\tdescribed above. The algorithms implement upper closure \n\t\toperators, familiar from abstract interpretation. They \n\t\timmediately lead to algorithms for deciding class membership.},\n keywords = {Boolean logic, Boolean approximation, ROBDDs},\n}\n\n\n
@InProceedings{Mof-Hug-Son-Gru_ACE05,\n author = {Alistair Moffat and \n\t\tBaden Hughes and \n\t\tHarald S{\\o}ndergaard and \n\t\tPaul Gruba},\n title = {Making Connections: First Year Transition for Computer\n\t\tScience and Software Engineering Students},\n editor = {A. Young and D. Tolhurst},\n booktitle = {Proceedings of the Seventh Australasian Computing Education \n\t\tConference (ACE2005)},\n series = {Conferences in Research and Practice in Information Technology},\n volume = {42},\n pages = {229--238},\n year = {2005},\n abstract = {During the last decade, an increasing emphasis has been placed \n\t\ton the need for carefully planned transition programs to help \n\t\tfirst-year students integrate into university. In this paper \n\t\twe critically examine our experiences in designing and running\n\t\tsuccessive transition programs for Computer Science and \n\t\tSoftware Engineering students. Over the last three years we \n\t\thave trialled several models. At present, our program requires \n\t\tall entering students to be enrolled in a transition subject,\n\t\t``Making Connections'', which runs for half a semester. The\n\t\tsubject, led by designated academic staff, serves as a forum \n\t\tfor students to learn about each other, the department and the \n\t\tuniversity. The program includes a computer-based language and\n\t\tstudy skills assessment component, including self-assessment \n\t\ttasks. Students can extend the subject by taking academic \n\t\tskills workshops run by the university's student support \n\t\tservices. We have found compulsion to be a useful facilitator \n\t\tof student engagement, and the addition of an objective \n\t\tassessment task has been beneficial.},\n keywords = {Education, Student transition programs},\n}\n\n\n
@InProceedings{Gru-Mof-Son-Zob_ACE04,\n author = {Paul Gruba and \n\t\tAlistair Moffat and \n\t\tHarald S{\\o}ndergaard and \n\t\tJustin Zobel},\n title = {What Drives Curriculum Change?},\n editor = {R. Lister and A. Young},\n booktitle = {Proceedings of the Sixth Australasian Computing\n Education Conference (ACE2004)},\n location = {Dunedin, New Zealand},\n pages = {109--117},\n month = {jan},\n year = {2004},\n abstract = {While promotional literature about computer science programs \n\t\tmay claim that curricula are determined by the needs of the \n\t\tstudents and by international best practice, the reality is \n\t\toften different. In this paper we reflect on the factors \n\t\tunderlying curriculum change in computer science departments \n\t\tand schools, from institutional requirements and financial \n\t\tpressures to purely academic considerations. We have used \n\t\tthese reflections as the basis of an investigation of \n\t\tcurriculum management practices at institutions in Australasia,\n\t\tvia a survey instrument sent to a range of colleagues. Our \n\t\tfindings from the survey are consistent with our own \n\t\texperiences, namely, that curriculum change is driven or \n\t\tinhibited by factors such as vocal individuals and practical\n\t\tconstraints rather than higher academic motives.},\n keywords = {Education, Computer science curricula},\n}\n\n\n
@InProceedings{Son-Tho_FIE04,\n author = {Harald S{\\o}ndergaard and \n\t\tDoreen Thomas},\n title = {Effective Feedback to Small and Large Classes},\n booktitle = {Proceedings of the 2004 ASEE/IEEE Frontiers in\n\t\tEducation Conference (FIE2004)},\n pages = {F1E-9--F1E-14},\n location = {Savannah GA},\n month = {oct},\n year = {2004},\n doi = {10.1109/FIE.2004.1408573},\n abstract = {Educational experts appear to be in broad agreement when it \n\t\tcomes to the importance of feedback for effective learning. \n\t\tStudents benefit from plenty of opportunity and encouragement \n\t\tto express their understanding, and from informed, supportive, \n\t\tpossibly challenging, feedback. At the same time, we observe \n\t\tthat many students at our university do not find that they \n\t\treceive helpful feedback. One in three Engineering students \n\t\tdisagree or strongly disagree with the Quality of Teaching \n\t\tquestionnaire's ``I received helpful feedback on how I was \n\t\tgoing'' in the individual course, and most other disciplines \n\t\tfind themselves in a similar situation. For the university as \n\t\ta whole, student responses to this question are clearly less \n\t\tpositive than to other questions on quality of teaching,\n\t\tintellectual stimulation, staff interest, workload, and so on,\n\t\tand this state of affairs seems quite common in the Australian\n\t\tcontext. We discuss best practice in feedback provision, partly\n\t\tbased on our interviews with students and staff. We have been \n\t\tparticularly interested in identifying cost-effective ways of \n\t\tproviding informed and constructive feedback to large classes.\n\t\tFeedback is often understood, by Engineering students and staff\n\t\talike, simply as comments on submitted work---typically written\n\t\tassignments. We argue in favour of a broader concept that \n\t\tcovers a multitude of ways for a student to develop deep \n\t\tlearning through conversation, including questions and answers\n\t\tprovided by others, team work, study groups, and formative\n\t\tteacher-provided feedback during an assessment task. We \n\t\temphasise the coaching role of the teacher, and feedback \n\t\tdesigned to encourage students to monitor their own learning. Large \n\t\tclasses pose particular logistic problems. We identify staff \n\t\tdevelopment as a crucial factor for consistent, effective \n\t\tfeedback, and point to web-based feedback provision as a \n\t\tworkable solution to some logistic problems. We briefly discuss\n\t\tthe role of information technology more broadly, both for \n\t\tlearning enhancement and for automated feedback provision.},\n keywords = {Education},\n}\n\n\n
@Incollection{Cod-Son_JonesFest02,\n author = {Michael Codish and \n\t\tHarald S{\\o}ndergaard},\n title = {Meta-Circular Abstract Interpretation in {Prolog}},\n editor = {T. Mogensen and D. Schmidt and I. H. Sudborough},\n booktitle = {The Essence of Computation: Complexity, Analysis, Transformation},\n series = {Lecture Notes in Computer Science},\n volume = {2566},\n pages = {109--134},\n publisher = {Springer},\n year = {2002},\n doi = {10.1007/3-540-36377-7_6},\n abstract = {We give an introduction to the meta-circular approach to the\n\t\tabstract interpretation of logic programs. This approach is\n\t\tparticularly useful for prototyping and for introductory \n\t\tclasses on abstract interpretation. Using interpreters, \n\t\tstudents can immediately write, adapt, and experiment with \n\t\tinterpreters and working dataflow analysers. We use a simple\n\t\tmeta-circular interpreter, based on a ``non-ground $T_P$'' \n\t\tsemantics, as a generic analysis engine. Instantiating the\n\t\tengine is a matter of providing an appropriate domain of\n\t\tapproximations, together with definitions of ``abstract'' \n\t\tunification and disjunction. Small changes of the interpreter\n\t\tlet us vary both what can be ``observed'' by an analyser, and\n\t\thow fixed point computation is done. Amongst the dataflow \n\t\tanalyses used to exemplify this approach are a parity analysis,\n\t\tgroundness dependency analysis, call patterns, depth-$k$ \n\t\tanalysis, and a ``pattern'' analysis to establish most specific\n\t\tgeneralisations of calls and success sets.},\n keywords = {Abstract interpretation, Abstract domains, Logic programming, Prolog},\n}\n\n\n
@Inproceedings{Gly-Stu-Sul-Son_ICFP02,\n author = {Kevin Glynn and \n\t\tPeter J. Stuckey and \n\t\tMartin Sulzmann and\n\t\tHarald S{\\o}ndergaard},\n title = {Exception Analysis for Non-Strict Languages},\n booktitle = {Proceedings of the 2002 ACM SIGPLAN International Conference on\n \tFunctional Programming},\n pages = {98--109},\n publisher = {ACM Press},\n year = {2002},\n doi = {10.1145/581478.581488},\n abstract = {In this paper we present the first exception analysis\n\t\tfor a non-strict language. We augment a simply-typed \n\t\tfunctional language with exceptions, and show that we \n\t\tcan define a type-based inference system to detect \n\t\tuncaught exceptions. We have implemented this exception\n\t\tanalysis in the GHC compiler for Haskell, which has been\n\t\trecently extended with exceptions. We give empirical \n\t\tevidence that the analysis is practical.},\n keywords = {Exception analysis, Functional programming},\n}\n\n\n
@Inproceedings{Cod-Gen-Son-Stu_ICLP01,\n author = {Michael Codish and \n\t\tSamir Genaim and \n\t\tHarald S{\\o}ndergaard and \n\t\tPeter J. Stuckey},\n title = {Higher-Precision Groundness Analysis},\n editor = {P. Codognet},\n booktitle = {Logic Programming: Proceedings of the 17th International \n\t\tConference},\n series = {Lecture Notes in Computer Science},\n volume = {2237},\n pages = {135--149},\n publisher = {Springer},\n year = {2001},\n doi = {10.1007/3-540-45635-x_17},\n abstract = {Groundness analysis of logic programs using Pos-based abstract\n\t\tinterpretation is one of the clear success stories of the last\n\t\tdecade in the area of logic program analysis. In this work we\n\t\tidentify two problems with the Pos domain, the multiplicity and\n\t\tsign problems, that arise independently in groundness and \n\t\tuniqueness analysis. We describe how these problems can be \n\t\tsolved using an analysis based on a domain Size for inferring \n\t\tterm size relations. However this solution has its own \n\t\tshortcomings because it involves a widening operator which leads\n\t\tto a loss of Pos information. Inspired by Pos, Size and the\n\t\tLsign domain for abstract linear arithmetic constraints we\n\t\tintroduce a new domain Lpos, and show how it can be used for\n\t\tgroundness and uniqueness analysis. The idea is to use the sign\n\t\tinformation of Lsign to improve the widening of Size so that\n\t\tit does not lose Pos information. We prove that the resulting \n\t\tanalyses using Lpos are uniformly more precise than those using\n\t\tPos.},\n keywords = {Boolean logic, Abstract domains, Abstract interpretation, Logic programming},\n}\n\n\n
@Inproceedings{Gly-Stu-Sul-Son_PADO01,\n author = {Kevin Glynn and \n\t\tPeter J. Stuckey and \n\t\tMartin Sulzmann and\n\t\tHarald S{\\o}ndergaard},\n title = {Boolean Constraints for Binding-Time Analysis},\n editor = {O. Danvy and A. Filinski},\n booktitle = {Programs as Data Objects: Proceedings of the Second Symposium},\n series = {Lecture Notes in Computer Science},\n volume = {2053},\n pages = {39--63},\n publisher = {Springer},\n year = {2001},\n doi = {10.1007/3-540-44978-7_4},\n abstract = {To achieve acceptable accuracy, many program analyses for\n\t\tfunctional programs are ``property polymorphic''.\n\t\tThat is, they can infer different input-output relations \n\t\tfor a function at separate applications of the function, \n\t\tin a manner similar to type inference for a polymorphic \n\t\tlanguage. We extend a property polymorphic (or ``polyvariant'')\n\t\tmethod for binding-time analysis, due to Dussart, Henglein, \n\t\tand Mossin, so that it applies to languages with ML-style \n\t\ttype polymorphism. The extension is non-trivial \n\t\tand we have implemented it for Haskell. While we follow \n\t\tothers in specifying the analysis as a non-standard type\n\t\tinference, we argue that it should be realised through a \n\t\ttranslation into the well-understood domain of Boolean \n\t\tconstraints. The expressiveness offered by Boolean \n\t\tconstraints opens the way for smooth extensions to \n\t\tsophisticated language features and it allows for \n\t\tmore accurate analysis.},\n keywords = {Boolean logic, Binding-time analysis, Haskell},\n}\n\n\n
@Article{Gru-Son_CSE01,\n author = {Paul Gruba and \n\t\tHarald S{\\o}ndergaard},\n title = {A Constructivist Approach to Communication Skills Instruction \n\t\tin Computer Science},\n journal = {Computer Science Education},\n volume = {11},\n number = {3},\n pages = {203--219},\n year = {2001},\n doi = {10.1076/csed.11.3.203.3833},\n abstract = {From the social constructivist perspective of education, \n\t\tlearning is best achieved when students face complex, real \n\t\tworld problems in which there are no clear answers. Faced with\n\t\ta sizable common goal, students work collaboratively towards \n\t\toutcomes and maintain ownership over key decisions. The role \n\t\tof staff is that of facilitators whose role is to challenge \n\t\tlearners to explore multiple aspects of the problem as they go \n\t\tabout reaching viable solutions. Such a role contrasts, for \n\t\texample, to an approach which sets out to lead students to a \n\t\tpresumed correct solution that is already possessed by the\n\t\tinstructor. Based on these principles we designed and \n\t\timplemented a course on communication skills in Computer \n\t\tScience. Here, we describe our experiences using a student-run \n\t\tconference as a means to teach communication skills. In this \n\t\tapproach, students were charged with the task of planning and \n\t\torganising a conference, including peer review, publicity, \n\t\tbudget, sponsorship, web design, conference program, \n\t\tpresentation schedule, speaker support, and catering. We \n\t\tdescribe the principles and their implementation and reflect \n\t\ton the outcome.},\n keywords = {Education, Communication skills instruction},\n}\n\n\n
@Proceedings{Son_PPDP01,\n editor = {Harald S{\\o}ndergaard},\n title = {Principles and Practice of Declarative Programming},\n publisher = {ACM Press},\n year = {2001},\n keywords = {PPDP 2001, logic programming, functional programming},\n}\n\n\n
@InProceedings{Gru-Son_ACSC00,\n author = {Paul Gruba and \n\t\tHarald S{\\o}ndergaard},\n title = {Transforming Communication Skills Instruction:\n The Conference Approach},\n editor = {J. Edwards},\n booktitle = {Proceedings of the 23rd Australasian Computer Science \n\t\tConference},\n series = {Australian Computer Science Communications 22},\n number = {1},\n pages = {88--94},\n year = {2000},\n publisher = {IEEE Computer Society},\n location = {Canberra, Australia},\n doi = {10.1109/ACSC.2000.824385},\n abstract = {From the social constructivist perspective of education, \n\t\tlearning is best achieved when students face complex, real \n\t\tworld problems in which there are no clear answers. Faced with\n\t\ta sizable common goal, students work collaboratively towards \n\t\toutcomes and maintain ownership over key decisions. The role \n\t\tof staff is that of facilitators whose role is to challenge \n\t\tlearners to explore multiple aspects of the problem as they go \n\t\tabout reaching viable solutions. Such a role contrasts, for \n\t\texample, to an approach which sets out to lead students to a \n\t\tpresumed correct solution that is already possessed by the\n\t\tinstructor. Based on these principles we designed and \n\t\timplemented a course on communication skills in Computer \n\t\tScience. Here, we describe our experiences using a student-run \n\t\tconference as a means to teach communication skills. In this \n\t\tapproach, students were charged with the task of planning and \n\t\torganising a conference, including peer review, publicity, \n\t\tbudget, sponsorship, web design, conference program, \n\t\tpresentation schedule, speaker support, and catering.\n\t\tWe describe the principles and their implementation and \n\t\treflect on the outcome.},\n keywords = {Education, Communication skills instruction},\n}\n\n\n
@Article{Cod-Son-Stu_TOPLAS99,\n author = {Michael Codish and \n\t\tHarald S{\\o}ndergaard and \n\t\tPeter J. Stuckey},\n title = {Sharing and Groundness Dependencies in Logic Programs},\n journal = {ACM Transactions on Programming Languages and Systems},\n volume = {21},\n number = {5},\n pages = {948--976},\n year = {1999},\n doi = {10.1145/330249.330252},\n abstract = {We investigate Jacobs and Langen's \\textsf{Sharing} domain, \n\t\tintroduced for the analysis of variable sharing in logic \n\t\tprograms, and show that it is isomorphic to Marriott and \n\t\tS{\\o}ndergaard's \\textsf{Pos} domain, introduced for the \n\t\tanalysis of groundness dependencies. Our key idea\n\t\tis to view the sets of variables in a \\textsf{Sharing} \n\t\tdomain element as the models of a corresponding Boolean \n\t\tfunction. This leads to a recasting of sharing analysis \n\t\tin terms of the property of ``not being affected by the \n\t\tbinding of a \\emph{single} variable.'' Such an\n\t\t``unaffectedness dependency'' analysis has close \n\t\tconnections with groundness dependency analysis using \n\t\tpositive Boolean functions. This new view improves our \n\t\tunderstanding of sharing analysis, and leads to an elegant\n\t\texpression of its combination with groundness dependency\n\t\tanalysis based on the reduced product of \\textsf{Sharing} \n\t\tand \\textsf{Pos}. It also opens up new avenues for the \n\t\tefficient implementation of sharing analysis, for example \n\t\tusing reduced order binary decision diagrams, as well as\n\t\tefficient implementation of the reduced product, using \n\t\tdomain factorizations.},\n keywords = {Boolean logic, Abstract domains, Abstract interpretation, Logic programming},\n}\n\n\n
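The pivotal reading in the abstract above is small enough to execute. A toy version (ours, and deliberately simplified; the paper develops this into an isomorphism with Pos): treat each sharing group as a model, i.e. the set of variables assigned True; a variable is then definitely ground exactly when the induced Boolean function entails its negation:

sharing = [frozenset(), frozenset({'x'}), frozenset({'x', 'y'})]

def entails_neg(v, models):
    # the function whose models are `models` entails "not v"
    # precisely when v is True in no model
    return all(v not in m for m in models)

print(entails_neg('z', sharing))   # True: z is in no group, hence ground
print(entails_neg('x', sharing))   # False: x may share, so possibly non-ground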
@InProceedings{Gab-Gly-Son_LOPSTR98,\n author = {Tihomir Gabri{\\'c} and \n\t\tKevin Glynn and \n\t\tHarald S{\\o}ndergaard},\n title = {Strictness Analysis as Finite-Domain Constraint Solving},\n editor = {P. Flener},\n booktitle = {Logic-Based Program Synthesis and Transformation},\n series = {Lecture Notes in Computer Science},\n volume = {1559},\n pages = {255--270},\n publisher = {Springer},\n address = {Berlin, Germany},\n year = {1999},\n doi = {10.1007/3-540-48958-4_14},\n abstract = {It has become popular to express dataflow analyses in logical \n\t\tform. In this paper we investigate a new approach to the \n\t\tanalysis of functional programs, based on synthesis of \n\t\tconstraint logic programs. We sketch how the language Toupie,\n\t\toriginally designed with logic program analysis as one \n\t\tobjective, lends itself also to sophisticated strictness \n\t\tanalysis. Strictness analysis is straightforward in the \n\t\tsimplest case, that of analysing a first-order functional \n\t\tlanguage using just two strictness values, namely divergence \n\t\tand ``don't know''. Mycroft's classical translation immediately\n\t\tyields perfectly valid Boolean constraint logic programs, \n\t\twhich, when run, provide the desired strictness information.\n\t\tHowever, more sophisticated analysis requires more complex \n\t\tdomains of strictness values. We recast Wadler's classical \n\t\tanalysis over a $2n$-point domain as finite-domain \n\t\tconstraint solving. This approach has several advantages.\n\t\tFirst, the translation is relatively simple. We translate a \n\t\trecursive function definition into one or two constraint \n\t\tprogram clauses, in a manner which naturally extends Mycroft's\n\t\ttranslation for the 2-point case, where the classical approach\n\t\ttranslates the definition of an $n$-place function over lists \n\t\tinto $4^n$ mutually recursive equations. Second, the resulting \n\t\tprogram produces \\emph{relational} information, allowing for \n\t\texample to ask which combinations of properties of input will \n\t\tproduce a given output. Third, the approach allows us to \n\t\tleverage from established technology, for solving finite-domain \n\t\tconstraints, as well as for finding fixed points. Finally, the\n\t\tuse of (disjunctive) constraints can yield a higher precision \n\t\tin the analysis of some programs.},\n keywords = {Strictness analysis, Abstract interpretation, Abstract domains, Functional programming, Constraint programming},\n}\n\n\n
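For the simplest case mentioned above, the two-point analysis really is a small Boolean least-fixed-point computation. A toy instance (ours, solved by plain iteration rather than the paper's Toupie constraint programs): for f x = if x == 0 then 1 else f (x - 1), Mycroft-style translation gives the abstract equation f#(b) = b & (1 | f#(b)) over 0 (diverges) < 1 (may terminate):

def lfp2(eqn):
    # least fixed point over the two-point domain, by iteration from bottom
    f = lambda b: 0                            # bottom: everything diverges
    while True:
        t = {b: eqn(f, b) for b in (0, 1)}
        g = lambda b, t=t: t[b]
        if all(g(b) == f(b) for b in (0, 1)):
            return g
        f = g

fsharp = lfp2(lambda f, b: b & (1 | f(b)))
print(fsharp(0), fsharp(1))                    # 0 1: f is strict in x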
@Inproceedings{Gly-Son_ACSC99,\n author = {Kevin Glynn and \n\t\tHarald S{\\o}ndergaard},\n title = {Immediate Fixpoints for Strictness Analysis},\n editor = {J. Edwards},\n booktitle = {Proceedings of the 22nd Australasian Computer Science \n\t\tConference},\n series = {Australian Computer Science Communications},\n volume = {21},\n number = {1},\n year = {1999},\n pages = {336--347},\n publisher = {Springer},\n abstract = {This paper is concerned with the problem of finding fixpoints\n without resorting to Kleene iteration. In many cases, a simple\n\t\tsyntactic test suffices to establish that a closed form can be \n\t\tfound without iteration. For example, if the function $F$, \n\t\twhose fixpoint we want to determine, is idempotent, then its \n\t\tleast fixpoint is simply the instantiation $F(0)$, where $0$ \n\t\tis the least element of the lattice. In many cases, idempotence\n\t\tcan be gleaned easily from $F$'s definition. In other cases, \n\t\tsimple symbolic manipulation of a recursive definition may \n\t\treveal shortcuts to a closed form. We explore such cases and \n\t\tshow how the faster fixpoint calculation can have applications\n\t\tin strictness analysis.},\n keywords = {Fixed points, Strictness analysis, Functional programming},\n}\n\n\n
@InProceedings{Ste-Son-Nai_ITiCSE99,\n author = {Linda Stern and \n\t\tHarald S{\\o}ndergaard and \n\t\tLee Naish},\n title = {A Strategy for Managing Content Complexity in\n\t\tAlgorithm Animation},\n editor = {B. Manaris},\n booktitle = {Proceedings of the Fourth Annual SIGCSE/SIGCUE Conference on\n\t\tInnovation and Technology in Computer Science Education},\n pages = {127--130},\n publisher = {Association for Computing Machinery},\n address = {New York, NY},\n year = {1999},\n location = {Cracow, Poland},\n doi = {10.1145/305786.305891},\n abstract = {Computer animation is an excellent medium for capturing \n\t\tthe dynamic nature of data structure manipulations, and \n\t\tcan be used to advantage in the teaching of algorithms \n\t\tand data structures. A major educational issue is the \n\t\tnecessity of providing a means for the student to manage \n\t\tthe complexity of the material. We have addressed this\n\t\tissue in a multimedia teaching tool called ``Algorithms in\n\t\tAction'' by allowing students to view an algorithm at \n\t\tvarying levels of detail. Starting with a high level \n\t\tpseudocode description of the algorithm, with accompanying \n\t\thigh level animation and textual explanation, students can \n\t\texpand sections of the pseudocode to expose more detail.\n\t\tAnimation and explanation are controlled in a coordinated\n\t\tfashion, becoming correspondingly more detailed as the \n\t\tpseudocode is expanded. Student feedback suggests that the\n\t\tavailability of multiple levels of detail and the facility\n\t\tfor the user to control the level of detail being viewed\n\t\tare an effective way to manage content complexity.},\n keywords = {Education, Algorithm visualisation, Web-based learning},\n}\n\n\n&#xD;
@Article{Arm-Mar-Sch-Son_SCP98,\n author = {Tania Armstrong and \n\t\tKim Marriott and \n\t\tPeter Schachte and \n\t\tHarald S{\\o}ndergaard},\n title\t = {Two Classes of {Boolean} Functions for Dependency Analysis},\n journal = {Science of Computer Programming},\n volume = {31},\n number = {1},\n pages = {3--45},\n year\t = {1998},\n doi = {10.1016/S0167-6423(96)00039-1},\n abstract = {Many static analyses for declarative programming/database \n\t\tlanguages use Boolean functions to express dependencies \n\t\tamong variables or argument positions. Examples include \n\t\tgroundness analysis, arguably the most important analysis for \n\t\tlogic programs, finiteness analysis and functional dependency \n\t\tanalysis for databases. We identify two classes of Boolean \n\t\tfunctions that have been used: positive and definite functions,\n\t\tand we systematically investigate these classes and their \n\t\tefficient implementation for dependency analyses. On the \n\t\ttheoretical side we provide syntactic characterizations and\n\t\tstudy the expressiveness and algebraic properties of the \n\t\tclasses. In particular, we show that both are closed under \n\t\texistential quantification. On the practical side we \n\t\tinvestigate various representations for the classes based on \n\t\treduced ordered binary decision diagrams (ROBDDs), disjunctive \n\t\tnormal form, conjunctive normal form, Blake canonical form, \n\t\tdual Blake canonical form, and a form specific to definite \n\t\tfunctions. We compare the resulting implementations of \n\t\tgroundness analyzers based on the representations for precision\n\t\tand efficiency.},\n keywords = {Boolean logic, Abstract domains, Abstract interpretation, Logic programming},\n}\n\n\n
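@Comment{ Editor's illustration for the entry above, not from the paper:\n\ta small Python check of the model-based characterizations of the two\n\tclasses, with a function here represented by its set of models (the\n\tsets of variables assigned true). A function is positive when the\n\teverything-true assignment is a model, and definite when, in addition,\n\tits model set is closed under intersection. The example dependencies\n\tare hypothetical.\n\n    from itertools import chain, combinations\n\n    V = ['x', 'y', 'z']\n    def models(pred):\n        rows = chain.from_iterable(combinations(V, r) for r in range(4))\n        return set(frozenset(s) for s in rows if pred(frozenset(s)))\n\n    def positive(ms):\n        return frozenset(V) in ms\n\n    def definite(ms):\n        return positive(ms) and all(a & b in ms for a in ms for b in ms)\n\n    # x <-> (y & z), e.g. from a binding x = f(y, z), is definite\n    dep = models(lambda m: ('x' in m) == ('y' in m and 'z' in m))\n    assert positive(dep) and definite(dep)\n\n    # x | y is positive but not definite: its models are not meet-closed\n    disj = models(lambda m: 'x' in m or 'y' in m)\n    assert positive(disj) and not definite(disj)\n}\n\n\n&#xD;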
@Inproceedings{Cod-Son_PLILP98,\n author = {Michael Codish and \n\t\tHarald S{\\o}ndergaard},\n title = {The {Boolean} Logic of Set Sharing Analysis},\n editor = {C. Palamidessi and H. Glaser and K. Meinke},\n booktitle = {Principles of Declarative Programming},\n series = {Lecture Notes in Computer Science},\n volume = {1490},\n pages = {89--101},\n publisher = {Springer},\n year = {1998},\n doi = {10.1007/bfb0056609},\n abstract = {We show that Jacobs and Langen's domain for set-sharing \n\t\tanalysis is isomorphic to the domain of positive Boolean \n\t\tfunctions, introduced by Marriott and S{\\o}ndergaard for \n\t\tgroundness dependency analysis. Viewing a set-sharing \n\t\tdescription as a minterm representation of a Boolean \n\t\tfunction leads to re-casting sharing analysis as an\n\t\tinstantiation dependency analysis. The key idea is to \n\t\tview the sets of variables in a sharing domain element \n\t\tas the models of a Boolean function. In this way, sharing \n\t\tsets are precisely dual negated positive Boolean functions. \n\t\tThis new view improves our understanding of sharing \n\t\tanalysis considerably and opens up new avenues for the\n\t\tefficient implementation of this kind of analysis, for \n\t\texample using ROBDDs. To this end we express Jacobs and \n\t\tLangen's abstract operations for set sharing in logical form.},\n keywords = {Boolean logic, Abstract domains, Abstract interpretation, Logic programming},\n}\n\n\n
@article{Gar-Mar-Stu-Son_JLP98,\n author = {Maria {Garc{\\'\\i}a de la Banda} and \n\t\tKim Marriott and\n\t\tPeter J. Stuckey and \n\t\tHarald S{\\o}ndergaard},\n title = {Differential Methods in Logic Program Analysis},\n journal = {Journal of Logic Programming},\n volume = {35},\n number = {1},\n pages = {1--37},\n year = {1998},\n doi = {10.1016/s0743-1066(97)10002-4},\n abstract = {Program analysis based on abstract interpretation has proven\n\t\tvery useful in compilation of constraint and logic programming\n\t\tlanguages. Unfortunately, the traditional goal-dependent \n\t\tframework is inherently imprecise. This is because it handles \n\t\tcall and return in such a way that dataflow information may be \n\t\tre-asserted unnecessarily, leading to a loss of precision for\n\t\tmany description domains. For a few specific domains, the \n\t\tliterature contains proposals to overcome the problem, and some\n\t\timplementations use various unpublished tricks that sometimes \n\t\tavoid the precision loss. The purpose of this paper is to map \n\t\tthe landscape of goal-dependent, goal-independent, and combined\n\t\tapproaches to generic analysis of logic programs. This includes\n\t\tformalising existing methods and tricks in a way that is \n\t\tindependent of specific description domains. Moreover, we \n\t\tsuggest new methods for overcoming the loss of \n\t\tprecision---altogether eight different semantics are \n\t\tconsidered and compared. We provide theoretical results \n\t\tdetermining the relative accuracy of the approaches. These \n\t\tshow that two of our new semantics are uniformly more \n\t\taccurate than existing approaches. Experiments that we have \n\t\tperformed (for two description domains) with implementations \n\t\tof the eight different approaches enable a discussion of their\n\t\trelative runtime performances. We discuss the expected effect \n\t\ton other domains as well and conclude that our new methods \n\t\tcan be trusted to yield significantly more accurate analysis \n\t\tfor a small extra implementation effort, without compromising \n\t\tthe efficiency of analysis.},\n keywords = {Abstract interpretation, Logic programming, Sharing analysis},\n}\n\n\n&#xD;
@Article{Kel-Mar-Son-Stu_SPE98,\n author = {Andrew D. Kelly and \n\t\tKim Marriott and \n\t\tHarald S{\\o}ndergaard and \n\t\tPeter J. Stuckey},\n title = {A Practical Object-Oriented Analysis Engine for Constraint\n Logic Programs},\n journal = {Software---Practice and Experience},\n volume = {28},\n number = {2},\n pages = {199--224},\n year = {1998},\n doi = {10.1002/(SICI)1097-024X(199802)28:2<199::AID-SPE150>3.0.CO;2-4},\n abstract = {The incorporation of global program analysis into recent \n\t\tcompilers for constraint logic programming (CLP) languages has \n\t\tgreatly improved the efficiency of these new languages. We \n\t\tpresent a global analyzer based on abstract interpretation.\n\t\tUnlike traditional optimizers, whose designs tend to be ad hoc,\n\t\tthe analyzer has been designed with maximal flexibility in\n\t\tmind. The analyzer is incremental, allowing substantial program\n\t\ttransformations by a compiler without requiring redundant\n\t\tre-computation of analysis data. The analyzer is also\n\t\tgeneric in that it can perform a large number of different\n\t\tprogram analyses. Furthermore, the analyzer has an\n\t\tobject-oriented design, enabling it to be adapted to different\n\t\tapplications and allowing it to be used with various CLP \n\t\tlanguages with simple modifications. As an example of this \n\t\tgenerality, we sketch the use of the analyzer in two different\n\t\tapplications involving two different CLP languages: an \n\t\toptimizing compiler for CLP(${\\cal R}$) programs and an \n\t\tapplication for detecting occur-check problems in Prolog \n\t\tprograms.},\n keywords = {Constraint programming, Logic programming, Abstract interpretation, Prolog, Static analysis, Program transformation, Compilation, Unification},\n}\n\n\n&#xD;
@Inproceedings{Bai-Crn-Ram-Son_ICDT97,\n author = {James Bailey and \n\t\tLobel Crnogorac and \n\t\tKotagiri Ramamohanarao and \n\t\tHarald S{\\o}ndergaard},\n title = {Abstract Interpretation of Active Rules and Its Use in\n\t\tTermination Analysis},\n editor = {F. Afrati and P. Kolaitis},\n booktitle = {Database Theory---ICDT'97},\n series = {Lecture Notes in Computer Science},\n volume = {1186},\n pages = {188--202},\n publisher = {Springer},\n year = {1997},\n doi = {10.1007/3-540-62222-5_45},\n abstract = {The behaviour of rules in an active database system can be\n\t\tdifficult to predict, and much work has been devoted to the\n\t\tdevelopment of automatic support for reasoning about properties\n\t\tsuch as confluence and termination. We show how abstract \n\t\tinterpretation can provide a generic framework for analysis of\n\t\tactive rules. Abstract interpretation is a well-understood, \n\t\tsemantics-based method for static analysis. Its advantage, \n\t\tapart from generality, lies in the separation of concerns: \n\t\tOnce the underlying semantics has been captured formally, a \n\t\tvariety of analyses can be derived, almost for free, as \n\t\t\\emph{approximations} to the semantics. Moreover, powerful \n\t\tgeneral theorems enable simple proofs of global correctness \n\t\tand uniform termination of specific analyses. We outline these\n\t\tideas and present, as an example application, a new method for \n\t\ttermination analysis. In terms of precision, the method \n\t\tcompares favourably with previous solutions to the problem.\n\t\tThis is because the method investigates the flow of data \n\t\trather than just the syntax of conditions and actions.},\n keywords = {Termination analysis, Abstract interpretation, Active databases},\n}\n\n\n
@Inproceedings{Kel-Mar-Son-Stu_ACSC97,\n author = {Andrew D. Kelly and \n\t\tKim Marriott and \n\t\tHarald S{\\o}ndergaard and \n\t\tPeter J. Stuckey},\n title = {A Generic Object-Oriented Incremental Analyser for Constraint\n \tLogic Programs},\n editor = {R. Kotagiri and J. Zobel},\n booktitle = {Proceedings of the 20th Australasian Computer Science Conference},\n series = {Australian Computer Science Communications},\n volume = {19},\n number = {1},\n pages = {92--101},\n year = {1997},\n abstract = {The incorporation of global program analysis into compilers \n\t\tfor constraint logic programming (CLP) languages has greatly\n\t\timproved the efficiency of these new languages. We present\n\t\ta global analyser based on abstract interpretation. The \n\t\tanalyser is incremental, allowing substantial program \n\t\ttransformations by a compiler without requiring redundant\n\t\tre-computation of analysis data. The analyser is also\n\t\tgeneric in that it can perform a large number of different\n\t\tprogram analyses. Furthermore, the analyser has an \n\t\tobject-oriented design, enabling it to be easily adapted to\n\t\tdifferent applications and allowing it to be used with various\n\t\tCLP languages. As an example of this generality, we outline \n\t\tthe analyser's role in two different applications, each for a\n\t\tdifferent CLP language: an optimizing compiler for CLP(R) \n\t\tprograms and an application for detecting occur-check problems\n\t\tin Prolog programs.},\n keywords = {Constraint programming, Logic programming, Abstract interpretation, Prolog, Static analysis, Program transformation, Compilation, Unification},\n}\n\n\n&#xD;
@Inproceedings{Spe-Som-Son_SAS97,\n author = {Chris Speirs and \n\t\tZoltan Somogyi and \n\t\tHarald S{\\o}ndergaard},\n title = {Termination Analysis for {Mercury}},\n editor = {P. {Van Hentenryck}},\n booktitle = {Static Analysis: \n\t\tProceedings of the Fourth International Symposium},\n series = {Lecture Notes in Computer Science},\n volume = {1302},\n pages = {157--171},\n publisher = {Springer},\n year = {1997},\n doi = {10.1007/bfb0032740},\n abstract = {Since the late eighties, much progress has been made in the \n\t\ttheory of termination analysis for logic programs. However, the\n\t\tpractical significance of much of this work is hard to judge, \n\t\tsince experimental evaluations rarely get published. Here we \n\t\tdescribe and evaluate a termination analyzer for Mercury, a \n\t\tstrongly typed and moded logic-functional programming language.\n\t\tMercury's high degree of referential transparency and the \n\t\tguaranteed availability of reliable mode information simplify \n\t\ttermination analysis. Our analyzer uses a variant of a method \n\t\tdeveloped by Pl{\\"u}mer. It deals with full Mercury, including \n\t\tmodules and I/O. In spite of these obstacles, it produces \n\t\tstate-of-the-art termination information, while having a \n\t\tnegligible impact on the running time of the compiler of which\n\t\tit is part, even for large programs.},\n keywords = {Logic programming, Mercury, Termination analysis},\n}\n\n\n
@Inproceedings{Crn-Kel-Son_SAS96,\n author = {Lobel Crnogorac and \n\t\tAndrew Kelly and \n\t\tHarald S{\\o}ndergaard},\n title = {A Comparison of Three Occur-Check Analysers},\n editor = {R. Cousot and D. A. Schmidt},\n booktitle = {Static Analysis: Proceedings of the Third International \n\t\tSymposium},\n series = {Lecture Notes in Computer Science},\n volume = {1145},\n pages = {159--173},\n publisher = {Springer},\n year = {1996},\n doi = {10.1007/3-540-61739-6_40},\n abstract = {A well known problem with many Prolog interpreters and compilers\n\t\tis the lack of occur-check in the implementation of the \n\t\tunification algorithm. This means that such systems are unsound\n\t\twith respect to first-order predicate logic. Static analysis \n\t\toffers an appealing approach to the problem of occur-check \n\t\treduction, that is, the safe omission of occur-checks in \n\t\tunification. We compare, for the first time, three static \n\t\tmethods that have been suggested for occur-check reduction, \n\t\ttwo based on assigning ``modes'' to programs and one which \n\t\tuses abstract interpretation. In each case, the analysis or \n\t\tsome essential part of it had not been implemented so far.\n\t\tOf the mode-based methods, one is due to Chadha and Plaisted\n\t\tand the other is due to Apt and Pellegrini. The method using \n\t\tabstract interpretation is based on earlier work by Plaisted, \n\t\tS{\\o}ndergaard and others who have developed groundness and \n\t\tsharing analyses for logic programs. The conclusion is that a \n\t\ttruly global analysis based on abstract interpretation leads \n\t\tto markedly higher precision and hence fewer occur-checks at \n\t\trun-time. Given the potential run-time speedups, a precise \n\t\tanalysis would be well worth the extra time.},\n keywords = {Abstract interpretation, Groundness analysis, Sharing analysis, Unification, Prolog, Compilation},\n}\n\n\n
@Inproceedings{Joh-Mof-Son-Stu_ACSE96,\n author = {Roy Johnston and \n\t\tAlistair Moffat and \n\t\tHarald S{\\o}ndergaard and \n\t\tPeter J. Stuckey},\n title\t = {Low-Contact Learning in a First Year Programming Course},\n editor = {J. Rosenberg},\n booktitle = {Proceedings of the First Australasian Conference on \n\t\tComputer Science Education},\n pages = {19--26},\n publisher = {ACM Press},\n year = {1996},\n location = {Sydney, Australia},\n doi = {10.1145/369585.369589},\n abstract = {Very large class sizes for introductory programming subjects pose\n\t\ta variety of problems, logistic and pedagogic.\n\t\tIn response to these problems, we are experimenting with \n\t\tlow-contact streams in a class of 750 first year students.\n\t\tMotivated and disciplined students are offered the alternative \n\t\tof fewer lectures and tutorials, that is, less direct contact, \n\t\tin return for more (compulsory) homework and increased \n\t\tfeed-back, that is, more indirect contact.\n\t\tWith a home computer and a link to the University, such \n\t\tstudents can take the subject with more flexible timetabling.\n\t\tWe provide material to support the higher degree of self-study \n\t\tand tools for students to monitor their own progress.\n\t\tThis paper describes how the low-contact streams are organised \n\t\tand discusses the advantages and disadvantages of the approach.\n\t\tA thorough evaluation is not possible yet, but our initial \n\t\timpression is that both traditional and self-paced students \n\t\tbenefit---the traditional students are better catered for in \n\t\ta course that is pitched at a slightly lower and less \n\t\tchallenging level, and the self-paced students respond to the \n\t\tchallenge and extend themselves in a manner that might not \n\t\toccur in a large lecture group.},\n keywords = {Education},\n}\n\n\n
@Inproceedings{Kel-Mar-Son-Stu_SAS96,\n author = {Andrew D. Kelly and \n\t\tKim Marriott and \n\t\tHarald S{\\o}ndergaard and \n\t\tPeter J. Stuckey},\n title = {Two Applications of an Incremental Analysis Engine for\n (Constraint) Logic Programs (System Description)},\n editor = {R. Cousot and D. A. Schmidt},\n booktitle = {Static Analysis: Proceedings of the Third International \n\t\tSymposium},\n series = {Lecture Notes in Computer Science},\n volume = {1145},\n pages = {385--386},\n publisher = {Springer},\n year = {1996},\n doi = {10.1007/3-540-61739-6_55},\n keywords = {Constraint programming, Logic programming, Prolog, Static analysis, Program transformation, Compilation},\n}\n\n\n
@Inproceedings{Son_FSTTCS96,\n author = {Harald S{\\o}ndergaard},\n title = {Immediate Fixpoints and Their Use in Groundness Analysis},\n editor = {V. Chandru and V. Vinay},\n booktitle = {Foundations of Software Technology and \n\t\tTheoretical Computer Science},\n series = {Lecture Notes in Computer Science},\n volume = {1180},\n pages = {359--370},\n publisher = {Springer},\n year = {1996},\n doi = {10.1007/3-540-62034-6_63},\n abstract = {A theorem by Schr{\\"o}der says that for a certain natural \n\t\tclass of functions $F : B \\rightarrow B$ defined on a Boolean \n\t\tlattice $B$, $F(x) = F(F(F(x)))$ for all $x \\in B$.\n\t\tAn immediate corollary is that if such a function is monotonic \n\t\tthen it is also idempotent, that is, $F(x) = F(F(x))$.\n\t\tWe show how this corollary can be extended to recognize cases\n\t\twhere recursive definitions can immediately be replaced by an \n\t\tequivalent closed form, that is, they can be solved without \n\t\tKleene iteration. Our result applies more generally to \n\t\tdistributive lattices. It has applications for example in the \n\t\tabstract interpretation of declarative programs and deductive \n\t\tdatabases. We exemplify this by showing how to accelerate \n\t\tsimple cases of strictness analysis for first-order functional \n\t\tprograms and, perhaps more successfully, groundness analysis \n\t\tfor logic programs.},\n keywords = {Fixed points, Logic programming, Groundness analysis, Lattice theory},\n}\n\n\n
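@Comment{ Editor's illustration for the entry above, not from the paper:\n\ta minimal Python check, on the Boolean lattice of 3-bit masks, of the\n\tcorollary that a monotonic function of the right shape is idempotent,\n\tso that its least fixpoint is simply F(0), with no Kleene iteration.\n\tThe particular F below is a hypothetical example.\n\n    A, B = 0b011, 0b110\n    F = lambda x: (x | A) & B    # monotonic on the lattice of bitmasks\n\n    # idempotence: F(F(x)) == F(x) for every lattice element\n    assert all(F(F(x)) == F(x) for x in range(8))\n\n    # hence the least fixpoint is immediate: a single application F(0)\n    lfp = F(0)\n    assert F(lfp) == lfp and lfp == A & B\n}\n\n\n&#xD;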
@Techreport{Garcia_TR95,\n author = {Maria {Garc{\\'\\i}a de la Banda} and \n\t\tKim Marriott and\n\t\tHarald S{\\o}ndergaard and \n\t\tPeter J. Stuckey},\n title = {Improved Analysis of Logic Programs Using a Differential\n\t\tApproach},\n number = {95/15},\n institution = {Department of Computer Science, The University of Melbourne},\n year = {1995},\n abstract = {Abstract interpretation based program analysis has proven\n\t\tvery useful in compilation of constraint and logic programming\n\t\tlanguages. Unfortunately, existing theoretical frameworks are \n\t\tinherently imprecise. This is because of the way the frameworks\n\t\thandle call and return from an atom evaluation---in effect the \n\t\tsame information may be added twice, leading to a loss of \n\t\tprecision in many description domains. For this reason some \n\t\timplementations use seemingly \\emph{ad hoc} tricks. Here we \n\t\tformalize these tricks and suggest three methods for overcoming\n\t\tthis loss of precision. Experimental and theoretical results \n\t\tindicate that use of these methods leads to more accurate and \n\t\tfaster analyses for little extra implementation effort.},\n keywords = {Abstract interpretation, Logic programming},\n}\n\n\n
@Inproceedings{Kel-Mac-Mar-Son-Stu-Yap_CP95,\n author = {Andrew D. Kelly and \n\t\tAndrew Macdonald and \n\t\tKim Marriott and\n \tHarald S{\\o}ndergaard and \n\t\tPeter J. Stuckey and \n\t\tRoland H. C. Yap},\n title = {An Optimizing Compiler for {CLP(R)}},\n editor = {U. Montanari and F. Rossi},\n booktitle = {Principles and Practice of Constraint Programming---CP'95},\n series = {Lecture Notes in Computer Science},\n volume = {976},\n pages = {222--239},\n publisher = {Springer},\n year = {1995},\n doi = {10.1007/3-540-60299-2_14},\n abstract = {The considerable expressive power and flexibility gained by \n\t\tcombining constraint programming with logic programming\n\t\tis not without cost. Implementations of constraint logic \n\t\tprogramming (CLP) languages must include expensive constraint \n\t\tsolving algorithms tailored to specific domains, such as trees,\n\t\tBooleans, or real numbers. The performance of many current CLP\n\t\tcompilers and interpreters does not encourage the widespread \n\t\tuse of CLP. We outline an optimizing compiler for \n\t\tCLP(${\\cal R}$), a CLP language which extends Prolog by \n\t\tallowing linear arithmetic constraints. The compiler uses \n\t\tsophisticated global analyses to determine the applicability of\n\t\tdifferent program transformations. Two important components of \n\t\tthe compiler, the \\emph{analyzer} and the \\emph{optimizer}, \n\t\twork in continual interaction in order to apply \n\t\tsemantics-preserving transformations to the source program.\n\t\tA large suite of transformations is planned. Currently the \n\t\tcompiler applies three powerful transformations, namely \n\t\t``solver bypass'', ``dead variable elimination'' and\n\t\t``nofail constraint detection''. We explain these optimizations\n\t\tand their place in the overall compiler design and show how \n\t\tthey lead to performance improvements on a set of benchmark \n\t\tprograms.},\n keywords = {Constraint programming, Logic programming, Static analysis, Program transformation, Compilation},\n}\n\n\n&#xD;
@Inproceedings{Arm-Mar-Sch-Son_SAS94,\n author = {Tania Armstrong and \n\t\tKim Marriott and \n\t\tPeter Schachte and \n\t\tHarald S{\\o}ndergaard},\n title = {{Boolean} Functions for Dependency Analysis:\n\t\tAlgebraic Properties and Efficient Representation},\n editor = {B. {Le Charlier}},\n booktitle = {Static Analysis: Proceedings of the First International \n\t\tSymposium},\n series = {Lecture Notes in Computer Science},\n volume = {864},\n pages = {266--280},\n publisher = {Springer},\n year = {1994},\n doi = {10.1007/3-540-58485-4_46},\n abstract = {Many analyses for logic programming languages use Boolean \n\t\tfunctions to express dependencies between variables or argument\n\t\tpositions. Examples include groundness analysis, arguably the \n\t\tmost important analysis for logic programs, finiteness analysis \n\t \tand functional dependency analysis. We identify two classes of \n\t\tBoolean functions that have been used: positive and definite \n\t\tfunctions, and we systematically investigate these classes and \n\t\ttheir efficient implementation for dependency analyses. We \n\t\tprovide syntactic characterizations and study their algebraic \n\t\tproperties. In particular, we show that both classes are closed\n\t\tunder existential quantification. We investigate representations\n\t\tfor these classes based on reduced ordered binary decision \n\t\tdiagrams (ROBDDs), disjunctive normal form, conjunctive normal \n\t\tform, Blake canonical form, dual Blake canonical form, and a \n\t\tform specific to definite functions. We give an empirical \n\t\tcomparison of these different representations for groundness \n\t\tanalysis.},\n keywords = {Boolean logic, Abstract domains, Abstract interpretation, Logic programming},\n}\n\n\n&#xD;
@Article{Mar-Son-Jon_TOPLAS94,\n author = {Kim Marriott and\n\t\tHarald S{\\o}ndergaard and \n\t\tNeil D. Jones},\n title = {Denotational Abstract Interpretation of Logic Programs},\n journal = {ACM Transactions on Programming Languages and Systems},\n volume = {16},\n number = {3},\n pages = {607--648},\n year = {1994},\n doi = {10.1145/177492.177650},\n abstract = {Logic programming languages are based on a principle of \n\t\tseparation of ``logic'' and ``control.'' This means that they\n\t\tcan be given simple model-theoretic semantics without regard to\n\t\tany particular execution mechanism (or proof procedure, viewing\n\t\texecution as theorem proving). While the separation is desirable\n\t\tfrom a semantical point of view, it does, however, make sound, \n\t\tefficient implementation of logic programming languages \n\t\tdifficult. The lack of ``control information'' in programs \n\t\tcalls for complex dataflow analysis techniques to guide \n\t\texecution. Since dataflow analysis furthermore finds extensive \n\t\tuse in error-finding and transformation tools, there is a need \n\t\tfor a simple and powerful theory of dataflow analysis of logic \n\t\tprograms. The present paper offers such a theory, based on \n\t\tF. Nielson's extension of P. and R. Cousot's abstract \n\t\tinterpretation. We present a denotational definition of the \n\t\tsemantics of definite logic programs. This definition is of \n\t\tinterest in its own right because of its compactness.\n\t\tStepwise we develop the definition into a generic dataflow \n\t\tanalysis which encompasses a large class of dataflow analyses\n\t\tbased on the SLD execution model. We exemplify one instance of\n\t\tthe definition by developing a provably correct groundness \n\t\tanalysis to predict how variables may be bound to ground terms \n\t\tduring execution. We also discuss implementation issues and \n\t\trelated work.},\n keywords = {Abstract interpretation, Abstract domains, Groundness analysis, Logic programming},\n}\n\n\n
@Inproceedings{Mar-Son-Stu-Yap_ACSC94,\n author = {Kim Marriott and \n\t\tHarald S{\\o}ndergaard and \n\t\tPeter J. Stuckey and \n\t\tRoland Yap},\n title\t = {Optimizing Compilation for {CLP(R)}},\n editor = {G. Gupta},\n booktitle = {Proceedings of the 17th Australian Computer Science Conference},\n series = {Australian Computer Science Communications},\n volume = {16},\n number = {1},\n pages = {551--560},\n year\t = {1994},\n abstract = {Constraint Logic Programming (CLP) is a recent innovation in\n\t\tprogramming language design. CLP languages extend logic\n\t\tprogramming by allowing constraints from different domains\n\t\tsuch as real numbers or Boolean functions. This gives\n\t\tconsiderable expressive power and flexibility and CLP\n\t\tprograms have proven to be a high-level programming\n\t\tparadigm for applications based on interactive mathematical\n\t\tmodelling. These advantages, however, are not without cost.\n\t\tImplementations of CLP languages must include expensive\n\t\tconstraint solving algorithms tailored to the specific\n\t\tdomains. Indeed, performance of the current generation of\n\t\tCLP compilers and interpreters is one of the main obstacles\n\t\tto the widespread use of CLP. Here we outline the design\n\t\tof a highly optimizing compiler for CLP($\\Re$), a CLP\n\t\tlanguage which extends Prolog by allowing linear arithmetic\n\t\tconstraints. This compiler is intended to overcome the\n\t\tefficiency problems of the current implementation\n\t\ttechnology. The main innovation in the compiler is a\n\t\tcomprehensive suite of program optimizations and associated\n\t\tglobal analyses which determine applicability of each\n\t\toptimization. We describe these optimizations and report\n\t\tvery promising results from preliminary experiments.},\n keywords = {Constraint programming, Logic programming, Compilation, Static analysis},\n}\n\t\t\n\n
@Proceedings{Ses-Son_PEPM94,\n editor = {Peter Sestoft and \n\t\tHarald S{\\o}ndergaard},\n title\t = {PEPM '94: Proceedings of the ACM SIGPLAN Workshop on Partial \n\t\tEvaluation and Semantics-Based Program Manipulation},\n location = {Orlando, Florida},\n year = {1994},\n doi = {10.1007/BF01019002},\n note\t = {Technical Report 94/9, Department of Computer Science, \n\t\tThe University of Melbourne},\n keywords = {Partial evaluation, Program transformation, Static analysis},\n}\n\n\n
@inproceedings{Bak-Son_ACSC93,\n author = {Naomi Baker and \n\t\tHarald S{\\o}ndergaard},\n title = {Definiteness Analysis for {CLP(R)}},\n editor = {G. Gupta and G. Mohay and R. Topor},\n booktitle = {Proceedings of the Sixteenth Australian Computer Science \n\t\tConference},\n series = {Australian Computer Science Communications},\n volume = {15},\n number = {1},\n pages = {321--332},\n year = {1993},\n abstract = {Constraint logic programming (CLP) languages generalise\n\t\tlogic programming languages, amalgamating logic programming\n\t\tand constraint programming. Combining the best of both\n\t\tworlds, they provide powerful tools for wide classes\n\t\tof problems. As with logic programming languages, code\n\t\toptimization by compilers is an important issue in the\n\t\timplementation of CLP languages. A compiler needs\n\t\tsophisticated global information, collected by dataflow\n\t\tanalyses, to generate competitive code.\n\t\tOne kind of useful dataflow information concerns the point\n\t\tat which variables become definite, that is, constrained\n\t\tto take a unique value. In this paper we present a very\n\t\tprecise dataflow analysis to determine definiteness, and we\n\t\tdiscuss its applications. By separating the two concerns of\n\t\tcorrectness and implementation techniques, abstract\n\t\tinterpretation enables us to develop a sophisticated\n\t\tdataflow analysis in a straightforward manner, in fact in\n\t\ta framework where the correctness of the analysis is easily\n\t\testablished---a feature which is uncommon when complex\n\t\tanalyses are developed in an ad hoc way.\n\t\tWe use a class of Boolean functions, the positive functions,\n\t\tto represent the definiteness relationship between variables.\n\t\tA Boolean function is interpreted as expressing a relation\n\t\twhich holds not simply at the given point in an evaluation,\n\t\tbut in fact during the rest of the evaluation branch.\n\t\tThe nature of variables in a CLP language makes this\n\t\ttreatment both possible and natural.},\n keywords = {Constraint logic programming, Boolean logic, Abstract interpretation},\n}\n\n\n&#xD;
@Inproceedings{Mar-Son_ILPSworkshop93,\n author = {Kim Marriott and \n\t\tHarald S{\\o}ndergaard},\n title\t = {On Propagation-Based Analysis of Logic Programs},\n editor = {S. Michaylov and W. Winsborough},\n booktitle = {Proceedings of the ILPS 93 Workshop on Global Compilation,\n\t\t\tVancouver, Canada},\n pages = {47--65},\n year\t = {1993},\n abstract = {Notions such as ``reexecution'' and ``propagation'' have recently\n\t\tattracted attention in dataflow analysis of logic programs. \n\t\tBoth techniques promise more accurate dataflow analysis without \n\t\trequiring more complex description domains. Propagation, \n\t\thowever, has never been given a formal definition. It has \n\t\ttherefore been difficult to discuss properties such as \n\t\tcorrectness, precision, and termination of propagation. \n\t\tWe suggest a definition of propagation. Comparing \n\t\tpropagation-based analysis with the more conventional\n\t\tapproach based on abstract interpretation, we find that \n\t\tpropagation involves a certain inherent loss of precision \n\t\twhen dataflow analyses are based on description domains\n\t\twhich are not ``downwards closed'' (including mode analysis).\n\t\tIn the archetypical downwards closed case, groundness \n\t\tanalysis, we contrast approaches using Boolean functions as\n\t\tdescriptions with those using propagation or reexecution.},\n keywords = {Abstract interpretation, Boolean logic, Logic programming, Mode analysis, Groundness analysis},\n}\n\n\n
@Article{Mar-Son_LOPLAS93,\n author = {Kim Marriott and \n\t\tHarald S{\\o}ndergaard},\n title\t = {Precise and Efficient Groundness Analysis for Logic Programs},\n journal = {ACM Letters on Programming Languages and Systems},\n volume = {2},\n number = {1--4},\n pages\t = {181--196},\n year\t = {1993},\n doi = {10.1145/176454.176519},\n abstract = {We show how precise groundness information can be extracted from\n\t\tlogic programs. The idea is to use abstract interpretation with\n\t\tBoolean functions as ``approximations'' to groundness \n\t\tdependencies between variables. This idea is not new, and \n\t\tdifferent classes of Boolean functions have been used. We argue,\n\t\thowever, that one class, the \\emph{positive} functions, is more \n\t\tsuitable than others. Positive Boolean functions have a certain\n\t\tproperty which we (inspired by A. Langen) call ``condensation.''\n\t\tThis property allows for rapid computation of groundness \n\t\tinformation.},\n keywords = {Abstract interpretation, Abstract domains, Logic programming, Boolean logic, Groundness analysis},\n}\n\n\n
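@Comment{ Editor's illustration for the entry above, not from the paper:\n\ta tiny Python sketch of groundness dependencies as Boolean functions,\n\twith truth meaning ``definitely ground''; the encoding by enumeration\n\tand the example binding are hypothetical.\n\n    from itertools import product\n\n    # assignments to the variables x, y, z\n    asgs = list(product([False, True], repeat=3))\n\n    dep = lambda x, y, z: x == (y and z)   # from a binding x = f(y, z)\n    yz = lambda x, y, z: y and z           # y and z then become ground\n\n    # conjoining the two dependencies entails groundness of x\n    assert all(x for (x, y, z) in asgs if dep(x, y, z) and yz(x, y, z))\n\n    # dep is positive: true under the everything-true assignment\n    assert dep(True, True, True)\n}\n\n\n&#xD;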
@Article{Mar-Son_NGC93,\n author = {Kim Marriott and \n\t\tHarald S{\\o}ndergaard},\n title\t = {Difference-List Transformation for {Prolog}},\n journal = {New Generation Computing},\n volume = {11},\n number = {2},\n pages = {125--157}, \n year\t = {1993},\n doi = {10.1007/BF03037156},\n abstract = {Difference-lists are terms that represent lists.\n\t\tThe use of difference-lists can speed up most list-processing \n\t\tprograms considerably. Prolog programmers routinely use \n\t\t``difference-list versions'' of programs, but very little \n\t\tinvestigation has taken place into difference-list \n\t\ttransformation. Thus, to most programmers it is either \n\t\tunknown that the use of difference-lists is far from safe in \n\t\tall contexts, or else this fact is known but attributed to \n\t\tProlog's infamous ``occur check problem.'' In this paper we \n\t\tstudy the transformation of list-processing programs into \n\t\tprograms that use difference-lists. In particular we are \n\t\tconcerned with finding circumstances under which the \n\t\ttransformation is safe. We show that dataflow analysis can \n\t\tbe used to determine whether the transformation is applicable \n\t\tto a given program, thereby allowing for automatic \n\t\ttransformation. We prove that our transformation preserves \n\t\tstrong operational equivalence.},\n keywords = {Program transformation, Static analysis, Logic programming, Prolog},\n}\n\n\n
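@Comment{ Editor's illustration for the entry above, not from the paper:\n\tProlog difference-lists have a well-known functional analogue, given\n\there as a hypothetical Python sketch: represent a list by a function\n\tthat prepends it, so that appending becomes function composition in\n\tconstant time, with no traversal of the first list.\n\n    def dl(xs):                  # a list, as a prepend function\n        return lambda tail: xs + tail\n\n    def append(f, g):            # composition: no traversal of f's list\n        return lambda tail: f(g(tail))\n\n    ab, cd = dl(['a', 'b']), dl(['c', 'd'])\n    assert append(ab, cd)([]) == ['a', 'b', 'c', 'd']\n}\n\n\n&#xD;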
@Article{Mar-Son_JLP92,\n author = {Kim Marriott and \n\t\tHarald S{\\o}ndergaard},\n title\t = {Bottom-Up Dataflow Analysis of Normal Logic Programs},\n journal = {Journal of Logic Programming},\n volume = {13},\n number = {2 \\& 3},\n pages\t = {181--204}, \n year\t = {1992},\n doi = {10.1016/0743-1066(92)90031-W},\n abstract = {A theory of semantics-based dataflow analysis using a notion of\n\t\t``insertion'' is presented. This notion relaxes the Galois \n\t\tconnections used in P. and R. Cousot's theory of abstract \n\t\tinterpretation. The aim is to obtain a firm basis for the \n\t\tdevelopment of dataflow analyses of normal logic programs.\n\t\tA dataflow analysis is viewed as a non-standard semantics that\n\t\tapproximates the standard semantics by manipulating descriptions\n\t\tof data objects rather than the objects themselves. A Kleene \n\t\tlogic-based semantics for normal logic programs is defined, \n\t\tsimilar to Fitting's $\\Phi_P$ semantics. This provides the \n\t\tneeded semantic base for ``bottom-up'' dataflow analyses. \n\t\tSuch analyses give information about the success and failure \n\t\tsets of a program. A major application of bottom-up analysis \n\t\tis therefore type inference. We detail a dataflow analysis \n\t\tusing descriptions similar to Sato and Tamaki's depth $k$ \n\t\tabstractions and another using Marriott, Naish and Lassez's\n\t\t``singleton'' abstractions. We show that both are sound with \n\t\trespect to our semantics and outline various uses of the \n\t\tanalyses. Finally we justify our choice of semantics by showing\n\t\tthat it is the most abstract of a number of possible semantics.\n\t\tThis means that every analysis based on our semantics is \n\t\tcorrect with respect to these other semantics, including \n\t\tKunen's semantics, SLDNF resolution, and the common (sound) \n\t\tProlog semantics.},\n keywords = {Abstract interpretation, Logic programming, Many-valued logic, Depth k analysis, Semantics},\n}\n\n\n
@Article{Son-Ses_CJ92,\n author = {Harald S{\\o}ndergaard and \n\t\tPeter Sestoft},\n title = {Non-Determinism in Functional Languages},\n journal = {The Computer Journal},\n volume = {35},\n number = {5},\n pages\t = {514--523},\n year\t = {1992},\n doi = {10.1093/comjnl/35.5.514},\n abstract = {The introduction of a non-deterministic operator in even a\n\t\tvery simple functional programming language gives rise to\n\t\ta plethora of semantic questions. These questions are not \n\t\tonly concerned with the choice operator itself. A\n\t\tsurprisingly large number of different parameter passing\n\t\tmechanisms are made possible by the introduction of bounded\n\t\tnon-determinism. The diversity of semantic possibilities\n\t\tis examined systematically using denotational definitions\n\t\tbased on mathematical structures called powerdomains. This\n\t\tresults in an improved understanding of the different kinds\n\t\tof non-determinism and the properties of different kinds of\n\t\tnon-deterministic languages.},\n keywords = {Programming language concepts, Nondeterminism, Functional programming, Semantics},\n}\n\n\n
@Inproceedings{Cre-Mar-Son_AI90,\n author = {Roberto Cremonini and \n\t\tKim Marriott and \n\t\tHarald S{\\o}ndergaard},\n title\t = {A General Theory for Abstraction},\n editor = {C. P. Tsang},\n booktitle = {AI '90: Proceedings of the Fourth Australian Joint Conference on \n\t\t\tArtificial Intelligence}, \n pages = {121--134},\n publisher = {World Scientific Publ.},\n year\t = {1990},\n abstract = {Abstraction is the process of mapping one representation of \n\t\ta problem into another representation so as to simplify \n\t\treasoning while preserving the essence of the problem.\n\t\tIt has been investigated in many different areas of computer\n\t\tscience. In artificial intelligence, abstraction is used in\n\t\tautomated theorem proving, problem solving, planning, common\n\t\tsense reasoning and qualitative reasoning. In dataflow \n\t\tanalysis of programs, it has been formalized in the theory \n\t\tof abstract interpretation. However, the possible \n\t\tconnections between research on abstraction in artificial \n\t\tintelligence and dataflow analysis have gone unnoticed until\n\t\tnow. This paper investigates these connections and\n\t\tprovides a general theory of abstraction for artificial \n\t\tintelligence. In particular, we present a method for \n\t\tbuilding an ``optimal'' abstraction and give general \n\t\tsufficient conditions for the abstraction to be ``safe.''\n\t\tThe usefulness and generality of our theory is illustrated \n\t\twith a number of example applications and by comparisons \n\t\twith related work.},\n keywords = {Abstract interpretation, Artificial intelligence},\n}\n\n\n
@Inproceedings{Mar-Son-Dar_NACLP90,\n author = {Kim Marriott and \n\t\tHarald S{\\o}ndergaard and \n\t\tPhilip Dart},\n title\t = {A Characterization of Non-floundering Logic Programs},\n editor = {S. Debray and M. Hermenegildo},\n booktitle = {Logic Programming: \n\t\tProceedings of the 1990 North American Conference},\n pages\t = {661--680},\n publisher = {MIT Press}, \n year\t = {1990},\n abstract = {SLDNF resolution provides an operational semantics which\n\t\t(unlike many Prolog implementations of negation) is both \n\t\tcomputationally tractable and sound with respect to a \n\t\tdeclarative semantics. However, SLDNF is not complete, and \n\t\tit achieves soundness only by discarding certain queries to \n\t\tprograms because they ``flounder.''\n\t\tWe present dataflow analyses that for a given normal logic \n\t\tprogram detect (1) whether naive Prolog-style handling of \n\t\tnegation is sound and (2) whether SLDNF resolution avoids \n\t\tfloundering. The analyses are presented in a \n\t\tlanguage-independent framework of abstract interpretation based\n\t\ton denotational definitions. To our knowledge they are more \n\t\tprecise than any of their kind. In particular we obtain a \n\t\tcharacterization of a class of non-floundering programs larger\n\t\tthan any other that we know of.},\n keywords = {Floundering analysis, Logic programming, Prolog},\n}\n\n\n&#xD;
@Inproceedings{Mar-Son_GULP90,\n author = {Kim Marriott and \n\t\tHarald S{\\o}ndergaard},\n title\t = {Abstract Interpretation of Logic Programs:\n\t\t\tThe Denotational Approach},\n editor = {A. Bossi},\n booktitle = {Proceedings of the Fifth Italian Conference on Logic Programming},\n pages = {399--425},\n organization = {Gruppo Ricercatori ed Utenti di Logic Programming,\n\t\t\tPadova, Italy}, \n year\t = {1990},\n note\t = {Invited paper},\n abstract = {Logic programming languages are based on a principle of \n\t\tseparation of ``logic'' and ``control.'' This means that \n\t\tthey can be given simple model-theoretic semantics\n\t\twithout regard to any particular execution mechanism.\n\t\tWhile this is desirable from a semantical point of view, \n\t\tit does make sound, efficient implementation of logic \n\t\tprogramming languages difficult. The lack of ``control \n\t\tinformation'' in programs calls for complex dataflow\n\t\tanalyses to guide execution. Since dataflow analysis also \n\t\tfinds extensive use in error-finding and transformation \n\t\ttools, there is a need for a simple and powerful theory\n\t\tof dataflow analysis of logic programs.\n\t\tWe offer such a theory, inspired by F. Nielson's extension\n\t\tof P. and R. Cousot's \\emph{abstract interpretation}.\n\t\tWe then present a denotational definition of the semantics \n \t\tof definite logic programs. This definition is of interest \n\t\tin its own right because of its novel application of \n\t\t\\emph{parametric substitutions} which makes it very compact\n\t\tand yet transparent. Stepwise we develop the definition \n\t\tinto a generic dataflow analysis which encompasses a large \n\t\tclass of dataflow analyses based on the SLD execution model.\n\t\tWe exemplify one instance of the definition by developing a \n\t\tprovably correct groundness analysis to predict how \n\t\tvariables may be bound to ground terms during execution.\n\t\tWe also discuss related work and other applications.},\n keywords = {Abstract interpretation, Abstract domains, Groundness analysis, Logic programming},\n}\n\n\n
@Inproceedings{Mar-Son_NACLP90,\n author = {Kim Marriott and \n\t\tHarald S{\\o}ndergaard},\n title\t = {Analysis of Constraint Logic Programs},\n editor = {S. Debray and M. Hermenegildo},\n booktitle = {Logic Programming: \n\t\tProceedings of the 1990 North American Conference},\n pages\t = {531--547},\n publisher = {MIT Press}, \n year\t = {1990},\n abstract = {Increasingly, compilers for logic programs are making use \n\t\tof information from dataflow analyses to aid in the generation\n\t\tof efficient target code. However, generating efficient target\n\t\tcode from constraint logic programs is even more difficult than\n\t\tfrom logic programs. Thus we feel that good compilation of\n\t\tconstraint logic programs will require sophisticated analysis \n\t\tof the source code. Here we present a simple framework for\n\t\tthe abstract interpretation of constraint logic programs\n\t\twhich is intended to serve as the basis for developing such \n\t\tanalyses. We show the framework's practical usefulness by \n\t\tsketching two dataflow analyses that are based on it\n\t\tand indicating how they can be used to improve the code \n\t\tgenerated by a compiler.},\n keywords = {Constraint programming, Logic programming, Abstract interpretation, Semantics, Compilation},\n}\n\n\n
@Article{Son-Ses_ACTA90,\n author = {Harald S{\\o}ndergaard and \n\t\tPeter Sestoft},\n title\t = {Referential Transparency, Definiteness and Unfoldability},\n journal = {Acta Informatica},\n volume = {27},\n number = {6},\n pages\t = {505--517},\n year\t = {1990},\n note\t = {Reviewed in Computing Reviews 32(3): 144--145, 1991},\n doi = {10.1007/BF00277387},\n abstract = {The term ``referential transparency'' is frequently used\n\t\tto indicate that a programming language has certain useful\n\t\tproperties. We observe, however, that the formal and\n\t\tinformal definitions given in the literature are not\n\t\tequivalent and we investigate their relationships.\n\t\tTo this end, we study the definitions in the context of\n\t\ta simple expression language and show that in the presence\n\t\tof non-determinism, the differences are manifest. We \n\t\tpropose a definition of ``referential transparency'', based\n\t\ton Quine's, as well as of the related notions: definiteness\n\t\tand unfoldability. We demonstrate that these notions are\n\t\tuseful to characterize programming languages.},\n keywords = {Programming language concepts, Nondeterminism, Semantics},\n}\n\n\n
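@Comment{ Editor's illustration for the entry above, not from the paper:\n\ta worked instance, in a hypothetical Python mini-semantics, of how\n\tdefiniteness and unfoldability come apart under non-determinism:\n\tan expression denotes its set of possible values, a bound variable\n\tdenotes one value per evaluation, and unfolding the binding changes\n\tthe meaning.\n\n    amb = frozenset([1, 2])    # a non-deterministic choice\n\n    # let x = amb in x - x: the bound x is definite within one evaluation\n    let_bound = frozenset(x - x for x in amb)\n\n    # unfolding to amb - amb re-evaluates the choice\n    unfolded = frozenset(x - y for x in amb for y in amb)\n\n    assert let_bound == frozenset([0])\n    assert unfolded == frozenset([-1, 0, 1])\n}\n\n\n&#xD;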
@Article{Jon-Ses-Son_LASC89,\n author = {Neil D. Jones and \n\t\tPeter Sestoft and \n\t\tHarald S{\\o}ndergaard},\n title = {Mix: A Self-Applicable Partial Evaluator for Experiments in\n\t\tCompiler Generation},\n journal = {Lisp and Symbolic Computation},\n volume = {2},\n number = {1},\n pages = {9--50},\n year = {1989},\n doi = {10.1007/BF01806312},\n abstract = {The program transformation principle called partial \n\t\tevaluation has interesting applications in compilation\n\t\tand compiler generation. Self-applicable partial evaluators\n\t\tmay be used for transforming interpreters into corresponding\n\t\tcompilers and even for the generation of compiler generators.\n\t\tThis is useful because interpreters are significantly easier\n\t\tto write than compilers, but run much slower than compiled\n\t\tcode. A major difficulty in writing compilers (and compiler\n\t\tgenerators) is the thinking in terms of distinct binding\n\t\ttimes: run time and compile time (and compiler generation\n\t\ttime). The paper gives an introduction to partial\n\t\tevaluation and describes a fully automatic though \n\t\texperimental partial evaluator, called mix, able to generate\n\t\tstand-alone compilers as well as a compiler generator.\n\t\tMix partially evaluates programs written in Mixwell,\n\t\tessentially a first-order subset of statically scoped pure\n\t\tLisp. For compiler generation purposes it is necessary\n\t\tthat the partial evaluator be self-applicable. Even though\n\t\tthe potential utility of a self-applicable partial evaluator\n\t\thas been recognized since 1971, a 1984 version of mix\n\t\tappears to be the first successful implementation.\n\t\tThe overall structure of mix and the basic ideas behind its\n\t\tway of working are sketched. Finally, some results of using\n\t\ta version of mix are reported.},\n keywords = {Partial evaluation, Program transformation, Compilation},\n}\n\n\n
@Inproceedings{Mar-Son_IFIP89,\n author = {Kim Marriott and \n\t\tHarald S{\\o}ndergaard},\n title = {Semantics-Based Dataflow Analysis of Logic Programs},\n editor = {G. X. Ritter},\n booktitle = {Information Processing 89},\n pages = {601--606},\n publisher = {North-Holland},\n year = {1989},\n abstract = {The increased acceptance of Prolog has motivated widespread \n\t \tinterest in the semantics-based dataflow analysis of logic \n\t\tprograms and a number of different approaches have been \n\t\tsuggested. However, the relationships between these \n\t\tapproaches are not clear. The present paper provides a \n\t\tunifying introduction to the approaches by giving novel \n\t\tdenotational semantic definitions which capture their \n\t\tessence. In addition, the wide range of analysis tools \n\t\tsupported by semantics-based dataflow analysis are discussed.},\n keywords = {Logic programming, Prolog, Static analysis, Semantics},\n}\n\n\n
@Article{Mar-Son_occur89,\n author = {Kim Marriott and \n\t\tHarald S{\\o}ndergaard},\n title = {On {Prolog} and the Occur Check Problem},\n journal = {SIGPLAN Notices},\n volume = {24},\n number = {5},\n pages = {76--82},\n year = {1989},\n doi = {10.1145/66068.66075},\n abstract = {It is well known that omission of the occur check in \n\t\tunification leads to unsound Prolog systems. Nevertheless,\n\t \tmost Prolog systems omit the occur check because this\n\t\tmakes unification much faster and unsoundness allegedly\n\t\tseldom manifests itself. We revisit the occur check\n\t\tproblem and point to two aspects that have previously \n\t\treceived no attention. Firstly, ``unification without the\n\t\toccur check'' is ambiguous, and in practice, Prolog systems\n\t\tvary markedly in their reaction to programs having occur\n\t\tcheck problems. Secondly, even very simple program\n\t\ttransformations are unsafe for pure Prolog when the occur\n\t\tcheck is omitted. We conclude that the occur check problem\n\t\tis important, and in particular, that the current effort to\n\t\tstandardize Prolog should address it.},\n keywords = {Logic programming, Prolog, Unification},\n}\n\n\n
@Unpublished{Mar-Son_tutorial89,\n author = {Kim Marriott and \n\t\tHarald S{\\o}ndergaard},\n title = {Notes for a Tutorial on Abstract Interpretation of Logic \n\t\tPrograms},\n year = {1989},\n note = {Tutorial Notes for the 1989 North American Conference on \n\t\tLogic Programming. Association for Logic Programming, 1989},\n keywords = {Abstract interpretation, Abstract domains, Logic programming, Unification, Semantics, Groundness analysis},\n}\n\n\n&#xD;
@Phdthesis{Son_phd89,\n author = {Harald S{\\o}ndergaard},\n title\t = {Semantics-Based Analysis and Transformation of Logic Programs},\n school = {University of Copenhagen, Denmark},\n year\t = {1989},\n note\t = {Also available as: Technical Report 89/21,\n\t\tDepartment of Computer Science, \n\t\tThe University of Melbourne, 1990,\n\t\tDIKU Report 89/22, Department of Computer Science, \n\t\tUniversity of Copenhagen, 1990,\n\t\tand Technical Report \\#12,\n\t\tKey Center for Knowledge Based Systems,\n\t\tRMIT and the University of Melbourne, 1990},\n abstract = {Dataflow analysis is an essential component of many programming \n\t\ttools. One use of dataflow information is to identify errors in\n\t\ta program, as done by program ``debuggers'' and type checkers.\n\t\tAnother is in compilers and other program transformers, where \n\t\tthe analysis may guide various optimisations.\n\n\t\tThe correctness of a programming tool's dataflow analysis \n\t\tcomponent is usually of paramount importance. The theory of \n\t\tabstract interpretation, originally developed by P. and R. \n\t\tCousot, aims at providing a framework for the development of \n\t\tcorrect analysis tools. In this theory, dataflow analysis is \n\t\tviewed as ``non-standard'' semantics, and abstract \n\t\tinterpretation prescribes certain relations between standard\n\t\tand non-standard semantics, in order to guarantee the \n\t\tcorrectness of the non-standard semantics with respect to the \n\t\tstandard semantics.\n\n\t\tThe increasing acceptance of Prolog as a practical programming \n\t\tlanguage has motivated widespread interest in dataflow analysis\n\t\tof logic programs and especially in abstract interpretation.\n\t\tLogic programming languages are attractive from a semantical \n\t\tpoint of view, but dataflow analysis of logic programs is more \n\t\tcomplex than that of more traditional programming languages, \n\t\tsince dataflow is bi-directional (owing to unification) and \n\t\tcontrol flow is in terms of backtracking.\n\n\t\tThe present thesis is concerned with semantics-based dataflow \n\t\tanalysis of logic programs. We first set up a theory for \n\t\tdataflow analysis which is basically that of abstract \n\t\tinterpretation as introduced by P. and R. Cousot. We do, \n\t\thowever, relax the classical theory of abstract interpretation\n\t\tsomewhat, by giving up the demand for (unique) \\emph{best} \n\t\tapproximations.\n\n\t\tTwo different kinds of analysis of logic programs are then \n\t\tidentified: a \\emph{bottom-up} analysis yields an approximation\n\t\tto the success set (and possibly the failure set) of a program,\n\t\twhereas a \\emph{top-down} analysis yields an approximation to \n\t\tthe call patterns that would appear during an execution based \n\t\ton SLD resolution. We investigate some of the uses of the two \n\t\tkinds of analysis. In the bottom-up case, we pay special \n\t\tattention to (bottom-up) type inference and its use in program \n\t\tspecialisation. In the top-down case, we present a generic \n\t\t``dataflow semantics'' that encompasses many useful dataflow \n\t\tanalyses. As an instance of the generic semantics, a groundness\n\t\tanalysis is detailed, and a series of other applications are \n\t\tmentioned.\n\n\t\tWe finally present a transformation technique, based on \n\t\ttop-down analysis, for introducing difference-lists in a \n\t\tlist-manipulating program, without changing the program's \n\t\tsemantics. This may lead to substantial improvement in a \n\t\tprogram's execution time.},\n keywords = {Abstract interpretation, Abstract domains, Logic programming, \n\t \tSemantics, Many-valued logic, Unification, Groundness analysis,\n\t\tProgram transformation},\n}\n\n\n&#xD;
@Inproceedings{Mar-Son_ICSC88,\n author = {Kim Marriott and \n\t\tHarald S{\\o}ndergaard},\n title = {Prolog Program Transformation by Introduction of \n\t\tDifference Lists},\n booktitle = {Proceedings of the International Computer Science \n\t\tConference '88},\n pages = {206--213},\n publisher = {IEEE Computer Society},\n location = {Hong Kong},\n year = {1988},\n keywords = {Program transformation, Logic programming, Prolog},\n}\n\n\n
@Inproceedings{Mar-Son_ICSLP88,\n author = {Kim Marriott and \n\t\tHarald S{\\o}ndergaard},\n title\t = {Bottom-Up Abstract Interpretation of Logic Programs},\n editor = {R. A. Kowalski and K. A. Bowen},\n booktitle = {Logic Programming: Proceedings of the Fifth International \n\t\tConference and Symposium},\n pages\t = {733--748},\n publisher = {MIT Press}, \n year\t = {1988},\n abstract = {Abstract interpretation is useful in the design and \n\t\tverification of dataflow analyses. Given a programming\n\t\tlanguage and a fixpoint characterization of its semantics,\n\t\tabstract interpretation enables the explicit formulation\n\t\tof the relation between a dataflow analysis and the \n\t\tsemantics. A key notion in this is that of a ``collecting''\n\t\tsemantics---an extension of the semantics that attaches\n\t\tinformation about run-time behaviour to program points.\n\t \tDifferent collecting semantics lead to different types of\n\t\tdataflow analysis. We present a novel collecting semantics \n\t\tfor normal logic programs. It is based on a Fitting-style \n\t\tbottom-up semantics and forms a firm semantic base for\n\t\tbottom-up dataflow analyses. One such analysis, using\n\t\tdescriptions similar to Sato and Tamaki's depth k\n\t\tabstractions, is detailed and shown to be sound with \n\t\trespect to the collecting semantics. Its use in program\n\t\tspecialization and termination analysis is demonstrated.},\n keywords = {Abstract interpretation, Logic programming, Many-valued logic, Depth k analysis, Program transformation, Semantics, Termination analysis},\n}\n\n\n
@Techreport{Mar-Son_success_patterns88,\n author = {Kim Marriott and \n\t\tHarald S{\\o}ndergaard},\n title = {On Describing Success Patterns of Logic Programs},\n number = {88/12},\n institution = {Department of Computer Science, The University of Melbourne},\n year = {1988},\n abstract = {Sato and Tamaki's depth $k$ abstractions are reviewed, and their\n\t\tuse as (finite) descriptions of (infinite) sets of expressions\n\t\t(terms and atomic formulas) is discussed. This leads to an\n\t\tinvestigation of other classes of expressions. A certain\n\t\tproperty of a class is identified as being sufficient for the\n\t\texistence of a Galois insertion from the class to the set of\n\t\tall expressions, and thus for abstract interpretation to be\n\t\tapplicable. Sato and Tamaki's class does not have the property,\n\t\tbut it is shown how it can be extended to a class having the\n\t\tproperty while still remaining finite.},\n keywords = {Abstract interpretation, Abstract domains, Depth k analysis},\n}\n\n\n
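Aside on the two entries above: depth-$k$ abstraction describes an infinite set of terms by finitely many truncated terms. A minimal Haskell sketch of the truncation step (the Term type and the single don't-know marker are simplifications made for this sketch; the cited formulation introduces fresh variables at the cut points):

-- A first-order term: a variable, or a function symbol applied to subterms.
data Term = Var String | Fun String [Term]
  deriving (Eq, Show)

-- Truncate a term at depth k, replacing deeper structure by a
-- don't-know marker. Over a finite signature the set of depth-k
-- truncations is finite, so it can serve as an abstract domain.
depthK :: Int -> Term -> Term
depthK 0 _            = Var "_"          -- cut point: "any term"
depthK _ t@(Var _)    = t
depthK k (Fun f args) = Fun f (map (depthK (k - 1)) args)

-- s(s(s(z))) truncated at depth 2 yields s(s(_)).
main :: IO ()
main = print (depthK 2 (Fun "s" [Fun "s" [Fun "s" [Fun "z" []]]]))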
@Article{Ses-Son_pebibl88,\n author = {Peter Sestoft and \n\t\tHarald S{\\o}ndergaard},\n title = {A Bibliography on Partial Evaluation},\n journal = {SIGPLAN Notices},\n volume = {23},\n number = {2},\n pages = {19--27},\n year = {1988},\n doi = {10.1145/43908.43910},\n abstract = {This note gives a short introduction to partial evaluation \n\t\tand an extensive, annotated bibliography of the field. \n\t\tThe bibliography is restricted to cover public-domain \n\t\tliterature in English.},\n keywords = {Partial evaluation},\n}\n\n\n
@Incollection{Jon-Ses-Son_mix-abstract87,\n author = {Neil D. Jones and \n\t\tPeter Sestoft and \n\t\tHarald S{\\o}ndergaard},\n title = {Mix: A Self-Applicable Partial Evaluator for Experiments in\n\t\tCompiler Generation---Extended Abstract},\n editor = {M. Main and others},\n booktitle = {Mathematical Foundations of Programming Language Semantics},\n series = {Lecture Notes in Computer Science},\n volume = {298},\n publisher = {Springer-Verlag},\n pages = {386--413},\n year = {1987},\n doi = {10.1007/3-540-19020-1_21},\n keywords = {Partial evaluation, Program transformation, Compilation},\n}\n\n\n
@Incollection{Jon-Son_absint87,\n author = {Neil D. Jones and \n\t\tHarald S{\\o}ndergaard},\n title = {A Semantics-Based Framework for the Abstract Interpretation of \n\t\t{Prolog}},\n editor = {S. Abramsky and C. Hankin},\n booktitle = {Abstract Interpretation of Declarative Languages},\n chapter = {6},\n pages = {123--142},\n publisher = {Ellis Horwood},\n year = {1987},\n keywords = {Abstract interpretation, Logic programming, Prolog, Groundness analysis, Sharing analysis},\n}\n\n\n
@Inproceedings{Son_ESOP86,\n author = {Harald S{\\o}ndergaard},\n title = {An Application of Abstract Interpretation of Logic Programs:\n\t\tOccur Check Reduction},\n editor = {B. Robinet and R. Wilhelm},\n booktitle = {Proceedings of the European Symposium on Programming---ESOP 86},\n series = {Lecture Notes in Computer Science},\n volume = {213},\n pages = {327--338},\n publisher = {Springer-Verlag},\n year = {1986},\n doi = {10.1007/3-540-16442-1_25},\n abstract = {The occur check in Robinson unification is superfluous in most \n\t\tunifications that take place in practice. The present paper is \n\t\tconcerned with the problem of determining circumstances under \n\t\twhich the occur check may safely be dispensed with. The method \n\t\tgiven draws on one outlined by David Plaisted. The framework, \n\t\thowever, differs in that we systematically apply the abstract \n\t\tinterpretation principle to logic programs. The aim is to give\n\t\ta clear presentation and to facilitate justification of the \n\t\tsoundness of the method.},\n keywords = {Abstract interpretation, Logic programming, Prolog, Unification},\n}\n\n\n
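Aside on the entry above: the occur check is the test in Robinson unification that rejects binding a variable to a term containing that same variable (e.g. X = f(X)). Most Prolog systems omit the check for speed, which is what makes an analysis identifying where omission is safe valuable. A small self-contained Haskell sketch of syntactic unification with the check (illustrative code under simplified assumptions, not the paper's method):

import Control.Monad (foldM)
import qualified Data.Map as M

data Term = Var String | Fun String [Term]
  deriving (Eq, Show)

type Subst = M.Map String Term

-- Chase variable bindings to a representative term.
walk :: Subst -> Term -> Term
walk s t@(Var v) = maybe t (walk s) (M.lookup v s)
walk _ t         = t

-- The occur check: does variable v occur in term t (under s)?
occurs :: Subst -> String -> Term -> Bool
occurs s v t = case walk s t of
  Var w      -> v == w
  Fun _ args -> any (occurs s v) args

unify :: Subst -> Term -> Term -> Maybe Subst
unify s a b = case (walk s a, walk s b) of
  (Var v, Var w) | v == w -> Just s
  (Var v, t)
    | occurs s v t -> Nothing                 -- reject e.g. X = f(X)
    | otherwise    -> Just (M.insert v t s)
  (t, Var v)       -> unify s (Var v) t
  (Fun f ts, Fun g us)
    | f == g && length ts == length us
                   -> foldM (\s' (t, u) -> unify s' t u) s (zip ts us)
  _                -> Nothing

main :: IO ()
main = do
  print (unify M.empty (Var "X") (Fun "f" [Var "X"]))    -- Nothing (occur check)
  print (unify M.empty (Fun "f" [Var "X", Fun "a" []])
                       (Fun "f" [Fun "b" [], Var "Y"]))  -- Just (X -> b, Y -> a)

The first unification fails precisely because of the occur check; the second succeeds. Skipping the check makes the first case build a cyclic binding, which is the unsoundness the paper's analysis is designed to rule out.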
@Incollection{Jon-Ses-Son_RTA85,\n author = {Neil D. Jones and \n\t\tPeter Sestoft and \n\t\tHarald S{\\o}ndergaard},\n title = {An Experiment in Partial Evaluation:\n\t\t\tThe Generation of a Compiler Generator},\n editor = {J.-P. Jouannaud},\n booktitle = {Rewriting Techniques and Applications},\n series = {Lecture Notes in Computer Science},\n volume = {202},\n pages = {124--140},\n publisher = {Springer-Verlag},\n year = {1985},\n note = {Reviewed in Computing Reviews 27(7): 353, 1986},\n doi = {10.1007/3-540-15976-2_6},\n keywords = {Partial evaluation, Program transformation, Compilation},\n}\n\n\n
@Article{Jon-Ses-Son_SIGPLANN85,\n author = {Neil D. Jones and \n\t\tPeter Sestoft and \n\t\tHarald S{\\o}ndergaard},\n title = {An Experiment in Partial Evaluation: The Generation of a \n\t\tCompiler Generator (Extended Abstract)},\n journal = {SIGPLAN Notices},\n volume = {20},\n number = {8},\n pages = {82--87},\n year = {1985},\n doi = {10.1145/988346.988358},\n keywords = {Partial evaluation, Program transformation, Compilation},\n}\n\n\n