@comment{
  BibBase embedding snippets and site notices that were accidentally saved into this
  .bib file (BibTeX ignores text outside entries, but they are wrapped in @comment
  here to make that explicit):

  <script src="https://bibbase.org/show?bib=https%3A%2F%2Fbibbase.org%2Fzotero-mypublications%2Fimuntean%40gmail.com&jsonp=1"></script>
  <?php
  $contents = file_get_contents("https://bibbase.org/show?bib=https%3A%2F%2Fbibbase.org%2Fzotero-mypublications%2Fimuntean%40gmail.com");
  print_r($contents);
  ?>
  <iframe src="https://bibbase.org/show?bib=https%3A%2F%2Fbibbase.org%2Fzotero-mypublications%2Fimuntean%40gmail.com"></iframe>
  For more details see the documentation.
  To the site owner:
  Action required! Mendeley is changing its API. In order to keep using Mendeley with BibBase past April 14th, you need to:
}
@article{munteanMoralCoherentismAge2024,
  title     = {Moral coherentism in the age of {Artificial} {Intelligence}. {A} pattern-based project in machine moral learning},
  copyright = {All rights reserved. Do not quote without permission},
  abstract  = {The current project focuses on models of ‘artificial moral learning’ (as a type of moral cognition) and ‘moral coherentism.’ It clarifies how artificial moral agency sheds light on some meta-ethical questions in the coherentism framework (Brink, Dorsey, Lynch, Sayre-McCord). Data in artificial moral cognition is assumed to be divided into two subspaces, moral and factual, and to contain complex, machine-learnable patterns. Inspired by Lynch’s ‘moral concordance,’ some schematic models of this type of two-dimensional data are proposed and assessed. The last, more comprehensive model is premised on the theoretical concept of ‘distributed concordance’ over a population of artificial moral agents. The paper concludes that coherentism, when generalized to machine ethics and artificial moral learning, has some advantages over foundationalist or reliabilist approaches in meta-ethics.},
  author    = {Muntean, Ioan},
  journal   = {American Philosophical Quarterly},
  volume    = {61},
  number    = {2},
  year      = {2024},
  note      = {In press. Preprint available on demand.},
  internal-note = {review: converted from @misc to @article using the publication details given in the original note ("To appear in American Philosophical Quarterly, volume 61, issue 2"); redundant leading "Abstract " removed from the abstract field},
}
@unpublished{noauthor_facts-values_2024,
  address  = {UTRGV},
  title    = {Facts-values, apriorism, coherentism, naturalism: reassessing philosophical dichotomies in the age of {AI}},
  abstract = {In this talk, I argue that the Fact-Value (V/F) dichotomy, in a modified version, can be used to develop AI models in “machine ethics” and philosophical approaches to AI. More generally, it is reviewed here whether (a) philosophy's history and some celebrated results within are relevant to the current development of AI and ML (machine learning). I also reflect on (b) how AI architectures can be constructed to reflect what philosophers have discussed for centuries. In the line of (a) and recent literature on the philosophy of AI (Buckner, Magnani, Humphreys, Chalmers, etc.), I evaluate the way canonical dichotomies from philosophy help understand better current progress and limitations in AI and ML. For (b) I focus on a modified F/V dichotomy and how it can be reconstructed in the age of AI and used to design machines that may develop “moral cognition.” The roots of the V/F dichotomy, its dismissal, and the connection to the analytic-synthetic distinction are also explored as potential components of ‘philosophical models’ of AI.},
  author   = {Muntean, Ioan},
  language = {2. Philosophy of computation},
  year     = {2024},
  note     = {Talk given at UTRGV},
  internal-note = {review: @unpublished requires author and note; author added from the file owner's publication feed -- confirm},
}
@unpublished{ScientificProgressFunction2023,
  title     = {Scientific progress as a function of creativity: the ‘distributive’ approach},
  copyright = {All rights reserved},
  abstract  = {This paper aims to show how creativity, interpreted as a distributive process, contributes to the progress of a scientific discipline D. First, we show that two mainstream approaches to scientific progress (qua advancement of knowledge and qua problem-solving) both imply the interaction with an epistemic environment. Second, we interpret creativity as a distributed property over a solution to a problem (or an idea, proof, etc.) and multiple epistemic environments. Progress of D is both constrained and boosted by distributed creativity of D. In this functional interpretation, creativity operates as a “feedback loop” (both positive or negative) within the dynamics of a scientific discipline D. The conclusion is that, given the distributive nature of progress and creativity, the latter plays a functional role in the dynamics of the former.},
  author    = {Muntean, Ioan},
  language  = {1. Philosophy of science},
  month     = may,
  year      = {2024},
  note      = {Unpublished manuscript},
  internal-note = {review: citation key says 2023 but the year field is 2024 -- confirm the correct date; author and required note added},
}
@article{noauthor_mechanisms_2024,
  title     = {Mechanisms “all the way down”? {Review} article of {Mechanisms} in {Physics} and {Beyond} by {B}. {Falkenburg} and {G}. {Schiemann} (editors)},
  copyright = {All rights reserved},
  author    = {Muntean, Ioan},
  journal   = {Philosophical Problems in Science (ZFN)},
  year      = {2024},
  note      = {Published in Philosophical Problems in Science (ZFN), Poland},
  internal-note = {review: was @techreport without the required institution/author; converted to @article per the note stating it is published in ZFN; editor surname corrected from “Falkeburg” to “Falkenburg” -- verify against the book},
}
@unpublished{ExplanationRepresentationComputational2022a,
  title     = {Explanation without representation in computational models: the machine learning case},
  copyright = {All rights reserved},
  author    = {Muntean, Ioan},
  language  = {1. Philosophy of science},
  month     = jun,
  year      = {2024},
  note      = {Presented at APA Pacific 2022},
  internal-note = {review: @unpublished requires an author; added from the file owner's publication feed -- confirm},
}
@unpublished{munteanAlgorithmicExplanationsMachine2023,
  title     = {Algorithmic explanations in machine learning: in search for explananda},
  copyright = {All rights reserved},
  abstract  = {This paper evaluates the explanatory power of a class of machine-learning algorithms (MLAs) when they are used on Big-Data datasets. By acknowledging that as powerful categorization and classification tools, MLAs discover patterns in data (rather than properties of real systems or processes), this paper investigates whether MLA explain something at all, without representing a target. The upshot of this paper is to accept that MLAs explain second- or higher-order properties of Big Data. Based on an analogy between MLA and ensembles of scientific models, this paper considers model explanation as a separate issue from the accuracy of model representation. Similar to some “non-representational” models (e.g. “minimal models”, “exploratory models” etc.), some MLA can explain features present in Big Data, without representing reality or a real system. Overall, the paper argues that MLAs do not offer ‘how-actually’ explanations of real-world targets but answer some “how-possibly” questions about explananda, such as scales of representation, categories of systems, or parameters of theories. Even if MLA do not directly represent a target, they convey information about patterns in data, which are called here “quasi-target systems”. Although MLAs do not directly represent a target system (this failed connection is called “link uncertainty” by E. Sullivan), they can be explanatory (in a weaker sense than typical explanations) because they provide information about an explanandum, in this case quasi-targets. Some possible candidates for MLA explananda are considered here based mainly on the structure of MLA.},
  language  = {2. Philosophy of computation},
  author    = {Muntean, Ioan},
  year      = {2024},
  note      = {Presented at IACAP 2022 and APA 2023},
}
@unpublished{zotero-19046,
  title     = {Ontic and epistemic explanations: their coexistence, interdependence and interoperability in epistemic environments},
  copyright = {All rights reserved},
  abstract  = {It is now commonly held that there are several ways of conceiving scientific explanation, but the most preeminent are the ontic and the epistemic conceptions (the distinction can be traced back to W. Salmon and A. Coffa). The class of Ontic Conceptions (OC) takes explanations as exhibiting relations (causal, mechanistic, etc.) or structures among real entities in the world (Salmon, Craver, Strevens, Glennan). OC is usually contrasted with and opposed conceptually to the epistemic conception (EC), according to which explanations are epistemic activities of scientists that operate with representations of entities and their relations (the explanandum and explanans together) rather than entities or structures themselves (Bechtel, Bokulich, Illari, Wright, i.a.). Recent literature shows a third trend aiming to diffuse and diminish the difference between OC and EC (Craver, Illari, van Eck, and partially Sheredos) by stating that best explanations fulfill both ontic and epistemic norms. This paper follows this third line of thought of reconciliation. As Sheredos argued, an OC-EC reconciliation can be framed in terms of norms and constraints on explanatory power. Sheredos takes Illari’s and Craver’s reconciliation argument as too strong and shows that epistemic and ontic norms cannot be fulfilled simultaneously. This paper offers another framework in which reconciliation is possible and argues for the coexistence and interdependence of epistemic and ontic types of explanation. It introduces the idea of “epistemic environments” in which activities such as: (i) knowledge-production, (ii) problem-solving and (iii) norm-generation, depend on both OC and EC explanations. 
In the present account, we need to focus on the outcome of successful explanation as a function operating on epistemic environments. Sheredos and others suggest that the same explanation can display epistemic and ontic phases: the suggestion presented here is to take two or more explanations operating and explaining the same explanandum as functions operating in epistemic environments. We transition from analyzing successful individual explanations to talking about clusters of explanations (both in EC and the OC framework) that successfully transform an epistemic environment. The approach is perspectival, as success is defined here based on scientific aims or standards. The paper concludes by showing this approach’s weaknesses and strengths for some examples of mathematical, mechanistic, and nomological explanations.},
  author    = {Muntean, Ioan},
  year      = {2024},
  note      = {Unpublished manuscript},
  internal-note = {review: auto-generated Zotero key; @unpublished requires author and note, both added -- confirm author},
}
@unpublished{AutonomyIntelligenceArtificial2022a,
  title     = {Autonomy and intelligence of artificial agents: modeling, experimentation and simulation in {AI}},
  copyright = {All rights reserved},
  author    = {Muntean, Ioan},
  language  = {2. Philosophy of computation},
  month     = oct,
  year      = {2023},
  note      = {Presented at PSX6 in Saint Louis, October 2021},
  internal-note = {review: @unpublished requires an author; added from the file owner's publication feed -- confirm},
}
@unpublished{ProbabilisticfunctionalApproachPerspectivism2021a,
  title     = {A probabilistic-functional approach to perspectivism},
  copyright = {All rights reserved},
  abstract  = {2. This paper promotes a functional approach to ‘scientific perspectivism’ (M. Massimi and P. Teller), as applied to scientific models and clarifies why perspectivism entails model pluralism. A functional approach to perspectivism offers a path of reconciliation between model pluralism and scientific realism. The argument consist of these steps: i) revisits issues in contemporary perspectivism and how they relate to models; ii) reconsiders an account of models (Frigg and Nguyen) in which representation accuracy are central; iii) reformulates such an account in functionalist terms; iv) relates scientific perspectives to various representational functions in probabilistic terms; v) characterizes model pluralism as a functionalist feature and vi) discusses model convergence in functionalist terms and the prospect of reconciling realism and pluralism. The paper emphasizes the advantages of a functional/probabilistic approach to perspectivism: ability to formalize convergence of some models and the stability of some models to a change of perspectives.},
  author    = {Muntean, Ioan},
  language  = {1. Philosophy of science},
  month     = feb,
  year      = {2023},
  note      = {Presented at ECAP10, August 2020. Potential root ideas for the APA Central 2022 presentation},
  internal-note = {review: @unpublished requires an author; added. The stray "2. " at the start of the abstract looks like a pasted numbering artifact -- verify before removing},
}
@unpublished{munteanFunctionalInterventionistApproach2023,
  title     = {A functional and interventionist approach to scientific progress: computational science at work},
  copyright = {All rights reserved},
  abstract  = {A functional and interventionist approach to scientific progress: 1. Abstract We start with a plausible assumption: ‘scientific progress’ integrates philosophy and history of science. We can informally define scientific progress as a process that connects two (or more) stages of a scientific discipline D: it is said that D progresses from stage D1 to stage D2 if the latter improves compared to D1, based on a standard S. Knowing enough about D1 and about D2 is sometimes a daunting task for the historian; understanding what S is supposed to be is for the philosopher to discover. And there is the normative aspect of progress: what does it mean “to improve” a discipline, and what is S? Most philosophers and historians would agree that scientific progress is not an internal process to science, but a complex interplay of economical, educational, professional, societal, and technological dimensions. This paper discusses historical and philosophical aspects of the latter dimension: in what sense is technological advancement a crucial component of scientific progress? If this correlation is granted, we can call it ‘techno-scientific progress.’ It assumes that the interplay between science and technology is adamant to scientific progress. This paper adopts a dual approach to standard S: first functionalist (as “problem-solving” function) and second interventionist. In short, technologies solve problems (or “puzzles”) in science and improve our knowledge about possible, albeit not actual, interventions (mostly causal). One technology and two episodes in science illustrate well this dual nature of progress. 
The technology that played a vital role in the progress of science is computer science (computational science is the application of computer science to scientific disciplines). This paper investigates in what sense recent scientific progress is correlated to advancement in computation. We take computer simulations and more recently machine learning algorithms as methodological tools used to solve problems in science and to expand our knowledge about possible interventions. To relate scientific progress to computational science and its advancement, we need to clarify a S, the standard of improvement. In the functionalist approach, S is more or less a function, typically a problem-solving function. Progress is obtained when this function is fulfilled (Laudan 1978; Shan 2019). In the epistemic approach, progress is defined as the accumulation of scientific knowledge (Bird 2008). In the present approach, we will use a specific epistemic approach in which knowledge obtained from computational methods is related to interventions. The backbone of the present argument is to suggest that the combination of the functional and epistemic approaches to techno-scientific progress is more accurate both from a descriptive (historical) and normative point of view. It captures relevant episodes in the development of recent scientific disciplines. Scientific progress can be conceptualized in many ways, but in a functionalist vein, one can define it as an effective and efficient way of solving new and old problems: “science progresses just in case successive theories solve more problems than their predecessors” (Laudan 1981). The functionalist approach to progress adopted here retains the role of explanation in clarifying and solving problems in science, as well as it being part of the solution. What does it mean to solve a problem by numerical simulations? 
Here our approach follows the standard approach in the philosophy of science that considers numerical simulations as solutions to intractable problems (Humphreys 2009; Parker 2009; Winsberg 2010). Numerical simulations are typical problem-solving tools and from a functional point of view, we take them as a component of scientific progress. In respect of interventionism, we define an intervention as a change in the setup of a scientific experiment or observation. According to Bain, scientific progress can be identified with the accumulation of knowledge. We argue in this paper that the advance of knowledge those computational methods bring to science is interventionist in nature: without performing actual experiments computer simulations can inform scientists about possible interventions and their results. An intervention is defined here in a more pragmatic way, as an activity that enables the scientist to change both the initial conditions under which we gain information about a system and the rules (or laws) that govern such a system. This paper concludes with a discussion of two case studies. First, as a clear case of interventionist progress, we offer a short history of the Lazarus project. This project simulated a binary black hole exclusively based on numerical simulation (Baker, Campanelli, and Lousto 2002). The history of this project and its results are discussed in some detail both as a functionalist and interventionist techno-scientific progress. Second, as a case of functionalist techno-scientific progress, the DeepMind project called AlphaFold2 is a cluster of models that can predict the way a protein may fold in 3D when the input is the amino-acid sequence of the protein. It can compete with real experimental methods (X-ray crystallography or cryo-electron microscopy). 
The problem here (CASP=Critical Assessment of Structure Prediction) is a competition to predict spatial structures of proteins that have been solved using experimental methods, “but for which the structures have not been made public.” (Callaway 2022) In conclusion, we showed how the concept of ‘techno-scientific progress’ integrates (recent) history and the philosophy of science with the advancement in computational science.},
  language  = {1. Philosophy of science},
  author    = {Muntean, Ioan},
  year      = {2023},
  note      = {Presented at HPS9, 2023 03},
}
@unpublished{muntean_artificial_2023,
  title     = {Artificial creativity and artificial agency in science},
  copyright = {All rights reserved},
  language  = {2. Philosophy of computation},
  author    = {Muntean, Ioan},
  year      = {2023},
  note      = {Unpublished manuscript},
}
@unpublished{DigitalSelfknowledgeModel2022a,
  title     = {Digital self-knowledge: model building, understanding, explanation of the self in the digital world},
  copyright = {All rights reserved},
  author    = {Muntean, Ioan},
  language  = {2. Philosophy of computation},
  month     = apr,
  year      = {2022},
  note      = {Presented as an internal talk at UTRGV in April 2022},
  internal-note = {review: @unpublished requires an author; added from the file owner's publication feed -- confirm},
}
@unpublished{TrustArtificialAgency2022,
  title     = {Trust and artificial agency: a metacognitive proposal},
  copyright = {All rights reserved},
  abstract  = {This paper argues for a form of trust (called here ‘A-trust’) in artificial agents (e.g. AI, machine learning algorithms, robots, autonomous vehicles, etc.) with a certain degree of autonomy (called ‘artificial autonomous agents’=AAA). Several requirements on A-trust are discussed: trust in the creators (humans, companies, institutions) of the AAA, trust in the science and the technology used in their design, or reliability in the training data. We argue that A-trust is better approached in epistemology as a multi-level concept in which the AAA displays certain cognitive and metacognitive dispositions. What dispositions does the AAA need to be trustworthy to humans? We adopt a form of naturalized virtue epistemology of AAA, emphasizing metacognitive dispositions of AAA important to A-trust. We relate meta cognitivism about trust to recent literature in philosophy, cognitive science, and AI: we discuss the advantages of a multi-level virtue epistemology (Sosa, Greco, Baehr, Carter), some neural models of metacognition (Nelson, Fleming, Timmermans) and relate them to the debate about uncertainty in Machine Learning (McKay, Gal, Ghahramani). Regarding the rationality of A-trust, we sketch a Bayesian model of confidence, error detection, and model uncertainty. The A-trust proposed here requires that AAA can deal in the right way with uncertainty. We emphasize the advantages of a multi-level approach to A-trust and discuss briefly the ethical implications of metacognitive requirements on A-trust.},
  author    = {Muntean, Ioan},
  language  = {2. Philosophy of computation},
  year      = {2022},
  note      = {Unpublished manuscript},
  internal-note = {review: @unpublished requires author and note, both added -- confirm author},
}
@unpublished{PerspectivesModelsFunctional2022a,
  title     = {Perspectives and models: a functional and relational approach. {How} perspectives make models represent},
  copyright = {All rights reserved},
  abstract  = {This paper proposes a functional and relational approach to perspectival realism (PR) a form of realism initially proposed by R. Giere (2006) and B. van Fraassen (2008), and reiterated in new forms by M. Massimi (2016, 2018, 2020) and P. Teller (2018,2020), i.a. What role do perspectives play in the ways in which a model (M) represents a target (T)? We start with the DEKI (delineation, exemplification, key-in, imputation) approach to models as `representation-as' proposed by R. Frigg and J. Nguyen (2016, 2017, 2018) and with a classification of perspectives as used in PR arguments, based on their characteristics and relations (mostly the perspectives qua modeling schemes in Teller and as standards of model assessment in Massimi). We use the DEKI approach and the functional delineation of perspectives to elucidate how scientific perspectives change M's representational function of T. A perspective can select properties of M or, more germane to our approach, it constrains or expands the mapping functions of the DEKI model. Here we focus on the dependence of the ‘key-in’ (K) map (as a central component of the DEKI approach) on a perspective. The main aims of this paper are: (a) to expose a functional and probabilistic role for perspectives in M's representational success and (b) to show how this approach improves the realist component of PR arguments by promoting the idea of ‘perspectival convergence’ of models. We suggest a couple of case studies that exemplify our approach.},
  author    = {Muntean, Ioan},
  language  = {1. Philosophy of science},
  month     = jan,
  year      = {2022},
  note      = {a 30+ pages manuscript in preparation; presented at Principia Symposium (Brasil), SIFA 2021 and APA 2022},
  internal-note = {review: @unpublished requires an author; added from the file owner's publication feed -- confirm},
}
@unpublished{MetacognitionTrustArtificial2021,
  title     = {Metacognition of trust: artificial agents, science, and {Bayes}},
  copyright = {All rights reserved},
  abstract  = {What do we need to trust non-humans? Is our trust, in this case, rational? This paper analyses different dimensions of trust in artificial agents (especially AI algorithms based on Machine Learning). We focus on some metacognitive requirements that a human trustor H can impose on an artificial agent (AA) as a trustee. The analysis is inspired by an analogy with the acceptance of scientific theories in the absence of direct evidence. We show that similar to the case in which a human H trusts a scientific field (collections of theories, models, disciplines, etc.), we can demand from AA several metacognitive capacities that will make this relationship of trust rational for H. We apply a version of the Bayesian cognitive science to some Machine Learning algorithms and infer that a series of intrinsic features can grant rationality and trust to these special AA agents},
  author    = {Muntean, Ioan},
  language  = {3. Philosophy of cognitive science},
  month     = jun,
  year      = {2021},
  note      = {Full paper, about 3000 words long. Available on demand. Presented at CEPE in May 2019.},
  internal-note = {review: @unpublished requires an author; added from the file owner's publication feed -- confirm},
}
@unpublished{ConsciousnessDigitalIdentity2021,
  title     = {Consciousness and the digital identity. {A} {Bayesian} proposal},
  copyright = {All rights reserved},
  abstract  = {This is not a full paper, abstract only.},
  author    = {Muntean, Ioan},
  language  = {2. Philosophy of computation},
  month     = jan,
  year      = {2021},
  note      = {Abstract only; not a full paper},
  internal-note = {review: language category fixed from "2 .Philosophy of computation" to the file-wide "2. Philosophy of computation"; required author and note added},
}
@unpublished{ScienceHumanismAccuracy2020a,
  title     = {Science and humanism between accuracy and confidence: a plea for a metacognitive approach to trust in science},
  copyright = {All rights reserved},
  abstract  = {Some philosophers claim that science has a privileged epistemic position and that we should trust foremostly science in representing reality, pursuing truth, understanding, and changing the world (see “scientism” J. Ladyman, D. Ross and its criticism by B. Williams i.a.). Humanists may retort and press further on this question: when is it rational to trust science, or more specifically a scientific representation? This paper focuses on the rationality of trust in science (N. Cartwright, N. Oreskes, M. Morrison) and ascertains the role of a certain metacognitive component of rational trust in a scientific representation SR (a model, a theory, a hypothesis). By insisting on the metacognition nature of the rational trust in our best scientific representations, one can enhance the public understanding of science and avoid science-denialism. We discuss (i) accuracy as first-order cognitive feature and (ii) confidence level as a second-order metacognitive property of SR. Confidence is typically associated to a metacognitive ability in humans (or artificial agents): a “second-order” ability to evaluate the representation of a target system (J. Proust, P. Carruthers) or a “high-level knowledge” as a epistemic competence (Sosa, Baehr). In a naturalistic vein, metacognition encodes better the epistemic humbleness of the scientific representation, a feature needed, as the argument goes, in humanism. When the accuracy of SR is complemented (and augmented) with a certain level of confidence in this representation, it is more rational to trust SR and less rational to adopt some form of denialism about SR. We suggest a connection between confidence as a metacognitive ability and the way a scientific model represents (the DEKI approach of R. Frigg \& J. 
Nguyen), especially with the K ‘mapping’. The conclusion is that by striking the balance between accuracy and confidence of SR, humanists can strengthen the argument about the rationality of trust in science.},
  author    = {Muntean, Ioan},
  language  = {1. Philosophy of science},
  month     = nov,
  year      = {2020},
  note      = {Presented at a workshop at U of Miami in November 2020},
  internal-note = {review: @unpublished requires an author; added from the file owner's publication feed -- confirm},
}
@unpublished{MetacognitiveApproachTrust2019,
  title     = {A metacognitive approach to trust and a case study: artificial agency},
  abstract  = {Trust is defined as a belief of a human H (‘the trustor’) about the ability of an agent A (the ‘trustee’) to perform future action(s). We adopt here dispositionalism and internalism about trust: H trusts A iff A has some internal dispositions as competences. The dispositional competences of A are high-level metacognitive requirements, in the line of a naturalized virtue epistemology. (Sosa, Carter) We advance a Bayesian model of two (i) confidence in the decision and (ii) model uncertainty. To trust A, H demands A to be self-assertive about confidence and able to self-correct its own models. In the Bayesian approach trust can be applied not only to humans, but to artificial agents (e.g. Machine Learning algorithms). We explain the advantage the metacognitive trust when compared to mainstream approaches and how it relates to virtue epistemology. The metacognitive ethics of trust is swiftly discussed.},
  author    = {Muntean, Ioan},
  language  = {3. Philosophy of cognitive science},
  month     = sep,
  year      = {2019},
  note      = {3000 words manuscript, available on demand},
  internal-note = {review: @unpublished requires an author; added. This is the only entry without a copyright field -- confirm whether one was intended},
}
@unpublished{CoherentismDataPatterns2019a,
  title     = {Coherentism, data, and patterns in ethics. {The} machine moral learning case},
  copyright = {All rights reserved},
  author    = {Muntean, Ioan},
  language  = {4. Applied ethics: machine ethics},
  month     = apr,
  year      = {2019},
  note      = {Unpublished manuscript},
  internal-note = {review: the category "4. Applied ethics: machine ethics" was stored in shorttitle; moved to language to match every other entry in this file; required author and note added},
}
@unpublished{ErrorTractabilityFallibilism2019a,
  title     = {Error, tractability, and fallibilism in machine learning. {One} case study from particle physics},
  copyright = {All rights reserved},
  author    = {Muntean, Ioan},
  language  = {2. Philosophy of computation},
  month     = mar,
  year      = {2019},
  note      = {Unpublished manuscript},
}
@unpublished{MoralTechnologicalCognition2019a,
  title     = {Moral and technological cognition: evolution and learning},
  copyright = {All rights reserved},
  author    = {Muntean, Ioan},
  language  = {4. Applied ethics: machine ethics},
  month     = jan,
  year      = {2019},
  note      = {Unpublished manuscript},
}
@unpublished{noauthor_can_2019,
  title     = {Can quantum metaphysical indeterminacy be relational? {An} approach based on decoherence},
  copyright = {All rights reserved},
  abstract  = {The topic of quantum metaphysical indeterminacy (QMI) is thriving in the philosophy of physics and in the metaphysics literature. (Skow 2010; Darby 2010; Wolff 2015; but see Glick 2017 for the opposite view; Calosi and Wilson 2018) On some interpretations of quantum mechanics, a quantum system Sq violates the supposition of ‘property value determinacy,’ according to which a property PD of a physical system (which can be an observable) does not have precise values at all times, so it is indeterminate. When SQ is in an eigenstate for some observable, it definitely has the value for that observable; when SQ is not in an eigenstate of an observable, it is indeterminate for each property whether it has the value of the observable. On some other interpretations (most notably, the Bohmian interpretation) there is no quantum indeterminacy. This paper explores an idea suggested in the literature, but not extensively: a relational concept of indeterminacy. In this specific case, we reconstruct indeterminacy as a determinable/determinate distinction of relational indeterminacy. As it is logically evident, such a distinction is available for monadic properties and for relations. (Wolff 2015) Moving from monadic predicates to n-place predicates can illuminate, as the arguments goes, interesting aspects of indeterminacy in which the interactions of a quantum system Sq play a major role in its determinate/indeterminate nature. On a strong reading of the present argument, relational indeterminacy can help reduce the intrinsic, monadic type of QMI, as discussed by the mainstream approach to indeterminacy. A less ambitious task is to accept two ways of talking about indeterminacy, and to suggest a research program in metaphysics on both and on their interconnectedness. 
One can open a conceptual space for relational quantum indeterminacy at least on these lines of thought: I-01 ‘Spatiotemporal QMI’: The relation of a quantum system SQ with spacetime structures entails the QMI of SQ: The quantum indeterminacy of SQ is conceptually dependent on the ‘indeterminacy’ of the spacetime structures (SPT) in which SQ evolves. I-02 ‘Relativistic QMI’: The quantum indeterminacy of SQ is not a property of SQ, but it is dependent of the frame reference of the observers of SQ. I-03 ‘Decoherent QMI’: The quantum indeterminacy of SQ is conceptually dependent on the indeterminacy of the other system(s) with which SQ interacts (be them other quantum systems, or classical systems). I-04 ‘Intrinsic relational QMI’: The quantum indeterminacy of SQ is grounded in the interdependence of quantum properties of SQ alone. (see for example Kochen-Specker theorem, (Skow 2010). The I-03 is probably the strongest relational QMI stance and it is the only one we focus in this abstract. Macroscopic systems (e.g. tables, trees, cats, people) usually have determinate properties and determinables, although they are always composed of ensembles of quantum systems. But as decoherentists argue, quantum systems should sometimes have determinates such they can combine in macroscopic systems that have determinate properties. Decoherence is an interaction between a quantum system and an environment; it singles out ‘preferred’ states called “pointer basis” and the observables to receive definite values.(Fortin and Lombardi 2014) The literature on decoherence assumes the pervasiveness of entanglement of two systems, for which the total wave function in very rare situations is the sum of both systems being in definite states. For decoherentists, any sufficiently effective interaction induces correlations. Non-quantum systems interact over large distances, so we do not have effectively closed quantum systems, except the universe as a whole. (Zurek and Paz 1999; Crull 2014). 
The paper argues that I-03 can explain some features of QMI, especially the idea of classicality of some quantum indeterminate and their priority as an interaction with the environment.},
  author    = {Muntean, Ioan},
  language  = {5. Philosophy of physics},
  month     = apr,
  year      = {2019},
  note      = {Unpublished manuscript},
  internal-note = {review: @unpublished requires author and note, both added -- confirm author},
}
@unpublished{AggregatingMultilevelMechanistic2018,
  title     = {Aggregating multilevel mechanistic models from {Big} {Data} with {Machine} {Learning}},
  copyright = {All rights reserved},
  abstract  = {The nature of scientific evidence and its logical and conceptual relations with hypotheses, theories, and models have been among the most enticing topics in the philosophy of science. Philosophers and scientists alike were always toiling to find new methodologies to relate better scientific evidence to theories, models, and hypotheses. Scientific revolutions, as well as everyday progress of science, depend on developing methods to deal with an increase in complexity of the information science processes: climate change, political and economic instability, devastating natural disasters, new challenges coming from emerging technologies, new data from particle accelerators, telescopes, the vast data from various ‘-omics’ disciplines, and ultimately the whole Internet as a vast source of information. The advancement of computational tools and the presence of massive amounts of data, called Big Data, became arguably part of the practice of some scientific disciplines in the three decades. Presumably, philosophers of science should pay more attention to the epistemology of data mining in these new contexts finds its natural place in the already convoluted problem of scientific evidence. We ask this question from an epistemological point of view: what is (a) specific to and (b) novel about how data-driven, and computational-intensive scientific disciplines build their models?},
  author    = {Muntean, Ioan},
  language  = {1. Philosophy of science},
  year      = {2018},
  note      = {Unpublished manuscript},
  internal-note = {review: @unpublished requires author and note, both added -- confirm author},
}
@unpublished{ArtificialPowerArtificial2018,
  title     = {Artificial power and artificial morality: the multiobjective approach},
  copyright = {All rights reserved},
  language  = {4. Applied ethics: machine ethics},
  month     = dec,
  year      = {2018},
  note      = {Unpublished manuscript.},
}
@unpublished{StanfordUnconceivedAlternatives2018a,
  title     = {K. {Stanford}’s “unconceived alternatives” from the perspective of scientific unification and coherence},
  copyright = {All rights reserved},
  language  = {1. Philosophy of science},
  month     = apr,
  year      = {2018},
  note      = {Unpublished manuscript.},
}
@unpublished{AIRiskFallibilism2018a,
  title     = {{AI}: risk, fallibilism, and trust. {A} {Bayesian} approach},
  copyright = {All rights reserved},
  language  = {2. Philosophy of computation},
  month     = nov,
  year      = {2018},
  note      = {Unpublished manuscript.},
}
@unpublished{ConsciousnessMachineLearning2018b,
  title     = {Consciousness and machine learning: fallibilism, self-awareness and rationality. {A} {Bayesian} proposal},
  copyright = {All rights reserved},
  language  = {6. Miscellanea},
  month     = nov,
  year      = {2018},
  note      = {Unpublished manuscript.},
}
@unpublished{MultiobjectiveDecisionTheory2018a,
  title     = {Multi-objective decision theory in artificial intelligence and artificial morality},
  copyright = {All rights reserved},
  language  = {4. Applied ethics: machine ethics},
  month     = nov,
  year      = {2018},
  note      = {Unpublished manuscript.},
}
@unpublished{RationalityUncertaintyMachine2018a,
  title     = {Rationality and uncertainty in machine learning. {A} {Bayesian} approach},
  copyright = {All rights reserved},
  abstract  = {What do we need to gain trust in the actions and reasons of artificial agents? We propose here several capacities and formulate some requirements that artificial agents should meet in order to be trusted qua rational agents. We link the rationality of artificial agents to three capacities: one is to reason under uncertainty, especially regarding data uncertainty. The second is fallibilism, defined roughly as the capacity to know the limits of one’s knowledge of the world. These two capacities are related to one form or another of skepticism about perception and knowledge with deep philosophical roots that go back to Plato and to the suspension of judgment (epochê). The third refers to the capacity to learn constructively, and it is less related to skepticism: the agent can build new knowledge by recombining, eliminating, or ignoring previous pieces of knowledge. The focus is on Machine Learning techniques for building artificial agents. We use a form of probabilistic decision theory (also known as Bayesian statistics) to show that some Machine Learning (ML) algorithms, more specifically artificial neural networks used in Deep Learning, may display a specific form of rationality: for artificial agents, a working definition of ‘artificial rationality’ is provided. We delineate it from a concept of human rationality inspired by scientific reasoning based on some requirements: reasoning under data uncertainty, fallibilism (being able to represent its own limits), and active/corrective learning (when existing submodels are combined to create new knowledge or existing knowledge is divided into subdomains). The techniques that can instantiate these components of rationality are based on the idea of deleting, comparing, and combining models. We offer an analysis of the dropout method and a short speculative discussion on evolutionary algorithms. It is argued that (i) comparing and (ii) combining populations of representations is a form of rationality. The aim is to shed light on the philosophy of machine learning and its relation to Bayesian decision theory. The epistemology of different techniques is used to illustrate why we do not trust some of the ML results and how this situation can be partially corrected.},
  language  = {2. Philosophy of computation},
  year      = {2018},
  note      = {Unpublished manuscript.},
}
@unpublished{AddingStructuresFuture2018a,
  title     = {Adding structures to the future. {The} case of forecast models},
  copyright = {All rights reserved},
  language  = {1. Philosophy of science},
  month     = jun,
  year      = {2018},
  note      = {Unpublished manuscript.},
}
@unpublished{AddingOrderFuture2018a,
  title     = {Adding order to the future. {Maps} and structures in forecast models},
  copyright = {All rights reserved},
  language  = {1. Philosophy of science},
  month     = sep,
  year      = {2018},
  note      = {Unpublished manuscript.},
}
@unpublished{PrinciplebasedComputationalbasedVarieties2018a,
  title     = {Principle-based and computational-based varieties of scientific realism},
  copyright = {All rights reserved},
  abstract  = {In this paper we propose a version of scientific realism as an alternative to the mainstream realism, which is in most cases based on empirical adequacy. First, we argue for the role of principles and laws of nature for the realist commitments of theories. Second, we investigate a form of computational realism in which integrability (called here “inference to the best computation”) entails a form of realism. A couple of examples and counterexamples are briefly discussed, as illustrations of these types of scientific realism.},
  language  = {1. Philosophy of science},
  year      = {2018},
  note      = {Unpublished manuscript.},
}
@unpublished{NewWorkAccount2017,
  title    = {New work for an account of principles as meta-laws: to protect, constrain, or enable inter-field relations},
  abstract = {The nature of inter-field relations, defined here as relations among scientific domains, has received far too little attention compared to the widely discussed inter-theory relations (reduction, emergence, handshaking, etc.) How can we characterize the relations among scientific domains when it comes to inference and reasoning? This paper argues that one answer to this question resides in the way principles belonging to one domain relate, logically, to principles and laws belonging to other domains. Through the process of scientific inquiry: some principles can clash, some principles explain laws of another domain, or constrain them, or are inconsistent with laws or principles in other domains. There is nevertheless another set of cases when principles act as enablers of other laws or facilitate hybrid model building and unification of domains. We explore here the inter-field explanatory power of principles. These inter-field relationships among principles and laws are important for the advancement of science and play a similar explanatory role as the widely discussed inter-theory relations. The present argument adopts a version of M. Lange’s approach to symmetry principles qua meta-laws. (2007, 2009, 2011) In Lange, symmetry principles ‘guide’ conservation laws in a similar way in which laws determine sub-nomic facts. The emphasis is here less on the internal links between principles and laws of the same domain, but on how principles of a domain relate ‘externally’ to laws and principles of other domains. This ‘principle externalism’ relies also on a relativized version of Lewis’ best systems approach due to Callender and Cohen, called the better best system. (2009, 2011) Different domains trade in different natural kinds, but the emphasis here is on overlapping natural kinds and hence the possibility of “distributing” the top-down explanations due to principles. Possible inter-field roles of principles discussed here include: clashes of principles, principles constraining laws of other domains, or, on the contrary, principles enabling the hybridization or even the unification of laws belonging to different domains. Several examples from contemporary physics are briefly discussed: the case of the correspondence principles, the clash of principles of quantum physics and relativity, and the case of coupling constants in contemporary physics. The epistemic and methodological significance of these inter-field relations for current and future inter-field knowledge transfer and the emergence of new disciplines is briefly appraised.},
  language = {1. Philosophy of science},
  year     = {2017},
  note     = {Unpublished manuscript.},
}
@unpublished{PatternComputationalismPatternalism2017a,
  title     = {Pattern computationalism (pattern-alism) in machine ethics: a defense},
  copyright = {All rights reserved},
  abstract  = {Linking our morality to machines (computers, robots, algorithms, etc.) is not an easy task neither for philosophers, nor for computer scientists. The very idea that processes such as moral cognition, moral behavior, moral intuition, or moral choice could be instantiated in non-human agents, autonomous and reliable enough, looks problematic to many philosophers, especially those fond of the gap between reasoning about the “is” (facts) and the “ought” (values, norms, moral code, etc.). Are artificial, and autonomous moral agents possible even in principle? And if they are, in what sense are they fundamentally different than other artificial agents? Is computation the right tool to build moral agents with enough moral expertise and discernment? If it is, what kind of computationalism is most likely to be realized by artificial moral agents? The central focus point of this paper is representation within artificial systems that implement computationally moral processes. The paper draws inspiration from recent debates about computation and representation, (G. Piccinini, Rescorla, O. Shagir) and depicts a case in which architectures used in artificial agents suggest a novel type of computation based on patterns. This paper argues for a form of computationalism applied to moral artificial agents, called “moral pattern computationalism,” similar in some respects to computational structuralism (advocated by Putnam, Chalmers, Copeland, Scheutz i.a.). The argument discounts the role semantics plays in computation implemented in artificial moral agents and stresses the role of patterns in moral behavioral data. It engages the existing literature about the computational nature of cognition, representation (or lack thereof) in computationalism, the importance of patterns, and, finally, the role of functional interpretations.},
  language  = {4. Applied ethics: machine ethics},
  month     = feb,
  year      = {2017},
  note      = {Unpublished manuscript.},
}
@unpublished{FictionsMapsStructures2017,
  title     = {Fictions, maps, and structures in forecast models},
  copyright = {All rights reserved},
  abstract  = {The main aim of this paper is to discuss some epistemic aspects of forecast models, especially in the context of “model fictionalism,” promoted and discussed by P. Godfrey-Smith, M. Suarez, and especially R. Frigg. Forecast models are designed to predict the future states of a system from what we know about its past and present states, and from our knowledge of laws of nature, causation, mechanisms, symmetries, etc. This paper argues that the very process of building forecast models implies ultimately the insertion into the model of several types of fictional entities. We discuss here especially fictional structures, as opposed to fictional objects, and on their role in this type of models. The main epistemological question is whether we can produce knowledge about the future by employing these fictional structures. The paper has also a metaphysical component, as it engages the ontological status of future objects, the distribution of properties over populations of models, and the ontology of fictions and their representational role. The epistemology of forecast models, as a second component envisaged here, is also peculiar, in respect of epistemic access we have to the target space. Models in general represent a target system, which is real. Future objects have a different status: they are real, but not known and not accessible by direct observation (unlike the target system of most of our scientific models) Are forecast models completely target-less? What does it mean to have a representational target (partially or totally) in the future? In relation to the variability (or lack thereof) of laws of nature, the paper argues that fictions in forecast models are non-mimetic, but nevertheless nomic possibilities. The paper emphasizes the difference between representing future states of a system and representing its past states. Mapping the future states of a system depends on assumptions which the modeler needs to make explicit. The paper does not discount therefore the difference in epistemic access between past, present, and future when it comes to models. The conclusion of this paper is that forecast fictional structures, with properties distributed over populations of models, enable the modeler to represent a target system in the future. Fictional structures serve a better role in forecast models than hypotheses, which are typically expressed in first order language.},
  month     = feb,
  year      = {2017},
  note      = {Submitted to The Monist and Synthese. Word file available on demand only.},
}
@unpublished{UnconceivedAlternativesExpected2017a,
  title     = {“{Unconceived} alternatives” or “expected unifications”? {An} eliminative argument against {K}. {Stanford}’s “{New} {Induction}”},
  copyright = {All rights reserved},
  abstract  = {This paper offers an eliminative inference based on a “unification ideal” that can weaken the antirealist “New Induction” advanced by K. Stanford. (2006, 2009) The minimal model of unification is couched in terms of identification of two theoretical terms (c1 and c2), one belonging to a theory T1 and the other to another theory T2. First, four scenarios of inconceivability are proposed. The formal approach proposed here is based on a “syntactic view” of scientific theories. The upshot of this mechanism is that some alternatives to T1, which still remain unconceived (relative to the conceptual and ideological space of T1), can be eliminated because they are inconsistent with alternatives to another theory T2. Consistency is here a requirement imposed on sets of theories. An alternative view, based on an idea of “state spaces” and “conceptual spaces” is quickly discussed. This argument shows in what sense Stanford’s antirealist inductive argument is weakened when scientists or communities of scientists operate based on some theoretical ideals such as unification, parsimony, simplicity, etc.},
  language  = {1. Philosophy of science},
  month     = jan,
  year      = {2017},
  note      = {Unpublished manuscript.},
}
@unpublished{UnificationDecouplingIdentification2017a,
  title     = {Unification, decoupling, and identification in the “{Palatini} formalism”},
  copyright = {All rights reserved},
  abstract  = {This paper focuses on a couple of philosophical and historical aspects of the “Palatini formalism,” one of the “metric-affine” approaches to general relativity. It is argued here that the history and the conceptual developments of this formalism (mainly in the first two decades after the wake of general relativity), as well as its more recent incarnations, illustrate the interplay of several concepts in the philosophy of science. There are three aims of the present argument. First, the unificatory power of formalism is under scrutiny here: the decoupling of two mathematical structures by taking them as independent variables, followed by a partial structural identification. Second, it is the way we can read off enticing philosophical aspects of General Relativity from its formalism: the relation between the operations of decoupling and identifications (of mathematical structures) and explanation (mathematical explanation, structural explanation, and last but not least, functional explanation) is germane here. Third, this approach complements and augments current discussions on the role and value of variational principles in physics, especially in general relativity. Overall this paper attempts to show how mathematical constructs and assumptions have a role in the ontology of general relativity and some of its “extensions.”},
  language  = {5. Philosophy of physics},
  month     = feb,
  year      = {2017},
  note      = {Unpublished manuscript.},
}
@unpublished{SmallBigDiscovering2017a,
  title     = {The small from the {Big}: discovering models and mechanisms with machine learning},
  copyright = {All rights reserved},
  abstract  = {This paper proposes a new discussion on “Big Data” as the primary source of model-building in science, when the computational architecture used is machine learning or evolutionary algorithms (and possibly a combination of them). Is Big Data mining a proper method of discovering and building models? The focus is on discovery and building (rather than justification or confirmation) of existing theories or models; and on relevant epistemic and pragmatic aspects of these computational architectures (rather than on the quantitative features of Big Data). Two competing (or, more mildly put, complementing) types of models used in biology and cognitive science are scrutinized: mechanism modeling and computational modeling (mostly network modeling and dynamical modeling). This paper aims to show that far from becoming irrelevant in the Big Data era, network models and mechanisms can be discovered and built from Big Data. The argument is based on the concept of patterns in data, discussed in the context of the relation between data and models (Bogen, Woodward, McAllister). It relates it then to the concept of “small patterns” in Big Data (Floridi) as hidden aspects of mechanistic models, hard to fathom by scientists. Machine learning, as a tool to categorize and characterize real patterns is assessed in the context of mechanistic and network models. Evolutionary computation is assessed as a method to optimize the search for mechanisms and complex networks. To compare and contrast the mechanistic account and its alternatives, this argument builds on two concepts central to all approaches: modularity (as related to decomposability), and organization (Bechtel, Darden, Craver), which both come in degrees and can be discovered through machine learning or evolutionary computation in Big Data (cf. E. Ratti and W. Pietsch). The paper concludes with the claim that Big Data, when it qualifies as scientific evidence, most likely has and will have a fundamental impact on the way we discover and build computational models in science.},
  month     = feb,
  year      = {2017},
  note      = {Unpublished manuscript.},
}
@unpublished{RealismModuliSpace2017a,
  title     = {Realism in moduli space: constants, coupling, and hierarchies of theories},
  copyright = {All rights reserved},
  month     = nov,
  year      = {2017},
  note      = {Unpublished manuscript.},
}
@unpublished{BottomupBetterBest2017a,
  title     = {A bottom-up ‘{Better} {Best} {System}’ of lawhood: compressibility, patterns, and mining {Big} {Data}},
  copyright = {All rights reserved},
  abstract  = {This paper starts with an enduring empiricist concern about laws of nature: how do we relate laws of nature to empirical data? Although this issue has been raised in Early modern philosophy (Hume, Reid) this paper looks at this perennial issue from the perspective of the way we collect and process data in the 21st century, when many disciplines (astronomy, genetics, neuroscience, climate science, particle physics, social sciences, etc.) have shifted towards data-oriented and computational-intensive scientific practice. Is there a future for laws of nature in the data-oriented science? If so, which account of lawhood fares better in this sense? To address this question, this paper pursues two goals. First, it discusses the lawhood of powerful generalizations in scientific disciplines dominated by Big Data, computational power, and numerical methods. Contrary to a common hype, it is argued here that laws of nature will have a role to play in the advancement of science after the so-called “fourth-paradigm” stage of science. Relying on data alone (be it Big Data, or small data) is not science: for epistemological reasons, it is argued here, we need laws of nature (and as others may argue: theories, models, causation, mechanisms) to advance in the 21st century: nevertheless, some of these concepts need to be refitted to match the challenges emerging after the 21st century “data turn.” The second part of the argument asks: which account of lawhood is suitable for the stage of data-driven, computational-intensive science? What does it mean to be a law of nature when Big Data counts as your scientific evidence and computational tools (including numerical simulations, machine learning, discovery algorithms, data mining) are part and parcel of the scientific method? Most of the philosophical accounts of lawhood: deflationary views, governing views, and various ‘systems’ views: Mill-Ramsey-Lewis or Cohen’s and C. Callender’s ‘better best system’ (BBS) need to be amended to accommodate this type of empiricism about laws of nature. The version of the BBS proposed here can account for scenarios in which laws of nature are distilled from data through various computational techniques. This paper emphasizes that Big Data is not restricted to one discipline or to one representation and that the set of natural kinds that Big Data comes with transgresses our own aims and ways of dividing science. In the ‘bottom-up’ BBS discussed here, a prospective ‘correspondence’ between Big Data and the laws of nature is proffered: data is associated with the Humean mosaic, and hence laws are supervening on data, not directly on occurrent facts; patterns in data are related to generalizations and ‘natural’ patterns to laws of nature; natural kinds are not restricted to one scientific discipline, but emerge from data. Laws of nature are then defined by structures in large datasets and computational methods used to mine Big Data and distill laws from mere generalizations. We focus on the concept of ‘natural pattern’ and relate it to the existing philosophical discussions: ‘small’ (Floridi 2012, 2011), ‘real,’ or ‘autonomous’ patterns. (Bogen 2010; Dennett 1991; Brading 2010) A couple of examples of bottom up lawhood inspired by recent literature are quickly discussed.},
  month     = oct,
  year      = {2017},
  note      = {Unpublished manuscript.},
}
@unpublished{DualApproachArtificial2017,
  title     = {A dual approach to artificial intelligence and artificial morality. {Evolution} and learning},
  copyright = {All rights reserved},
  month     = dec,
  year      = {2017},
  note      = {Unpublished manuscript.},
}
@unpublished{StructureThSider2016a,
  title     = {The structure in {Th}. {Sider}’s ontological realism: multiplicity and scale relativity},
  copyright = {All rights reserved},
  abstract  = {The concept of structure is widely used both in analytic metaphysics (Sider, Lewis) and in current philosophy of science (see especially structural realism). I attempt here to compare and contrast them by arguing that the assumption of a unitary structure in Th. Sider’s ontological realism (2009, 2011) is not warranted when analyzed from the perspective of scientific metaphysics. I adopt here a version of scientific realism, the rainforest realism, inspired by ontic structural realism, see Ladyman et al (2007). I argue that the quantifier invariantism and consequently the concept of structure is relative to scale. I argue for a scale relative ontological realism that is different from Sider’s, but follows closely its lines. This entails a form of ontological pluralism that Sider refuted.},
  language  = {7. Metaphysics},
  month     = jan,
  year      = {2016},
  note      = {Unpublished manuscript.},
}
@unpublished{StrongWeakDualitiesScientific2016a,
  title     = {Strong-{Weak} {Dualities} and {Scientific} {Realism}: {History} and {Present} {Cases}},
  copyright = {All rights reserved},
  language  = {5. Philosophy of physics},
  month     = feb,
  year      = {2016},
  note      = {Unpublished manuscript.},
}
@unpublished{RepresentationPredictionFictions2016a,
  title     = {Representation, prediction, and fictions in forecast models},
  copyright = {All rights reserved},
  month     = dec,
  year      = {2016},
  note      = {Unpublished manuscript.},
}
@unpublished{DiscoveringLearningImproving2016a,
  title     = {Discovering, learning, and improving ethics in the digital universe},
  copyright = {All rights reserved},
  month     = sep,
  year      = {2016},
  note      = {Unpublished manuscript; prepared for a handbook to digital humanities.},
}
@unpublished{NewDigitalEpistemology2016a,
  title     = {A new digital epistemology? {The} case of evolutionary computation used in discovery},
  copyright = {All rights reserved},
  month     = feb,
  year      = {2016},
  note      = {Unpublished manuscript.},
}
@unpublished{DigitalGuesswork2016a,
  title     = {Digital guesswork},
  copyright = {All rights reserved},
  month     = mar,
  year      = {2016},
  note      = {Submitted to Humana.Mente; accepted with suggestions.},
}
@unpublished{DiscoveringSmallModels2016a,
  title     = {Discovering small models from {Big} {Data}: machine learning and evolutionary algorithms},
  copyright = {All rights reserved},
  month     = jul,
  year      = {2016},
  note      = {Unpublished manuscript.},
}
@unpublished{VariationalPrinciplesClassical2016a,
  title     = {The variational principles of classical mechanics: modalities, dispositions or mathematical fictions?},
  copyright = {All rights reserved},
  month     = jan,
  year      = {2016},
  note      = {Unpublished manuscript.},
}
@unpublished{AdSCFTUnification2015a,
  title     = {{AdS}/{CFT} as unification},
  copyright = {All rights reserved},
  language  = {5. Philosophy of physics},
  month     = may,
  year      = {2015},
  note      = {Unpublished manuscript.},
}
@unpublished{MoralFunctionalismDispositions2015a,
  title     = {Moral functionalism and dispositions in normative agency. {The} artificial case},
  copyright = {All rights reserved},
  month     = sep,
  year      = {2015},
  note      = {Unpublished manuscript.},
}
@unpublished{VirtuesArtificialAgents2015a,
  title     = {Virtues of artificial agents: an agent-centric, case-based model based on 'soft computation'},
  copyright = {All rights reserved},
  month     = jun,
  year      = {2015},
  note      = {ETIN submission.},
}
@unpublished{PluralityModelsOptimization2014a,
  title     = {Plurality of models, optimization and mechanisms in climate studies. {The} role of feedback in the new {IPCC} report},
  copyright = {All rights reserved},
  abstract  = {In this paper I relate the feedback in climate models to the search for a class of optimal models (called here “optimality”). The final aim of this paper is to appraise the prospects of optimizing climate models, eliminate unconceived models, reduce their uncertainty and, ultimately, produce a more explanatory and a more realistic mechanism of the Earth’s climate. When compared with the multiplicity of numerical models, a mechanism is backed by the virtue of explanation and hence higher chance to be more realistic. I discuss these topics in the light of the most recent report of the Intergovernmental Panel on Climate Change (IPCC).},
  month     = oct,
  year      = {2014},
  note      = {Unpublished manuscript.},
}