<!-- Option 1 (recommended): client-side embed. BibBase fetches the Zotero
     group library (group 1152176, exported as BibTeX, limit 100), groups the
     entries by year, applies the site stylesheet, and injects the rendered
     list here via a JSONP script (jsonp=1). -->
<script src="https://bibbase.org/show?bib=https%3A%2F%2Fapi.zotero.org%2Fgroups%2F1152176%2Fitems%3Fkey%3Dbi2Q7duoPuqjf6lgym4TgM83%26format%3Dbibtex%26limit%3D100&group0=year&css=www.axiom.humanities.uva.nl/axiom.css&jsonp=1"></script>
<?php
// Option 2: server-side embed. Fetch the pre-rendered BibBase page and
// inline its HTML into this page. Requires allow_url_fopen to be enabled.
$bibbaseUrl = "https://bibbase.org/show?bib=https%3A%2F%2Fapi.zotero.org%2Fgroups%2F1152176%2Fitems%3Fkey%3Dbi2Q7duoPuqjf6lgym4TgM83%26format%3Dbibtex%26limit%3D100&group0=year&css=www.axiom.humanities.uva.nl/axiom.css";
$contents = file_get_contents($bibbaseUrl);
if ($contents === false) {
    // file_get_contents returns false on failure (network error,
    // allow_url_fopen disabled, remote 4xx/5xx). The original code printed
    // nothing in that case; show a visible notice instead.
    echo '<p>Publication list is temporarily unavailable.</p>';
} else {
    // $contents is an HTML fragment rendered by BibBase; for a string,
    // echo produces the same output as print_r and is the idiomatic choice.
    echo $contents;
}
?>
<!-- Option 3: iframe embed of the BibBase-rendered publication list.
     The title attribute is required on iframes for screen-reader users. -->
<iframe src="https://bibbase.org/show?bib=https%3A%2F%2Fapi.zotero.org%2Fgroups%2F1152176%2Fitems%3Fkey%3Dbi2Q7duoPuqjf6lgym4TgM83%26format%3Dbibtex%26limit%3D100&group0=year&css=www.axiom.humanities.uva.nl/axiom.css" title="Publication list rendered by BibBase"></iframe>
For more details see the documentation.
To the site owner:
Action required! Mendeley is changing its API. In order to keep using Mendeley with BibBase past April 14th, you need to:
@book{juravsky_speech_2017, edition = {3rd}, title = {Speech and {Language} {Processing} - {An} {Introduction} to {Natural} {Language} {Processing}, {Computational} {Linguistics}, and {Speech} {Recognition}}, url = {https://web.stanford.edu/~jurafsky/slp3/ed3book.pdf}, author = {Jurafsky, Daniel and {James H. Martin}}, year = {2017}, }
@article{herbelot_high-risk_2017, title = {High-risk learning: acquiring new word vectors from tiny data}, url = {http://arxiv.org/abs/1707.06556}, journal = {Proceedings of the 2017 Conference on Empirical Methods in Natural Language Processing}, author = {Herbelot, Aurélie and Baroni, Marco}, year = {2017}, pages = {304--309}, }
@article{caliskan_semantics_2017, title = {Semantics derived automatically from language corpora contain human-like biases}, volume = {356}, url = {http://science.sciencemag.org/content/356/6334/183.abstract}, doi = {10.1126/science.aal4230}, abstract = {AlphaGo has demonstrated that a machine can learn how to do things that people spend many years of concentrated study learning, and it can rapidly learn how to do them better than any human can. Caliskan et al. now show that machines can learn word associations from written texts and that these associations mirror those learned by humans, as measured by the Implicit Association Test (IAT) (see the Perspective by Greenwald). Why does this matter? Because the IAT has predictive value in uncovering the association between concepts, such as pleasantness and flowers or unpleasantness and insects. It can also tease out attitudes and beliefs—for example, associations between female names and family or male names and career. Such biases may not be expressed explicitly, yet they can prove influential in behavior.Science, this issue p. 183; see also p. 133Machine learning is a means to derive artificial intelligence by discovering patterns in existing data. Here, we show that applying machine learning to ordinary human language results in human-like semantic biases. We replicated a spectrum of known biases, as measured by the Implicit Association Test, using a widely used, purely statistical machine-learning model trained on a standard corpus of text from the World Wide Web. Our results indicate that text corpora contain recoverable and accurate imprints of our historic biases, whether morally neutral as toward insects or flowers, problematic as toward race or gender, or even simply veridical, reflecting the status quo distribution of gender with respect to careers or first names. 
Our methods hold promise for identifying and addressing sources of bias in culture, including technology.}, number = {6334}, journal = {Science}, author = {Caliskan, Aylin and Bryson, Joanna J. and Narayanan, Arvind}, month = apr, year = {2017}, pages = {183}, }
@incollection{jackman_meaning_2017, edition = {Spring 2017}, title = {Meaning {Holism}}, url = {https://plato.stanford.edu/archives/spr2017/entries/meaning-holism/}, abstract = {The term “meaning holism” is generally applied to viewsthat treat the meanings of all of the words in a language asinterdependent. Holism draws much of its appeal from the way in whichthe usage of all our words seems interconnected, and runs into manyproblems because the resultant view can seem to conflict with (amongother things) the intuition that meanings are by and large shared andstable., This entry will examine the strengths of the arguments for andagainst meaning holism.}, urldate = {2017-04-01}, booktitle = {The {Stanford} {Encyclopedia} of {Philosophy}}, publisher = {Metaphysics Research Lab, Stanford University}, author = {Jackman, Henry}, editor = {Zalta, Edward N.}, year = {2017}, keywords = {Quine, Willard van Orman, analytic/synthetic distinction, compositionality, meaning, theories of, meaning: normativity of, mental content: narrow}, }
@incollection{speaks_theories_2017, edition = {Spring 2017}, title = {Theories of {Meaning}}, url = {https://plato.stanford.edu/archives/spr2017/entries/meaning/}, abstract = {The term “theory of meaning” has figured, in one way oranother, in a great number of philosophical disputes over the lastcentury. Unfortunately, this term has also been used to mean a greatnumber of different things. , Here I focus on two sorts of “theory of meaning.” Thefirst sort of theory—a semantic theory—is a theory whichassigns semantic contents to expressions of a language. Approaches tosemantics may be divided according to whether they assign propositionsas the meanings of sentences and, if they do, what view they take ofthe nature of these propositions., The second sort of theory—a foundational theory ofmeaning—is a theory which states the facts in virtue of whichexpressions have the semantic contents that they have. Approaches tothe foundational theory of meaning may be divided into theories whichdo, and theories which do not, explain the meanings of expressions ofa language used by a group in terms of the contents of the mentalstates of members of that group.}, urldate = {2017-04-01}, booktitle = {The {Stanford} {Encyclopedia} of {Philosophy}}, publisher = {Metaphysics Research Lab, Stanford University}, author = {Speaks, Jeff}, editor = {Zalta, Edward N.}, year = {2017}, keywords = {Frege, Gottlob, Grice, Paul, Tarski, Alfred: truth definitions, action, compositionality, convention, descriptions, indexicals, mind: computational theory of, names, natural kinds, personal identity, pragmatics, propositional attitude reports, propositions: singular, propositions: structured, rigid designators, semantics: two-dimensional, situations: in natural language semantics}, }
@incollection{reimer_reference_2017, edition = {Spring 2017}, title = {Reference}, url = {https://plato.stanford.edu/archives/spr2017/entries/reference/}, abstract = {Reference is a relation that obtains between certain sorts of representational tokens (e.g., names, mental states, pictures)and objects. For instance, when I assert that “George W. Bush is a Republican,” I use a particular sort of representational token — i.e. the name‘George W. Bush’— to refer to a particular individual — namely, George W. Bush. While names and other referentialterms are hardly the only type of representational token capable ofreferring, linguistic tokens like these have long stood at the center of philosophical inquiries into the nature of reference. Accordingly,this entry will focus almost entirely on linguistic reference. For more on the reference of mental states, see the entries oncausal theories of mental content, externalism about mental content, andteleological theories of mental content. For more on the reference of pictures,see the entry onGoodman's aesthetics., Proper names are standardly considered a paradigm example of linguisticreference — or, more specifically, a relation that obtains between certain sorts of linguistic expressions and what speakers use those expressions to talk about. Other expressions which are generally consideredto be of the referring sort include indexicals like ‘I’, ‘here’, ‘now’, and ‘that’. While it is highlyquestionable that all words refer, most philosophers of language assumethat at least certain sorts of terms (e.g. proper names and indexicals) regularlyand reliably do so. It is these sorts of terms that will serve as our primaryfocus below. Assuming that at least certain sorts of terms do in fact refer, the central question regarding linguistic reference becomes: how do such terms refer? What, in other words, is the ‘mechanism’ of reference? 
Subsidiary questions concern the relation between reference and meaning, reference and truth, and referenceand knowledge. Some philosophers have thought that the nature of reference is able to shed light on important metaphysical or epistemological issues. Other philosophers, however, are less sanguine. Indeed, certain philosophers have gone so far asto deny that reference is a substantive relation, one deserving of serious philosophical scrutiny.}, urldate = {2017-03-30}, booktitle = {The {Stanford} {Encyclopedia} of {Philosophy}}, publisher = {Metaphysics Research Lab, Stanford University}, author = {Reimer, Marga and Michaelson, Eliot}, editor = {Zalta, Edward N.}, year = {2017}, keywords = {Goodman, Nelson: aesthetics, descriptions, indexicals, mental content: causal theories of, mental content: externalism about, mental content: teleological theories of, natural kinds, nonexistent objects, rigid designators, skepticism: and content externalism}, }
@article{Boleda:2016:FDS:3068346.3068348, title = {Formal {Distributional} {Semantics}: {Introduction} to the {Special} {Issue}}, volume = {42}, issn = {0891-2017}, url = {https://doi.org/10.1162/COLI_a_00261}, doi = {10.1162/COLI\_a\_00261}, number = {4}, journal = {Comput. Linguist.}, author = {Boleda, Gemma and Herbelot, Aurélie}, month = dec, year = {2016}, note = {tex.acmid= 3068348 tex.issue\_date= December 2016 tex.numpages= 17}, pages = {619--635}, }
@techreport{svoboda_distributional_2016, address = {Brno}, title = {Distributional semantics using neural networks}, url = {https://dspace5.zcu.cz/bitstream/11025/25377/1/Svoboda.pdf}, abstract = {During recent years, neural networks show crucial improvement in catching semantics of words or sentences. They also show improves in Language modeling, which is crucial for many tasks among Natural Language Processing (NLP). One of the most used architectures of Artificial Neural Networks (ANN) in NLP are Recurrent Neural Networks (RNN) that do not use limited size of context. By using recurrent connections, information can cycle in side these networks for arbitrarily long time. Thesis summarizes the state-of-the-art approaches to distributional semantics. Thesis also focus on further use of ANN among NLP problems}, number = {DCSE/TR-2016-0}, urldate = {2017-04-02}, institution = {University of West Bohemia in Pilsen}, author = {Svoboda, Lukas}, month = jun, year = {2016}, pages = {47}, }
@incollection{gasparri_word_2016, edition = {Spring 2016}, title = {Word {Meaning}}, url = {https://plato.stanford.edu/archives/spr2016/entries/word-meaning/}, abstract = {Word meaning has played a somewhat marginal role in earlycontemporary philosophy of language, which was primarily concernedwith the structural features of sentences and showed lessinterest in the format of lexical representations and in the nature ofthe word-level input to compositional processes. Nowadays, it iswell-established that the way we account for word meaning is bound tohave a major impact in tipping the balance in favor or against a givenpicture of the fundamental properties of human language. This entryprovides an overview of the way issues related to lexical meaning havebeen explored in analytic philosophy and a summary of relevantresearch on the subject in neighboring scientific domains. Though themain focus will be on philosophical problems, contributions fromlinguistics, psychology, neuroscience and artificial intelligence willalso be considered, since research on word meaning is highlyinterdisciplinary.}, urldate = {2017-04-01}, booktitle = {The {Stanford} {Encyclopedia} of {Philosophy}}, publisher = {Metaphysics Research Lab, Stanford University}, author = {Gasparri, Luca and Marconi, Diego}, editor = {Zalta, Edward N.}, year = {2016}, keywords = {ambiguity, analytic/synthetic distinction, assertion, belief, cognition: embodied, compositionality, convention, descriptions, implicature, indexicals, logical form, meaning, theories of, meaning: normativity of, mental content: externalism about, mental content: narrow, names, natural kinds, pragmatics, presupposition, propositional attitude reports, propositions, quantifiers and quantification, reference, rigid designators, semantics: two-dimensional, speech acts, vagueness}, }
@incollection{scholz_philosophy_2016, edition = {Winter 2016}, title = {Philosophy of {Linguistics}}, url = {https://plato.stanford.edu/archives/win2016/entries/linguistics/}, abstract = {Philosophy of linguistics is the philosophy of science as applied to linguistics. This differentiates it sharply from the philosophy of language, traditionally concerned with matters of meaning and reference., As with the philosophy of other special sciences, there are general topics relating to matters like methodology and explanation (e.g., the status of statistical explanations in psychology and sociology, or the physics-chemistry relation in philosophy of chemistry), and more specific philosophical issues that come up in the special science at issue (simultaneity for philosophy of physics; individuation of species and ecosystems for the philosophy of biology). General topics of the first type in the philosophy of linguistics include:, Specific topics include issues in language learnability, language change, the competence-performance distinction, and the expressive power of linguistic theories., There are also topics that fall on the borderline between philosophy of language and philosophy of linguistics: of “linguistic relativity” (see the supplement on the linguistic relativity hypothesis in the Summer 2015 archived version of the entry on relativism), language vs. idiolect, speech acts (including the distinction between locutionary, illocutionary, and perlocutionary acts), the language of thought, implicature, and the semantics of mental states (see the entries on analysis, semantic compositionality, mental representation, pragmatics, and defaults in semantics and pragmatics). In these cases it is often the kind of answer given and not the inherent nature of the topic itself that determines the classification. 
Topics that we consider to be more in the philosophy of language than the philosophy of linguistics include intensional contexts, direct reference, and empty names (see the entries on propositional attitude reports, intensional logic, rigid designators, reference, and descriptions). , This entry does not aim to provide a general introduction to linguistics for philosophers; readers seeking that should consult a suitable textbook such as Akmajian et al. (2010) or Napoli (1996). For a general history of Western linguistic thought, including recenttheoretical linguistics, see Seuren (1998). Newmeyer (1986) is usefuladditional reading for post-1950 American linguistics. Tomalin (2006) traces the philosophical, scientific, and linguistic antecedents of Chomsky's magnum opus (1955/1956; published1975), and Scholz and Pullum (2007) provide a critical review.}, urldate = {2017-04-01}, booktitle = {The {Stanford} {Encyclopedia} of {Philosophy}}, publisher = {Metaphysics Research Lab, Stanford University}, author = {Scholz, Barbara C. and Pelletier, Francis Jeffry and Pullum, Geoffrey K.}, editor = {Zalta, Edward N.}, year = {2016}, keywords = {analysis, assertion, compositionality, defaults in semantics and pragmatics, descriptions, empiricism: logical, idiolects, innate/acquired distinction, innateness: and language, language of thought hypothesis, linguistics: computational, logic: intensional, mental representation, pragmatics, propositional attitude reports, reference, relativism, rigid designators}, }
@article{bolukbasi_man_2016, title = {Man is to {Computer} {Programmer} as {Woman} is to {Homemaker}? {Debiasing} {Word} {Embeddings}}, shorttitle = {Man is to {Computer} {Programmer} as {Woman} is to {Homemaker}?}, url = {http://arxiv.org/abs/1607.06520}, abstract = {The blind application of machine learning runs the risk of amplifying biases present in data. Such a danger is facing us with word embedding, a popular framework to represent text data as vectors which has been used in many machine learning and natural language processing tasks. We show that even word embeddings trained on Google News articles exhibit female/male gender stereotypes to a disturbing extent. This raises concerns because their widespread use, as we describe, often tends to amplify these biases. Geometrically, gender bias is first shown to be captured by a direction in the word embedding. Second, gender neutral words are shown to be linearly separable from gender definition words in the word embedding. Using these properties, we provide a methodology for modifying an embedding to remove gender stereotypes, such as the association between between the words receptionist and female, while maintaining desired associations such as between the words queen and female. We define metrics to quantify both direct and indirect gender biases in embeddings, and develop algorithms to "debias" the embedding. Using crowd-worker evaluation as well as standard benchmarks, we empirically demonstrate that our algorithms significantly reduce gender bias in embeddings while preserving the its useful properties such as the ability to cluster related concepts and to solve analogy tasks. 
The resulting embeddings can be used in applications without amplifying gender bias.}, urldate = {2017-03-31}, journal = {arXiv:1607.06520 [cs, stat]}, author = {Bolukbasi, Tolga and Chang, Kai-Wei and Zou, James and Saligrama, Venkatesh and Kalai, Adam}, month = jul, year = {2016}, note = {arXiv: 1607.06520}, keywords = {Computer Science - Artificial Intelligence, Computer Science - Computation and Language, Computer Science - Learning, Statistics - Machine Learning}, }
@incollection{schroeder-heister_proof-theoretic_2016, edition = {Winter 2016}, title = {Proof-{Theoretic} {Semantics}}, url = {https://plato.stanford.edu/archives/win2016/entries/proof-theoretic-semantics/}, abstract = {Proof-theoretic semantics is an alternative to truth-conditionsemantics. It is based on the fundamental assumption that the centralnotion in terms of which meanings are assigned to certain expressionsof our language, in particular to logical constants, is that ofproof rather than truth. In this senseproof-theoretic semantics is semantics in terms of proof .Proof-theoretic semantics also means the semantics of proofs,i.e., the semantics of entities which describe how we arrive at certainassertions given certain assumptions. Both aspects of proof-theoreticsemantics can be intertwined, i.e. the semantics of proofs is itselfoften given in terms of proofs., Proof-theoretic semantics has several roots, the most specific onebeing Gentzen's remarks that the introduction rules in hiscalculus of natural deduction define the meanings of logical constants,while the elimination rules can be obtained as a consequence of thisdefinition (see section 2.2.1). Morebroadly, it belongs to what Prawitz called general prooftheory (see section 1.1). Even morebroadly, it is part of the tradition according to which the meaning ofa term should be explained by reference to the way it is usedin our language., Within philosophy, proof-theoretic semantics has mostly figuredunder the heading “theory of meaning”. This terminologyfollows Dummett, who claimed that the theory of meaning is the basis oftheoretical philosophy, a view which he attributed to Frege. The term“proof-theoretic semantics” was proposed bySchroeder-Heister (1991; used already in 1987 lectures in Stockholm) in order not to leave the term“semantics” to denotationalism alone—after all,“semantics” is the standard term for investigations dealingwith the meaning of linguistic expressions. 
Furthermore, unlike“theory of meaning”, the term “proof-theoreticsemantics” covers philosophical and technical aspects likewise.In 1999, the first conference with this title took place inTübingen.}, urldate = {2017-03-30}, booktitle = {The {Stanford} {Encyclopedia} of {Philosophy}}, publisher = {Metaphysics Research Lab, Stanford University}, author = {Schroeder-Heister, Peter}, editor = {Zalta, Edward N.}, year = {2016}, keywords = {Curry's paradox, Hilbert, David: program in the foundations of mathematics, Russell's paradox, category theory, connectives: sentence connectives in formal logic, logic, history of: intuitionistic logic, logic: classical, logic: intuitionistic, logic: linear, logic: substructural, logical constants, mathematics, philosophy of: intuitionism, paradoxes: and contemporary logic, proof theory: development of, realism: challenges to metaphysical, self-reference, truth: revision theory of, type theory}, }
@article{boleda_show_2016, title = {"{Show} me the cup": {Reference} with {Continuous} {Representations}}, shorttitle = {"{Show} me the cup"}, url = {http://arxiv.org/abs/1606.08777}, abstract = {One of the most basic functions of language is to refer to objects in a shared scene. Modeling reference with continuous representations is challenging because it requires individuation, i.e., tracking and distinguishing an arbitrary number of referents. We introduce a neural network model that, given a definite description and a set of objects represented by natural images, points to the intended object if the expression has a unique referent, or indicates a failure, if it does not. The model, directly trained on reference acts, is competitive with a pipeline manually engineered to perform the same task, both when referents are purely visual, and when they are characterized by a combination of visual and linguistic properties.}, urldate = {2017-03-30}, journal = {arXiv:1606.08777 [cs]}, author = {Boleda, Gemma and Padó, Sebastian and Baroni, Marco}, month = jun, year = {2016}, note = {arXiv: 1606.08777}, keywords = {Computer Science - Artificial Intelligence, Computer Science - Computation and Language, Computer Science - Learning}, }
@inproceedings{herbelot2015mr, title = {Mr {Darcy} and {Mr} {Toad}, gentlemen: distributional names and their kinds}, booktitle = {Proceedings of the 11th {International} {Conference} on {Computational} {Semantics}}, author = {Herbelot, Aurélie}, year = {2015}, pages = {151--161}, }
@inproceedings{DBLP:conf/emnlp/GuptaBBP15, title = {Distributional vectors encode referential attributes}, url = {http://aclweb.org/anthology/D/D15/D15-1002.pdf}, booktitle = {Proceedings of the 2015 {Conference} on {Empirical} {Methods} in {Natural} {Language} {Processing}, {EMNLP} 2015, {Lisbon}, {Portugal}, {September} 17-21, 2015}, author = {Gupta, Abhijeet and Boleda, Gemma and Baroni, Marco and Padó, Sebastian}, year = {2015}, note = {tex.bibsource= dblp computer science bibliography, http://dblp.org tex.biburl= http://dblp.uni-trier.de/rec/bib/conf/emnlp/GuptaBBP15 tex.crossref= DBLP:conf/emnlp/2015}, pages = {12--21}, }
@article{peregrin_review_2015, title = {Review of \textit{{Inferentialism}: {Why} {Rules} {Matter}}}, issn = {1538-1617}, shorttitle = {Review of \textit{{Inferentialism}}}, url = {http://ndpr.nd.edu/news/56764-inferentialism-why-rules-matter/}, abstract = {'Regel' means rule, but it didn't have to mean that, or anything at all. According to inferentialism, 'Regel' and other words of natural languages have meanings because they have inferential roles; they have the specific meanings they do because they have the specific inferential roles they do. Jaroslav Peregrin articulates and defends a version of that idea, stressing (in Part I) the importance of rules, and highlighting (in Part II) how the idea applies to logical vocabulary and laws of logic. Inferentialism is a clear and helpful presentation of its namesake theory. It is a welcome complement to Robert Brandom's Articulating Reasons, an abbreviated form of his Making It Explicit, which are currently the most prominent and influential statements of inferentialism. Both of Brandom's books cover a lot of philosophical terrain (language, mind, knowledge, action, and logic). Peregrin's book, by contrast, covers less terrain, primarily language and logic. That allows him to explore topics that Brandom does not explore in much detail in either of his books, especially aspects of inferentialism about logic. While Peregrin and Brandom agree about many important points, Inferentialism offers readers something new. Let me first briefly summarize each chapter, and then I will indicate a few ways in which the book could have addressed some important topics more thoroughly. In Chapter One, Peregrin argues for normative inferentialism over causal inferentialism. Causal inferentialism holds that the meaning of a word (e.g., 'Regel') is constituted by the inferences that people make or are disposed to make involving that word. 
Normative inferentialism, by contrast, holds that the meaning of a word is constituted by rules or norms governing inferences that speakers might make involving that word. Unlike its causal sibling, the explanans is good or proper inference (hence norms of inference), not just inference of any sort. In Chapter Two, Peregrin contends that normative inferentialism must broaden the notion of a rule of inference in two ways. First, it must admit some "material" rules of inference, ones that hold not in virtue of their so-called logical form but in virtue of the non-logical terms they involve, such as a rule licensing the inference from 'This is a mammal' to 'This is an animal.' Without such rules, no account of the meaning of non-logical terms -- 'mammal', 'animal', 'quark' and 'justice' -- could be given, since there would be no way to distinguish between such terms. Second, inferentialism must also admit rules for transitions from observable circumstances to sentences. Otherwise, it cannot adequately account for the meanings of empirical terms, such as 'mammal,' and 'animal,' terms which are distinguished partly by being properly used in response to relevant, observable circumstances. Following Brandom, Peregrin calls the resulting idea "Strong Inferentialism." In Chapter Three, Peregrin gets formally precise about inferential roles. A sentence's inferential potential is the set of sentences that it may be validly inferred from, and the set of sentences that may be validly inferred from it. What conclusions I may draw from a given sentence depend also on collateral premises available to me. I may not infer 'This is a square' from 'This has four right angles' alone, but I may do so with the addition of 'This has four equal sides'. A sentence's inferential significance is a sentence's inferential potential in a given context. A word's meaning is its contribution to the inferential potentials of sentences in which it occurs. 
This implies, according to Peregrin, that compositionality -- the idea that the meaning of a sentence depends in some way on the meanings of its constituent parts -- lies at the heart of inferentialism. In his words, "the inferential potential of every complex sentence can be seen as the sum of the contributions of its parts" (52). In Chapter Four, Peregrin aims to explain what it is for there to be rules governing inference. He says that for there to be a rule -- e.g., A implies B -- there must be normative attitudes, especially corrective attitudes towards those who violate the rule by, say, asserting A and denying B. Normative attitudes of this sort include explicit expressions of rules. Since he thinks that meaning-claims (e.g., '"Regel" means rule') express rules, he thinks they have normative force. In this respect, he thinks that meaning is normative. He explains that meaning-claims are nevertheless not rules for action -- rules for what to say when -- but only rules for criticism -- rules for which acts may be criticized or accepted. Just as the rules of chess do not tell you which move to make at any given point in a game, but only frame the space of possible moves, so too rules of inference do not tell you which inferences to make in a conversation, but only frame the space of possible inferences. In Chapter Five, Peregrin develops this idea further, carefully discussing the limits of the worn analogy between language and games, such as chess. He provides a very instructive table arraying important similarities. But there are many important differences. Unlike chess, some rules of language must, on pain of regress, remain unarticulated. The rules of language are also more complex, intricate, and disordered than those of chess. 
The rules of chess place only minimal constraints on how the game is implemented or embodied, but language imposes important "regulations" on the equipment involved: sign-designs, those who use them, and the many other things with which they interact and about which they speak. Perhaps most importantly, in chess, there is no counterpart to the world, which seems essential to natural language as we know it. Peregrin suggests language is more like a version of chess in which worldly conditions are given a role, such as making it impermissible to move a pawn when it's raining (34).[1] In Chapter Six, Peregrin considers how rules of any sort might have evolved. The main challenge, he thinks, is to explain why organisms would go from acting with regularity to following rules. Why would rules and rule-following have been evolutionarily advantageous? He conjectures that "rule-based cooperation" could have helped reduce "free riding," i.e., benefitting from other creatures' performance of a behavior but not performing that behavior oneself. Like Sellars, Brandom, and many others sympathetic with inferentialism, Peregrin holds that the capacity to follow rules distinguishes humans from other creatures, creating a "virtual space of meaningfulness" that didn't previously exist. Part II and Peregrin's turn to inferentialism about logic begins in Chapter Seven. This part uses more logical notation than Part I, but Peregrin remains clear throughout it, and I think it should be accessible to anyone who is comfortable with some basic philosophy of logic. Peregrin opens with a problem for inferentialism about logic. Gödel and Tarski have shown that there is more to consequence than inference, so inference cannot be used to explain consequence, which is precisely what inferentialism about logic aspires to do. The core of the problem is that if inferability is provability from finite premises, then some consequences cannot be inferred. 
Peregrin proposes to broaden the notion of inference (a third time) so that it need not be something that could be performed by humans, who can use only finite premises. In Chapter Eight, Peregrin considers which of the classical logical operators (¬, ∨, ∧, →) can be properly captured by an inferential pattern. He holds that ∨ cannot be so captured. In brief, his argument is that the introduction rules for ∨ (A ⊢ A ∨ B; B ⊢ A ∨ B) do not forbid a circumstance in which A ∨ B is true, but neither A nor B is true. So, there is "no straightforward inferentialist way to classical logic" (184). This need not be a problem for the inferentialist because she could dig in her heels and say so much the worse for classical logic. Or she could allow for "multiple conclusion" rules of inference, which permit one to draw multiple conclusions from a single set of premises (e.g., A ∨ B ⊢ A, B). Or she could admit that from an inferentialist point of view, intuitionistic logic -- a logic without the law of excluded middle (A ∨ ¬A) -- is more "natural" than classical logic. In Chapter Nine, Peregrin develops "expressivism" about logical vocabulary, the idea that such vocabulary makes it possible to express material rules of inference as claims in a language. He explains what sorts of terms are necessary for doing this, starting with a term that allows one to express that A implies B, then turning to what terms are necessary for expressing that A does not imply B. He argues that a language with such terms will lead to an intuitionistic logic, but shows also that if room is made for "multiple conclusion" inferences, classical logic is possible, too. He also explains that if one starts with incompatibility instead of inference (as do Brandom and Alp Aker), the road to classical logic is smoother. In Chapter Ten, Peregrin asks how an inferentialist can justify and know the rules of logic, such as modus ponens. 
For the inferentialist, the rules of logic are simply constitutive of the meaning of logical vocabulary. Modus ponens, for instance, is constitutive of the meaning of implication. Thus, Peregrin argues, they are trivially true. However, it is not trivial that a term for implication may be useful for a language to have. We can, thus, justify rules of logic by appeal to the usefulness of logical vocabulary. In Chapter Eleven, Peregrin argues that it is misleading to call rules of logic "rules of reasoning". They do not tell us which conclusions we should or may draw, or which beliefs to keep. In the first instance, rules of logic govern the interpersonal activity of demonstration and proof. Roughly like the way that rules of chess make chess pieces the pieces that they are, rules of logic constitute (the content of) our beliefs, making them what they are. Thus, they are the very framework that allows us to have beliefs and so cannot be a guide for which beliefs to have. As I indicated at the start of this review, this is a good book. There are, however, a few ways in which it could have been more thorough. First, Peregrin's view of what it takes for there to be norms is not wholly convincing. He says that a disposition to infer B from A does not by itself suffice for there to be a norm that B follows from A. There must also be normative attitudes, people correcting those who do not comply with the putative norm. But if attitudes are simply a function of what people actually do or are disposed to do, and norms are a function of attitudes, then norms are a function of what people actually do or are disposed to do, and normative inferentialism collapses into causal inferentialism, which Peregrin had said were different. He is aware of this danger (esp. 75-76), but does almost nothing to avert it. In Making It Explicit, Brandom also tries to avert it, claiming that the game of giving and asking for reasons is "norms all the way down." 
However, many have doubted that Brandom succeeds.[2] Since the difference between normative and causal inferentialism is at the heart of Peregrin's view, he needs to address this danger more thoroughly and convincingly. Second, Peregrin is too quick when discussing "Strong Inferentialism," the idea that rules of inference should be broadened to include rules not just for moves from claims to claims, but from observable circumstances to claims. He admits that the move from an observable circumstance (e.g., a cat in front of you) to a claim ('Lo, a cat!') is not normally regarded as an inference, and probably should not be so regarded (37). After all, doing so would seem to imply that observable circumstances are claims. Instead, he proposes that because such moves are norm-governed, they "may be seen as sufficiently inference like to warrant the label inferentialism" (42). However, as Peregrin is aware, this argument is weak; being norm-governed does not make such moves interestingly similar to inferences. He should be more precise. In particular, he could have explained that such moves are governed by norms of rational criticism, the very sort of norms that govern moves from claims to claims. For instance, one may demand support for a claim made in response to an observable circumstance, and one may appeal to such a claim when challenged to defend another claim, or when challenging another claim. Clarifying and developing that idea is essential to showing how inferentialism makes sense of empirical vocabulary. Without it, the story about empirical vocabulary risks appearing as a merely non-inferentialist add-on. Moreover, several philosophers have had interesting and helpful things to say about the inferentialist's treatment of empirical vocabulary.[3] Peregrin missed a chance to further that discussion. 
Third, although Peregrin is very good about addressing tempting and oft-repeated worries about inferentialism, there is one such worry that he does not address, a discussion of which would have made the book richer. Here is a version of it. Inferentialism conflicts with current orthodoxy in formal semantics, the project of systematically specifying the meanings of terms and sentences of natural languages. Inferentialism says that a sentence's meaning is its inferential role, but the current orthodoxy in formal semantics says that a sentence's meaning (as used on an occasion) is, at least in part, its truth-condition. Given this conflict, and the successes of currently orthodox formal semantics, inferentialism should be rejected. One might be tempted to reply that this argument rests on a simple confusion: inferentialism does not aim to specify the meanings of individual terms and sentences of natural languages; it aims only to explain why terms and sentences have meaning at all, and the meanings that they do have. (That distinction is sometimes referred to as the distinction between semantics and meta-semantics.) But that response does not suffice, for the inferentialist does indeed appear to hold that word and sentence meanings are inferential roles. And that appears to conflict with the formal semanticist's claim, to put it loosely but provocatively, that the meanings of sentences are truth-conditions. Granting this point, let me sketch one sort of reply that the inferentialist could make.[4] The point or purpose of any formal semantics, she might say, makes sense only within a broader framework. The explanatorily primitive terms of formal semantics, such as 'reference,' 'extension,' and 'true,' must ultimately be understood in terms of their inferential roles. To whatever extent we can specify the meanings of words and sentences in terms of 'reference,' 'extension,' and 'true,' we rely on an inferentialist understanding of those terms. 
That is just a sketch of a reply, but it is not obviously wrong. And given that smart philosophers have worried that the conflict between inferentialism and formal semantics is real, it would have been helpful for Peregrin to discuss that worry and this sort of reply.[5] Although there are a few ways in which I think Peregrin's book might have been more thorough, I heartily recommend it to anyone who is interested in understanding the problems and prospects for inferentialism.[6] [1] The idea is reminiscent of John Haugeland's discussion of semi-automatic, automatic, esoteric and empirical chess in "Truth and Rule-Following," in his Having Thought (Harvard University Press: Cambridge, MA. 1998). Unfortunately, Peregrin does not discuss Haugeland. [2] For instance, Jay Rosenberg, "Brandom's Making It Explicit: A First Encounter," Philosophy and Phenomenological Research, 57.1 (1997): 179-187; Jürgen Habermas, "From Kant to Hegel: On Robert Brandom's Pragmatic Philosophy of Language," European Journal of Philosophy, 8.3 (2000): 322-355; Robert Pippin, "Brandom's Hegel," European Journal of Philosophy, 13.3 (2005): 381-408. [3] In addition to several texts by Wilfrid Sellars, Robert Brandom, and John McDowell, see Michael P. Wolf, "Rigid Designation and Anaphoric Theories of Reference," Philosophical Studies 130 (2006): 351-75; Rebecca Kukla and Mark Lance, Yo! and Lo! (Cambridge, MA: Harvard University Press, 2009), esp. Ch. 2. [4] Brandom suggests a similar response (Making It Explicit, pp. 143-45). Peregrin expresses sympathy with it ("Developing Sellars's Semantic Legacy," in Wolf and Lance (eds.), The Self-Correcting Enterprise (New York: Rodopi, 2006), p. 273). [5] Referring to Brandom's work on inferentialism, Timothy Williamson gestures at a similar argument ("How Did We Get Here from There? The Transformation of Analytic Philosophy?" Belgrade Philosophical Annual XXVII (2014), 34). 
In a brief blog post, Jason Stanley sketches a similar argument, defending it against various objections in the appended comments ("The Use Theory of Meaning," Leiter Reports (March 14, 2006)). [6] Thanks to Nat Hansen and Michael Wolf for very helpful comments on an earlier draft.}, urldate = {2017-03-30}, author = {Maher, Chauncey}, collaborator = {Peregrin, Jaroslav}, month = apr, year = {2015}, }
@inproceedings{boleda_distributional_2015, title = {Distributional {Semantic} {Features} as {Semantic} {Primitives} – or not}, url = {http://www.aaai.org/ocs/index.php/SSS/SSS15/paper/view/10240}, urldate = {2017-03-30}, booktitle = {Knowledge {Representation} and {Reasoning}: {Integrating} {Symbolic} and {Neural} {Approaches}: {Papers} from the 2015 {AAAI} {Spring} {Symposium}}, author = {Boleda, Gemma and Erk, Katrin}, year = {2015}, }
@article{betti_haslangers_2014, title = {On {Haslanger}’s {Focal} {Analysis} of {Race} and {Gender} in {Resisting} {Reality} as an {Interpretive} {Model}}, volume = {1}, url = {http://krisis.eu/wp-content/uploads/2016/12/krisis-2014-1-03-betti.pdf}, journal = {Krisis}, author = {Betti, Arianna}, year = {2014}, pages = {13--18}, }
@article{baroni_frege_2014, title = {Frege in {Space}: {A} {Program} of {Compositional} {Distributional} {Semantics}}, volume = {9}, copyright = {Copyright (c)}, issn = {1945-3604}, shorttitle = {Frege in {Space}}, url = {http://csli-lilt.stanford.edu/ojs/index.php/LiLT/article/view/6}, abstract = {The lexicon of any natural language encodes a huge number of distinct word meanings. Just to understand this article, you will need to know what thousands of words mean. The space of possible sentential meanings is infinite: In this article alone, you will encounter many sentences that express ideas you have never heard before, we hope. Statistical semantics has addressed the issue of the vastness of word meaning by proposing methods to harvest meaning automatically from large collections of text (corpora). Formal semantics in the Fregean tradition has developed methods to account for the infinity of sentential meaning based on the crucial insight of compositionality, the idea that meaning of sentences is built incrementally by combining the meanings of their constituents. This article sketches a new approach to semantics that brings together ideas from statistical and formal semantics to account, in parallel, for the richness of lexical meaning and the combinatorial power of sentential semantics. We adopt, in particular, the idea that word meaning can be approximated by the patterns of co-occurrence of words in corpora from statistical semantics, and the idea that compositionality can be captured in terms of a syntax-driven calculus of function application from formal semantics.}, language = {en}, number = {0}, urldate = {2017-03-30}, journal = {LiLT (Linguistic Issues in Language Technology)}, author = {Baroni, Marco and Bernardi, Raffaella and Zamparelli, Roberto}, year = {2014}, keywords = {compositional semantics, distributional semantics}, }
@incollection{bernardi_distributional_2014, series = {Lecture {Notes} in {Computer} {Science}}, title = {Distributional {Semantics}: {A} {Montagovian} {View}}, copyright = {©2014 Springer-Verlag Berlin Heidelberg}, isbn = {978-3-642-54788-1 978-3-642-54789-8}, shorttitle = {Distributional {Semantics}}, url = {http://link.springer.com/chapter/10.1007/978-3-642-54789-8_5}, abstract = {This paper describes the current status of research in Distributional Semantics looking at the results from the Montagovian tradition stand point. It considers the main aspects of the Montagovian view as binoculars to observe those results, in particular: compositionality, syntax-semantics interface, logical words and entailment. To this end, it reviews some work that aims to tackle those issues within the Distributional Semantics Models and tries to highlight some open questions formal and distributional semanticists could address together. Credits: Some of the material in the background section is based on distributional semantics talks by Marco Baroni, Stefan Evert, Alessandro Lenci and Roberto Zamparelli.}, language = {en}, number = {8222}, urldate = {2017-03-30}, booktitle = {Categories and {Types} in {Logic}, {Language}, and {Physics}}, publisher = {Springer Berlin Heidelberg}, author = {Bernardi, Raffaella}, editor = {Casadio, Claudia and Coecke, Bob and Moortgat, Michael and Scott, Philip}, year = {2014}, doi = {10.1007/978-3-642-54789-8_5}, keywords = {Computation by Abstract Devices, Computer Science, general, History of Computing, Logics and Meanings of Programs, Mathematical Logic and Formal Languages}, pages = {63--89}, }
@inproceedings{herbelot2012distributional, title = {Distributional techniques for philosophical enquiry}, booktitle = {Proceedings of the 6th {Workshop} on {Language} {Technology} for {Cultural} {Heritage}, {Social} {Sciences}, and {Humanities}}, publisher = {Association for Computational Linguistics}, author = {Herbelot, Aurélie and Von Redecker, Eva and Müller, Johanna}, year = {2012}, pages = {45--54}, }
@article{erk_vector_2012, title = {Vector {Space} {Models} of {Word} {Meaning} and {Phrase} {Meaning}: {A} {Survey}}, volume = {6}, issn = {1749-818X}, shorttitle = {Vector {Space} {Models} of {Word} {Meaning} and {Phrase} {Meaning}}, url = {http://onlinelibrary.wiley.com/doi/10.1002/lnco.362/abstract}, doi = {10.1002/lnco.362}, abstract = {Distributional models represent a word through the contexts in which it has been observed. They can be used to predict similarity in meaning, based on the distributional hypothesis, which states that two words that occur in similar contexts tend to have similar meanings. Distributional approaches are often implemented in vector space models. They represent a word as a point in high-dimensional space, where each dimension stands for a context item, and a word's coordinates represent its context counts. Occurrence in similar contexts then means proximity in space. In this survey we look at the use of vector space models to describe the meaning of words and phrases: the phenomena that vector space models address, and the techniques that they use to do so. Many word meaning phenomena can be described in terms of semantic similarity: synonymy, priming, categorization, and the typicality of a predicate's arguments. But vector space models can do more than just predict semantic similarity. They are a very flexible tool, because they can make use of all of linear algebra, with all its data structures and operations. The dimensions of a vector space can stand for many things: context words, or non-linguistic context like images, or properties of a concept. And vector space models can use matrices or higher-order arrays instead of vectors for representing more complex relationships. Polysemy is a tough problem for distributional approaches, as a representation that is learned from all of a word's contexts will conflate the different senses of the word. It can be addressed, using either clustering or vector combination techniques. 
Finally, we look at vector space models for phrases, which are usually constructed by combining word vectors. Vector space models for phrases can predict phrase similarity, and some argue that they can form the basis for a general-purpose representation framework for natural language semantics.}, language = {en}, number = {10}, urldate = {2017-04-02}, journal = {Language and Linguistics Compass}, author = {Erk, Katrin}, month = oct, year = {2012}, pages = {635--653}, }
@book{kibbee_chomskyan_2010, title = {Chomskyan (r)evolutions}, isbn = {978-90-272-1169-9}, abstract = {It is not unusual for contemporary linguists to claim that Modern Linguistics began in 1957 (with the publication of Noam Chomsky's "Syntactic Structures"). Some of the essays in "Chomskyan (R)evolutions" examine the sources, the nature and the extent of the theoretical changes Chomsky introduced in the 1950s. Other contributions explore the key concepts and disciplinary alliances that have evolved considerably over the past sixty years, such as the meanings given for Universal Grammar, the relationship of Chomskyan linguistics to other disciplines (Cognitive Science, Psychology, Evolutionary Biology), and the interactions between mainstream Chomskyan linguistics and other linguistic theories active in the late 20th century: Functionalism, Generative Semantics and Relational Grammar. The broad understanding of the recent history of linguistics points the way towards new directions and methods that linguistics can pursue in the future."}, language = {en}, publisher = {John Benjamins Publishing}, author = {Kibbee, Douglas A.}, year = {2010}, note = {Google-Books-ID: TMW7inSiDK0C}, keywords = {Language Arts \& Disciplines / Linguistics / General}, }
@article{louwerse_language_2009, title = {Language {Encodes} {Geographical} {Information}}, volume = {33}, issn = {1551-6709}, url = {http://onlinelibrary.wiley.com/doi/10.1111/j.1551-6709.2008.01003.x/abstract}, doi = {10.1111/j.1551-6709.2008.01003.x}, abstract = {Population counts and longitude and latitude coordinates were estimated for the 50 largest cities in the United States by computational linguistic techniques and by human participants. The mathematical technique Latent Semantic Analysis applied to newspaper texts produced similarity ratings between the 50 cities that allowed for a multidimensional scaling (MDS) of these cities. MDS coordinates correlated with the actual longitude and latitude of these cities, showing that cities that are located together share similar semantic contexts. This finding was replicated using a first-order co-occurrence algorithm. The computational estimates of geographical location as well as population were akin to human estimates. These findings show that language encodes geographical information that language users in turn may use in their understanding of language and the world.}, language = {en}, number = {1}, urldate = {2017-03-31}, journal = {Cognitive Science}, author = {Louwerse, Max M. and Zwaan, Rolf A.}, month = jan, year = {2009}, keywords = {Computational linguistics, Corpus linguistics, Geographical coordinates, Geography, Latent semantic analysis, Multidimensional scaling, Semantic representations, Spatial cognition, Word frequency}, pages = {51--73}, }
@article{sahlgren_distributional_2008, title = {The distributional hypothesis}, volume = {20}, number = {1}, journal = {Italian Journal of Linguistics}, author = {Sahlgren, Magnus}, year = {2008}, pages = {33--54}, }
@misc{lenci_distributional_2008, title = {Distributional semantics in linguistic and cognitive research}, url = {http://linguistica.sns.it/RdL/20.1/ALenci.pdf}, urldate = {2017-03-30}, author = {Lenci, Alessandro}, year = {2008}, }
@article{brandom_inferentialism_2007, title = {Inferentialism and {Some} of {Its} {Challenges}}, volume = {74}, issn = {0031-8205}, url = {http://www.jstor.org/stable/40041073}, number = {3}, urldate = {2017-03-30}, journal = {Philosophy and Phenomenological Research}, author = {Brandom, Robert}, year = {2007}, pages = {651--676}, }
@inproceedings{Pado:2003:CSS:1075096.1075113, address = {Sapporo, Japan}, series = {{ACL} '03}, title = {Constructing {Semantic} {Space} {Models} from {Parsed} {Corpora}}, url = {http://dx.doi.org/10.3115/1075096.1075113}, doi = {10.3115/1075096.1075113}, booktitle = {Proceedings of the 41st {Annual} {Meeting} on {Association} for {Computational} {Linguistics} - {Volume} 1}, publisher = {Association for Computational Linguistics}, author = {Padó, Sebastian and Lapata, Mirella}, year = {2003}, note = {tex.acmid= 1075113 tex.numpages= 8}, pages = {128--135}, }
@book{nevin_legacy_2002, title = {The {Legacy} of {Zellig} {Harris}: {Language} and {Information} {Into} the 21st {Century}}, isbn = {978-90-272-4736-0}, shorttitle = {The {Legacy} of {Zellig} {Harris}}, abstract = {Zellig Harris opened many lines of research in language, information, and culture, from generative grammar to informatics, from mathematics to language pedagogy. An international array of scholars here describe further developments and relate this work to that of others. Volume 1 begins with a survey article by Harris himself, previously unavailable in English. T.A. Ryckman, Paul Mattick, Maurice Gross, and Francis Lin show the importance of Harris's methodology for philosophy of science, the first two with reference especially to his remarkable findings on the form of information in science. Themes of discourse and sublanguage analysis are developed further in chapters by Michael Gottfried, James Munz, Robert Longacre, and Carlota Smith. Morris Salkoff, Peter Seuren, and Lila Gleitman present diverse developments in syntax and semantics. Phonology is represented in chapters by Leigh Lisker and by Frank Harary and Stephen Helmreich. Daythal Kendall applies operator grammar to literary analysis of Sapir's Takelma texts, and Fred Lukoff's chapter describes benefits of string analysis for language pedagogy.}, language = {en}, publisher = {John Benjamins Publishing}, author = {Nevin, Bruce E. and Johnson, Stephen M.}, month = jan, year = {2002}, keywords = {Language Arts \& Disciplines / Linguistics / General}, }
@article{nevin_minimalist_1993, title = {A {Minimalist} {Program} for {Linguistics}: {The} {Work} of {Zellig} {Harris} on {Meaning} and {Information}}, volume = {20}, issn = {0302-5160, 1569-9781}, shorttitle = {A {Minimalist} {Program} for {Linguistics}}, url = {http://www.jbe-platform.com/content/journals/10.1075/hl.20.2-3.06nev}, doi = {10.1075/hl.20.2-3.06nev}, abstract = {SUMMARY: Zellig S. Harris (1909-1992) is a familiar icon of American structuralism. According to received views of the history of linguistics in the 20th century, he is an exemplar of 'taxonomic linguistics' seeking practical discovery procedures whereby one could mechanically derive a grammar from distributional analysis of a corpus of utterances without reference to meaning, and a proponent of empiricist and behaviorist views that have been overthrown by the revolution of Generative linguistics. An examination of what he actually wrote, however, shows a lifelong concern with the analysis and representation of meaning. Harris' approach to the evaluation of alternative tools of analysis, alternative grammars, and alternative theories of language arises from a crucial but little acknowledged dilemma of linguistics grounded in a fundamental property of language, namely, that it contains within itself virtually unrestricted metalinguistic capacities, upon which any description of language whatever either directly or indirectly depends. RÉSUMÉ: Zellig S. Harris (1909-1992) est un icône familier du structuralisme américain. En plus d'être l'auteur d'opinions empiristes et behavioristes qui ont été renversées par la révolution de la linguistique generative, Harris serait, selon les idées reçues sur l'histoire de la linguistique, un produit de la 'linguistique taxonomique', recherchant des procédures de découverte pratiques par lesquelles l'on peut dériver mécaniquement une grammaire à partir d'une analyse distributionnelle d'un corpus d'énoncés, sans référence au sens. 
Un examen attentif de ses écrits révèle, cependant, un véritable souci de l'analyse et de la représentation du sens, et cela sa vie durant. L'approche de Harris relative à l'évaluation d'outils alternatifs pour l'analyse, de grammaires alternatives et de théories du langage alternatives est motivée par un dilemme linguistique crucial mais peu reconnu, ancré dans une propriété fondamentale du langage, à savoir que le langage contient des capacités métalinguistiques quasi illimitées desquelles dépend, soit directement ou indirectement, toute description de celui-ci. ZUSAMMENFASSUNG: Zellig S. Harris (1909-1992) ist ein geläufiges Bild des Amerikanischen Structuralism. Den üblichen Darstellungen in der Wissenschaftsgeschichte der Linguistik des 20. Jahrhunderts zufolge stellt er ein Muster der 'taxonomischen Sprachwissenschaft' dar, stets auf der Suche nach praktischen Entdeckungsprozeduren, durch die es möglich sein solle, auf mechanischem Wege eine Grammatik aufgrund einer distributionellen Analyse aus Korpus von Sprachäußerungen ohne Bezug auf die Bedeutung zu erstellen. Gleichzeitig gilt er als ein Vertreter von empiristischen und behavioristischen Ansichten, die durch die generative Revolution in der Linguistik über den Haufen geworfen worden seien. Dagegen ergibt eine sorgfältige Untersuchung dessen, was Harris tatsächlich schrieb, daß er ein lebenslanges Engagement für die Analyse und Darstellung der Bedeutung hatte. 
Harris' Herangehen an die Bewertung alternativer Werkzeuge für die Analyse, alternative Grammatiken, und alternative Sprachtheorien nimmt seinen Ausgang im kruzialen, jedoch wenig erkannten Dilemma der Linguistik, die auf einer fundamentalen Eigenschaft der Sprache basiert, nämlich die, daß sie in sich unbeschränkte metalinguistische Möglichkeiten enthält, von der eine jede Sprachbeschreibung entweder direkt oder indirekt abhängt.}, number = {2}, urldate = {2017-03-31}, journal = {Historiographia Linguistica}, author = {Nevin, Bruce E.}, month = jan, year = {1993}, pages = {355--398}, }
@article{miller_contextual_1991, title = {Contextual correlates of semantic similarity}, volume = {6}, issn = {0169-0965}, url = {http://dx.doi.org/10.1080/01690969108406936}, doi = {10.1080/01690969108406936}, abstract = {The relationship between semantic and contextual similarity is investigated for pairs of nouns that vary from high to low semantic similarity. Semantic similarity is estimated by subjective ratings; contextual similarity is estimated by the method of sorting sentential contexts. The results show an inverse linear relationship between similarity of meaning and the discriminability of contexts. This relation, is obtained for two separate corpora of sentence contexts. It is concluded that, on average, for words in the same language drawn from the same syntactic and semantic categories, the more often two words can be substituted into the same contexts the more similar in meaning they are judged to be.}, number = {1}, urldate = {2017-03-31}, journal = {Language and Cognitive Processes}, author = {Miller, George A. and Charles, Walter G.}, month = jan, year = {1991}, pages = {1--28}, }
@book{gardner_quest_1973, address = {New York}, edition = {1st edition}, title = {The {Quest} for {Mind}: {Piaget}, {Levi}-{Strauss}, and the {Structuralist} {Movement}}, isbn = {978-0-394-47944-6}, shorttitle = {The {Quest} for {Mind}}, abstract = {Piaget and Levi-Strauss are embarked on parallel scientific enterprises; and their work represents the most significant contemporary innovation in the sciences of man. The author's desire to present the ideas of these two men, to specify previously unnoticed areas of common ground, and to describe the importance and promise of the structuralist movement which they have launched is what prompted him to write this book.}, language = {English}, publisher = {Knopf}, author = {Gardner, Howard}, year = {1973}, }
@book{quine_word_1960, address = {Cambridge, MA}, edition = {3rd Edition}, title = {Word and {Object}}, url = {https://1drv.ms/t/s!AgPq3zEkkYuOgZwZzNSmUOCRI_wf6g}, publisher = {MIT Press}, author = {Quine, Willard Van Orman}, year = {1960}, keywords = {19, seed}, }
@article{harris_distributional_1954, title = {Distributional {Structure}}, volume = {10}, issn = {0043-7956}, url = {http://dx.doi.org/10.1080/00437956.1954.11659520}, doi = {10.1080/00437956.1954.11659520}, number = {2-3}, urldate = {2017-03-31}, journal = {WORD}, author = {Harris, Zellig S.}, month = aug, year = {1954}, pages = {146--162}, }
@book{harris_methods_1951, series = {Methods in {Structural} {Linguistics}}, title = {Methods in {Structural} {Linguistics}}, publisher = {University of Chicago Press}, author = {Harris, Z.S.}, year = {1951}, }
@book{bloomfield_leonard_language_1933, address = {New York}, title = {Language}, isbn = {978-81-208-1196-6}, abstract = {The book presents the fundamentals of Linguistics and the historical survey of languages to the reader without any complication and obscurity. It is a valuable book for students and scholars of linguistics. The author has followed the traditional order of presentation. He begins with the survey of languages of the world, proceeds with the study of phonetic structure, grammatical forms, syntax and morphology, each being the indispensable preliminary to the study of the ensuing one.}, language = {en}, publisher = {Holt}, author = {{Bloomfield, Leonard}}, year = {1933}, keywords = {18}, }
@misc{bryson_semantics_nodate, title = {Semantics derived automatically from language corpora necessarily contain human biases}, url = {https://joanna-bryson.blogspot.nl/2016/08/semantics-derived-automatically-from.html}, urldate = {2017-12-29}, author = {Bryson, Joanna}, }
@misc{bryson_should_nodate, title = {Should we let someone use {AI} to delete human bias? {Would} we know what we were saying?}, url = {https://joanna-bryson.blogspot.nl/2016/07/should-we-let-someone-use-ai-to-delete.html}, urldate = {2017-12-29}, author = {Bryson, Joanna}, }
@misc{bryson_we_nodate, title = {We {Didn}'t {Prove} {Prejudice} {Is} {True} ({A} {Role} for {Consciousness})}, url = {https://joanna-bryson.blogspot.nl/2017/04/we-didnt-prove-prejudice-is-true-role.html}, urldate = {2017-12-29}, author = {Bryson, Joanna}, }
@misc{emerging_technology_from_the_arxiv_neural_nodate, title = {Neural networks are inadvertently learning our language’s hidden gender biases}, url = {https://www.technologyreview.com/s/602025/how-vector-space-mathematics-reveals-the-hidden-sexism-in-language/}, abstract = {As neural networks tease apart the structure of language, they are finding a hidden gender bias that nobody knew was there.}, urldate = {2017-04-02}, journal = {MIT Technology Review}, author = {Emerging Technology from the arXiv}, }
@misc{peregrin_what_nodate, title = {What is {Inferentialism}?}, url = {https://asep.lib.cas.cz/arl-cav/en/detail-cav_un_epca-0377515-What-is-Inferentialism/}, abstract = {Record detail - What is Inferentialism? - Record detail - The Czech Academy of Sciences}, urldate = {2017-03-30}, author = {Peregrin, Jaroslav}, }