Measuring Compositionality in Representation Learning. Andreas, J. In International Conference on Learning Representations, 2019.
Many machine learning algorithms represent input data with vector embeddings or discrete codes. When inputs exhibit compositional structure (e.g. objects built from parts or procedures from subroutines), it is natural to ask whether this compositional structure is reflected in the inputs' learned representations. While the assessment of compositionality in languages has received significant attention in linguistics and adjacent fields, the machine learning literature lacks general-purpose tools for producing graded measurements of compositional structure in more general (e.g. vector-valued) representation spaces. We describe a procedure for evaluating compositionality by measuring how well the true representation-producing model can be approximated by a model that explicitly composes a collection of inferred representational primitives. We use the procedure to provide formal and empirical characterizations of compositional structure in a variety of settings, exploring the relationship between compositionality and learning dynamics, human judgments, representational similarity, and generalization.
@inproceedings{Andreas2019,
abstract = {Many machine learning algorithms represent input data with vector embeddings or discrete codes. When inputs exhibit compositional structure (e.g. objects built from parts or procedures from subroutines), it is natural to ask whether this compositional structure is reflected in the inputs' learned representations. While the assessment of compositionality in languages has received significant attention in linguistics and adjacent fields, the machine learning literature lacks general-purpose tools for producing graded measurements of compositional structure in more general (e.g. vector-valued) representation spaces. We describe a procedure for evaluating compositionality by measuring how well the true representation-producing model can be approximated by a model that explicitly composes a collection of inferred representational primitives. We use the procedure to provide formal and empirical characterizations of compositional structure in a variety of settings, exploring the relationship between compositionality and learning dynamics, human judgments, representational similarity, and generalization.},
archivePrefix = {arXiv},
arxivId = {1902.07181},
author = {Andreas, Jacob},
booktitle = {International Conference on Learning Representations},
eprint = {1902.07181},
keywords = {method: tree reconstruction error,phenomenon: compositionality},
title = {{Measuring Compositionality in Representation Learning}},
url = {http://arxiv.org/abs/1902.07181},
year = {2019}
}
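
The procedure the abstract describes is the paper's tree reconstruction error (TRE, per the keywords above): infer one vector primitive per leaf token, compose the primitives along each input's derivation tree, and measure how far the composed vectors fall from the model's true representations. Below is a minimal sketch in Python/PyTorch, assuming vector addition as the composition function and L2 distance (one instantiation the paper considers); the names Tree, compose, and tre are illustrative, not the author's code.

import torch

class Tree:
    # A derivation: either a leaf token (str) or a pair of subtrees.
    def __init__(self, left, right=None):
        self.left, self.right = left, right

    def leaves(self):
        if self.right is None:
            return {self.left}
        return self.left.leaves() | self.right.leaves()

def compose(tree, prims):
    # Compose inferred primitives bottom-up; assumed composition: addition.
    if tree.right is None:
        return prims[tree.left]
    return compose(tree.left, prims) + compose(tree.right, prims)

def tre(reps, derivs, dim, steps=500, lr=0.1):
    # Fit leaf primitives to minimize total squared error, then report
    # the mean L2 distance between true and composed representations.
    tokens = set().union(*(d.leaves() for d in derivs))
    prims = {t: torch.zeros(dim, requires_grad=True) for t in tokens}
    opt = torch.optim.Adam(prims.values(), lr=lr)
    for _ in range(steps):
        opt.zero_grad()
        loss = sum((r - compose(d, prims)).pow(2).sum()
                   for r, d in zip(reps, derivs))
        loss.backward()
        opt.step()
    with torch.no_grad():
        return sum((r - compose(d, prims)).norm().item()
                   for r, d in zip(reps, derivs)) / len(reps)

# Hypothetical toy data: two representations whose derivations share
# the primitive "red".
reps = [torch.tensor([1.0, 0.0]), torch.tensor([1.0, 1.0])]
derivs = [Tree(Tree("red"), Tree("circle")),
          Tree(Tree("red"), Tree("square"))]
print(tre(reps, derivs, dim=2))  # near 0 here: an exact additive fit exists

A TRE near zero means the representations are well approximated by explicit composition of the inferred primitives; larger values indicate less compositional structure.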
{"_id":"N2tzXR2sde3qf8ngR","bibbaseid":"andreas-measuringcompositionalityinrepresentationlearning-2019","authorIDs":[],"author_short":["Andreas, J."],"bibdata":{"bibtype":"inproceedings","type":"inproceedings","abstract":"Many machine learning algorithms represent input data with vector embeddings or discrete codes. When inputs exhibit compositional structure (e.g. objects built from parts or procedures from subroutines), it is natural to ask whether this compositional structure is reflected in the the inputs' learned representations. While the assessment of compositionality in languages has received significant attention in linguistics and adjacent fields, the machine learning literature lacks general-purpose tools for producing graded measurements of compositional structure in more general (e.g. vector-valued) representation spaces. We describe a procedure for evaluating compositionality by measuring how well the true representation-producing model can be approximated by a model that explicitly composes a collection of inferred representational primitives. We use the procedure to provide formal and empirical characterizations of compositional structure in a variety of settings, exploring the relationship between compositionality and learning dynamics, human judgments, representational similarity, and generalization.","archiveprefix":"arXiv","arxivid":"1902.07181","author":[{"propositions":[],"lastnames":["Andreas"],"firstnames":["Jacob"],"suffixes":[]}],"booktitle":"International Conference of Learning Representations","eprint":"1902.07181","file":":Users/shanest/Documents/Library/Andreas/International Conference of Learning Representations/Andreas - 2019 - Measuring Compositionality in Representation Learning.pdf:pdf","keywords":"method: tree reconstruction error,phenomenon: compositionality","title":"Measuring Compositionality in Representation Learning","url":"http://arxiv.org/abs/1902.07181","year":"2019","bibtex":"@inproceedings{Andreas2019,\nabstract = {Many machine learning algorithms represent input data with vector embeddings or discrete codes. When inputs exhibit compositional structure (e.g. objects built from parts or procedures from subroutines), it is natural to ask whether this compositional structure is reflected in the the inputs' learned representations. While the assessment of compositionality in languages has received significant attention in linguistics and adjacent fields, the machine learning literature lacks general-purpose tools for producing graded measurements of compositional structure in more general (e.g. vector-valued) representation spaces. We describe a procedure for evaluating compositionality by measuring how well the true representation-producing model can be approximated by a model that explicitly composes a collection of inferred representational primitives. 
We use the procedure to provide formal and empirical characterizations of compositional structure in a variety of settings, exploring the relationship between compositionality and learning dynamics, human judgments, representational similarity, and generalization.},\narchivePrefix = {arXiv},\narxivId = {1902.07181},\nauthor = {Andreas, Jacob},\nbooktitle = {International Conference of Learning Representations},\neprint = {1902.07181},\nfile = {:Users/shanest/Documents/Library/Andreas/International Conference of Learning Representations/Andreas - 2019 - Measuring Compositionality in Representation Learning.pdf:pdf},\nkeywords = {method: tree reconstruction error,phenomenon: compositionality},\ntitle = {{Measuring Compositionality in Representation Learning}},\nurl = {http://arxiv.org/abs/1902.07181},\nyear = {2019}\n}\n","author_short":["Andreas, J."],"key":"Andreas2019","id":"Andreas2019","bibbaseid":"andreas-measuringcompositionalityinrepresentationlearning-2019","role":"author","urls":{"Paper":"http://arxiv.org/abs/1902.07181"},"keyword":["method: tree reconstruction error","phenomenon: compositionality"],"metadata":{"authorlinks":{}},"downloads":14},"bibtype":"inproceedings","biburl":"https://www.shane.st/teaching/575/win20/MachineLearning-interpretability.bib","creationDate":"2020-01-05T04:04:02.892Z","downloads":14,"keywords":["method: tree reconstruction error","phenomenon: compositionality"],"search_terms":["measuring","compositionality","representation","learning","andreas"],"title":"Measuring Compositionality in Representation Learning","year":2019,"dataSources":["okYcdTpf4JJ2zkj7A","TPZs4iPqAgE5a8mjq","znj7izS5PeehdLR3G"]}