<script src="https://bibbase.org/show?bib=https%3A%2F%2Fapi.zotero.org%2Fgroups%2F4476011%2Fitems%3Fkey%3DBfP7bN7FF9dJwtyiLBORewdg%26format%3Dbibtex%26limit%3D100&jsonp=1"></script>
<?php
// Fetch the pre-rendered bibliography from BibBase and emit it into the page.
$contents = file_get_contents("https://bibbase.org/show?bib=https%3A%2F%2Fapi.zotero.org%2Fgroups%2F4476011%2Fitems%3Fkey%3DBfP7bN7FF9dJwtyiLBORewdg%26format%3Dbibtex%26limit%3D100");
echo ($contents !== false) ? $contents : "Failed to load bibliography from BibBase.";
?>
<iframe src="https://bibbase.org/show?bib=https%3A%2F%2Fapi.zotero.org%2Fgroups%2F4476011%2Fitems%3Fkey%3DBfP7bN7FF9dJwtyiLBORewdg%26format%3Dbibtex%26limit%3D100"></iframe>
For more details, see the documentation.
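The raw BibTeX behind this page can also be fetched directly from the Zotero API URL used in the snippets above. A minimal sketch in Python (a convenience assumption, not one of BibBase's documented integrations):

import requests

# The Zotero group export that BibBase renders on this page.
ZOTERO_BIBTEX_URL = (
    "https://api.zotero.org/groups/4476011/items"
    "?key=BfP7bN7FF9dJwtyiLBORewdg&format=bibtex&limit=100"
)

def fetch_bibtex(url: str = ZOTERO_BIBTEX_URL) -> str:
    """Return the raw BibTeX for this bibliography, raising on HTTP errors."""
    response = requests.get(url, timeout=30)
    response.raise_for_status()
    return response.text

if __name__ == "__main__":
    print(fetch_bibtex()[:500])  # preview the first few hundred characters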
@article{pooransingh_similarity_2021, title = {Similarity {Analysis} of {Modern} {Genre} {Music} {Based} on {Billboard} {Hits}}, volume = {9}, issn = {2169-3536}, doi = {10.1109/ACCESS.2021.3122386}, abstract = {Mainstream music can be popularly categorized into specific genres: Rock, Country, Hip Hop, contemporary rhythm and blues (R\&B) and Pop. Music of these genres is continually compiled on the Billboard music charts based on popularity. This paper explores the uniqueness of these genres and the possible melding of acoustic characteristics over time. Principal Component Analysis (PCA) is applied to timbral and non-timbral characteristics and compared for each genre. Results show that Hip Hop and Pop maintained a unique distinction over time compared to the other genres while Rock, Country and R\&B began to share similar acoustic characteristics within recent times. Further analysis attempts to predict the trend of the acoustic nature of genres.}, journal = {IEEE Access}, author = {Pooransingh, Akash and Dhoray, Dylan}, year = {2021}, keywords = {Acoustics, Feature extraction, Frequency measurement, High frequency, Mel frequency cepstral coefficient, Music information retrieval, Principal component analysis, Rhythm, acoustic applications, principal component analysis}, pages = {144916--144926}, }
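The genre-similarity approach this abstract describes (PCA over acoustic features, then comparison across genres) can be sketched in a few lines. This is an illustrative reconstruction, not the authors' code; the feature matrix and genre labels below are random placeholders for the timbral and non-timbral features extracted from Billboard tracks.

import numpy as np
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler

rng = np.random.default_rng(0)

# Hypothetical stand-ins: 500 tracks x 20 acoustic features, with genre labels.
features = rng.normal(size=(500, 20))
genres = rng.choice(["rock", "country", "hiphop", "rnb", "pop"], size=500)

# Standardize the features, then project onto the leading principal components.
pcs = PCA(n_components=2).fit_transform(StandardScaler().fit_transform(features))

# Summarize each genre by its centroid in PC space; inter-centroid distance
# serves as a simple (closer = more similar) measure of genre similarity.
centroids = {g: pcs[genres == g].mean(axis=0) for g in np.unique(genres)}
for g1 in centroids:
    for g2 in centroids:
        if g1 < g2:
            print(f"{g1} vs {g2}: {np.linalg.norm(centroids[g1] - centroids[g2]):.2f}")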
@techreport{jo_musicians_2021, title = {Musicians and non-musicians’ consonant/dissonant perception investigated by {EEG} and {fMRI}}, copyright = {© 2021, Posted by Cold Spring Harbor Laboratory. This pre-print is available under a Creative Commons License (Attribution-NonCommercial-NoDerivs 4.0 International), CC BY-NC-ND 4.0, as described at http://creativecommons.org/licenses/by-nc-nd/4.0/}, url = {https://www.biorxiv.org/content/10.1101/2021.08.15.456377v1}, abstract = {The perception of two (or more) simultaneous musical notes, depending on their pitch interval(s), could be broadly categorized as consonant or dissonant. Previous studies have suggested that musicians and non-musicians adopt different strategies when discerning music intervals: the frequency ratio (perfect fifth or tritone) for the former, and frequency differences (e.g., roughness vs. non-roughness) for the latter. To extend and replicate this previous finding, in this follow-up study we reran the ElectroEncephaloGraphy (EEG) experiment, and separately collected functional magnetic resonance imaging (fMRI) data of the same protocol. The behavioral results replicated our previous findings that musicians used pitch intervals and nonmusicians roughness for consonant judgments. And the ERP amplitude differences between groups in both frequency ratio and frequency differences were primarily around N1 and P2 periods along the midline channels. The fMRI results, with the joint analyses by univariate, multivariate, and connectivity approaches, further reinforce the involvement of midline and related-brain regions in consonant/dissonance judgments. Additional representational similarity analysis (or RSA), and the final spatio-temporal searchlight RSA (or ss-RSA), jointly combined the fMRI-EEG into the same representational space, providing final support on the neural substrates of neurophysiological signatures. Together, these analyses not just exemplify the importance of replication, that musicians rely more on top-down knowledge for consonance/dissonance perception; but also demonstrate the advantages of multiple analyses in constraining the findings from both EEG and fMRI. Significance Statement In this study, the neural correlates of consonant and dissonant perception have been revisited with both EEG and fMRI. Behavioral results of the current study well replicated the pattern of our earlier work (Kung et al., 2014), and the ERP results, though showing that both musicians and nonmusicians processed rough vs. non-rough notes similarly, still supported the top-down modulation in musicians likely through long-term practice. The fMRI results, combining univariate (GLM contrast and functional connectivity) and multivariate (MVPA searchlight and RSA on voxel-, connectivity-, and spatio-temporal RSA searchlight-level) analyses, commonly speak to lateralized and midline regions, at different time windows, as the core brain networks that underpin both musicians’ and nonmusicians’ consonant/dissonant perceptions.}, language = {en}, urldate = {2021-11-16}, author = {Jo, HanShin and Hsieh, Tsung-Hao and Chien, Wei-Che and Shaw, Fu-Zen and Liang, Sheng-Fu and Kung, Chun-Chia}, month = aug, year = {2021}, doi = {10.1101/2021.08.15.456377}, pages = {2021.08.15.456377}, }
@article{nakai_correspondence_2021, title = {Correspondence of categorical and feature-based representations of music in the human brain}, volume = {11}, issn = {2162-3279}, url = {https://onlinelibrary.wiley.com/doi/abs/10.1002/brb3.1936}, doi = {10.1002/brb3.1936}, abstract = {Introduction Humans tend to categorize auditory stimuli into discrete classes, such as animal species, language, musical instrument, and music genre. Of these, music genre is a frequently used dimension of human music preference and is determined based on the categorization of complex auditory stimuli. Neuroimaging studies have reported that the superior temporal gyrus (STG) is involved in response to general music-related features. However, there is considerable uncertainty over how discrete music categories are represented in the brain and which acoustic features are more suited for explaining such representations. Methods We used a total of 540 music clips to examine comprehensive cortical representations and the functional organization of music genre categories. For this purpose, we applied a voxel-wise modeling approach to music-evoked brain activity measured using functional magnetic resonance imaging. In addition, we introduced a novel technique for feature-brain similarity analysis and assessed how discrete music categories are represented based on the cortical response pattern to acoustic features. Results Our findings indicated distinct cortical organizations for different music genres in the bilateral STG, and they revealed representational relationships between different music genres. On comparing different acoustic feature models, we found that these representations of music genres could be explained largely by a biologically plausible spectro-temporal modulation-transfer function model. Conclusion Our findings have elucidated the quantitative representation of music genres in the human cortex, indicating the possibility of modeling this categorization of complex auditory stimuli based on brain activity.}, language = {en}, number = {1}, urldate = {2021-10-26}, journal = {Brain and Behavior}, author = {Nakai, Tomoya and Koide-Majima, Naoko and Nishimoto, Shinji}, year = {2021}, note = {\_eprint: https://onlinelibrary.wiley.com/doi/pdf/10.1002/brb3.1936}, keywords = {MTF model, STG, fMRI, music genre}, pages = {e01936}, }
@article{kapsi_role_2020, title = {The {Role} of {Sleep} and {Impact} on {Brain} and {Learning}}, volume = {8}, doi = {10.3991/ijes.v8i3.17099}, abstract = {There are many interventions which may enhance learning. Many techniques are used in education to empower memory, which is a basic cognitive ability to ensure learning. A question arises: if learning is a natural process, is there a natural mechanism which supports learning? In this review, it is supported that sleep is such a mechanism. Research results on sleep and learning are presented and support different effects on the brain and learning, according to the age of the population. Sleep is a fundamental process for brain function and cognition. More studies should follow to make good use of this information, so as to design new interventions for the field of education.}, journal = {International Journal of Recent Contributions from Engineering Science \& IT (iJES)}, author = {Kapsi, Sevasti and Katsantoni, Spiridoula and Drigas, Athanasios}, month = sep, year = {2020}, pages = {59--68}, }
@article{goldman_improvisation_2020, title = {Improvisation experience predicts how musicians categorize musical structures}, volume = {48}, issn = {0305-7356}, url = {https://doi.org/10.1177/0305735618779444}, doi = {10.1177/0305735618779444}, abstract = {Western music improvisers learn to realize chord symbols in multiple ways according to functional classifications, and practice making substitutions of these realizations accordingly. In contrast, Western classical musicians read music that specifies particular realizations so that they rarely make such functional substitutions. We advance a theory that experienced improvisers more readily perceive musical structures with similar functions as sounding similar by virtue of this categorization, and that this categorization partly enables the ability to improvise by allowing performers to make substitutions. We tested this with an oddball task while recording electroencephalography. In the task, a repeating standard chord progression was randomly interspersed with two kinds of deviants: one in which one of the chords was substituted with a chord from the same functional class (“exemplar deviant”), and one in which the substitution was outside the functional class (“function deviant”). For function compared to exemplar deviants, participants with more improvisation experience responded more quickly and accurately and had more discriminable N2c and P3b ERP components. Further, N2c and P3b signal discriminability predicted participants’ behavioral ability to discriminate the stimuli. Our research contributes to the cognitive science of creativity through identifying differences in knowledge organization as a trait that facilitates creative ability.}, language = {en}, number = {1}, urldate = {2021-11-16}, journal = {Psychology of Music}, author = {Goldman, Andrew and Jackson, Tyreek and Sajda, Paul}, month = jan, year = {2020}, note = {Publisher: SAGE Publications Ltd}, keywords = {EEG, Improvisation, auditory perception/cognition, concepts and categories, creativity}, pages = {18--34}, }
@article{epure_modeling_2020, title = {Modeling the {Music} {Genre} {Perception} across {Language}-{Bound} {Cultures}}, url = {http://arxiv.org/abs/2010.06325}, abstract = {The music genre perception expressed through human annotations of artists or albums varies significantly across language-bound cultures. These variations cannot be modeled as mere translations since we also need to account for cultural differences in the music genre perception. In this work, we study the feasibility of obtaining relevant cross-lingual, culture-specific music genre annotations based only on language-specific semantic representations, namely distributed concept embeddings and ontologies. Our study, focused on six languages, shows that unsupervised cross-lingual music genre annotation is feasible with high accuracy, especially when combining both types of representations. This approach of studying music genres is the most extensive to date and has many implications in musicology and music information retrieval. Besides, we introduce a new, domain-dependent cross-lingual corpus to benchmark state of the art multilingual pre-trained embedding models.}, urldate = {2021-10-26}, journal = {arXiv:2010.06325 [cs]}, author = {Epure, Elena V. and Salha, Guillaume and Moussallam, Manuel and Hennequin, Romain}, month = nov, year = {2020}, note = {arXiv: 2010.06325}, keywords = {Computer Science - Computation and Language, Computer Science - Machine Learning}, }
@article{popal_guide_2019, title = {A {Guide} to {Representational} {Similarity} {Analysis} for {Social} {Neuroscience}}, volume = {14}, issn = {1749-5016}, url = {https://doi.org/10.1093/scan/nsz099}, doi = {10.1093/scan/nsz099}, abstract = {Representational similarity analysis (RSA) is a computational technique that uses pairwise comparisons of stimuli to reveal their representation in higher-order space. In the context of neuroimaging, mass-univariate analyses and other multivariate analyses can provide information on what and where information is represented but have limitations in their ability to address how information is represented. Social neuroscience is a field that can particularly benefit from incorporating RSA techniques to explore hypotheses regarding the representation of multidimensional data, how representations can predict behavior, how representations differ between groups and how multimodal data can be compared to inform theories. The goal of this paper is to provide a practical as well as theoretical guide to implementing RSA in social neuroscience studies.}, number = {11}, urldate = {2021-12-17}, journal = {Social Cognitive and Affective Neuroscience}, author = {Popal, Haroon and Wang, Yin and Olson, Ingrid R}, month = nov, year = {2019}, pages = {1243--1253}, }
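The core RSA computation this guide describes, pairwise dissimilarities between stimulus-evoked activity patterns compared against a model's dissimilarities, is compact enough to sketch directly. A minimal example with random placeholders standing in for real neural patterns and a real model RDM:

import numpy as np
from scipy.spatial.distance import pdist
from scipy.stats import spearmanr

rng = np.random.default_rng(0)

# Placeholders: activity patterns for 12 stimuli across 100 voxels,
# plus model features from which a model RDM is derived.
patterns = rng.normal(size=(12, 100))
model_rdm = pdist(rng.normal(size=(12, 5)))  # condensed model dissimilarities

# Neural RDM: 1 - Pearson correlation between each pair of activity patterns.
neural_rdm = pdist(patterns, metric="correlation")

# Compare the two RDMs with a rank correlation, the common choice because
# neural and model dissimilarities are not on the same scale.
rho, p = spearmanr(neural_rdm, model_rdm)
print(f"RSA: rho = {rho:.3f}, p = {p:.3f}")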
@incollection{koelsch_music_2019, address = {Cambridge, MA, US}, title = {Music and the brain}, isbn = {978-0-262-03927-7}, abstract = {The term music refers to structured sounds that are produced by humans as a means of social interaction, expression, diversion, or evocation of emotion. Making music in a group is a tremendously demanding task for the human brain, and it elicits a large array of cognitive (and affective) processes, including perception, multimodal integration, attention, social cognition, memory, and communicative functions, including syntactic processing and processing of meaning information, action, and emotion. This richness makes music an ideal tool to investigate the workings of the human brain. This chapter reviews neuroscientific research findings about some of these processes. Tonal languages rely on a meticulous decoding of pitch information, and both tonal and nontonal languages require an accurate analysis of speech prosody to decode structure and meaning of speech. The assumption of an intimate connection between music and speech is corroborated by the reviewed findings of overlapping and shared neural resources for music and language processing in both adults and children. These findings suggest that the human brain, particularly at an early age, does not treat language and music as separate domains, but rather treats language as a special case of music, and music as a special case of sound. (PsycInfo Database Record (c) 2020 APA, all rights reserved)}, booktitle = {Foundations in music psychology: {Theory} and research}, publisher = {The MIT Press}, author = {Koelsch, Stefan}, year = {2019}, keywords = {Brain, Cognitive Processes, Language, Music, Music Perception, Neurosciences, Oral Communication, Social Cognition, Social Interaction}, pages = {407--458}, }
@techreport{nakai_representation_2018, title = {Representation of music genres based on the spectro-temporal modulation responses of the human brain}, copyright = {© 2018, Posted by Cold Spring Harbor Laboratory. This pre-print is available under a Creative Commons License (Attribution-NonCommercial-NoDerivs 4.0 International), CC BY-NC-ND 4.0, as described at http://creativecommons.org/licenses/by-nc-nd/4.0/}, url = {https://www.biorxiv.org/content/10.1101/471326v1}, abstract = {Music genre is an essential category for understanding human musical preferences and is provided based on the abstract categorization upon complex auditory stimuli. Previous neuroimaging studies have reported the involvement of the superior temporal gyrus (STG) in response to general music-related features. However, it remains largely unclear how abstract categories of music genre are represented in the brain and what acoustic features are more suited for explaining such representations. Here we examined comprehensive cortical representations and functional organization of music genres using 540 music clips. We applied a voxel-wise modeling approach to music-evoked brain activity measured using functional magnetic resonance imaging (fMRI). We observed distinct cortical organizations for different music genres in the bilateral STG, which revealed the representational relationship between various music genres, e.g., classical and hip-hop music showed opposite representations. Representations of music genres were largely explained by spectro-temporal modulation, which was modeled by a biologically plausible spectro-temporal modulation-transfer function (MTF) model. Our results elucidate the quantitative representation of music genres in the human cortex and indicate the possibility of modeling our abstract categorization of complex auditory stimuli based on the brain activity. Significance statement Music genre is an essential category for understanding human preferences of music. However, it is largely unknown how abstract categories of music genre are represented in the brain. Here, we examined comprehensive cortical representations of music genres by building voxel-wise models of fMRI data collected while human subjects listened to 540 music clips. We found distinct cortical organizations for various music genres in the bilateral STG. Such genre-specific cortical organization was explained by the biologically plausible MTF model. The current study elucidates the quantitative representation of music genres in the human cortex for the first time and indicates the possibility of modeling our abstract categorization of complex auditory stimuli based on the brain activity.}, language = {en}, urldate = {2021-11-16}, author = {Nakai, Tomoya and Koide-Majima, Naoko and Nishimoto, Shinji}, month = nov, year = {2018}, doi = {10.1101/471326}, pages = {471326}, }
@inproceedings{nakai_encoding_2018, title = {Encoding and {Decoding} of {Music}-{Genre} {Representations} in the {Human} {Brain}}, doi = {10.1109/SMC.2018.00108}, abstract = {Music-genre recognition (MGR) has been a central issue in understanding human preferences of music. Previous studies have used various acoustic features to achieve MGR, though it has been largely unknown how music genres and related features are represented in the brain. Here, we measured brain activity while subjects passively listened to naturalistic music of various genres. A voxel-wise encoding model showed different activation patterns for each music genre in the bilateral superior temporal gyrus. We further performed music-genre classification using both a feature-based approach and a brain activity-based approach. Both approaches provided above-chance classification accuracy. Among four feature models, a biologically plausible spectro-temporal modulation transfer function (MTF) model showed the highest performance. These results provide a new insight into biologically plausible models of music genre.}, booktitle = {2018 {IEEE} {International} {Conference} on {Systems}, {Man}, and {Cybernetics} ({SMC})}, author = {Nakai, Tomoya and Koide-Majima, Naoko and Nishimoto, Shinji}, month = oct, year = {2018}, note = {ISSN: 2577-1655}, keywords = {Biological system modeling, Brain modeling, Decoding, Encoding, Feature extraction, MRI, MTF, Music, Training, decoding, music genre}, pages = {584--589}, }
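The voxel-wise encoding approach used in this line of work fits a regularized linear model from stimulus features (e.g., the outputs of an MTF model) to each voxel's response, then evaluates it by predicting held-out clips. A schematic sketch with synthetic placeholders, not the published pipeline or dataset:

import numpy as np
from sklearn.linear_model import RidgeCV

rng = np.random.default_rng(0)

# Placeholders: 400 training / 100 test clips, 1000 MTF-like features, 500 voxels.
X_train, X_test = rng.normal(size=(400, 1000)), rng.normal(size=(100, 1000))
weights = rng.normal(size=(1000, 500))
y_train = X_train @ weights + rng.normal(size=(400, 500))
y_test = X_test @ weights + rng.normal(size=(100, 500))

# Fit one regularized linear model per voxel (RidgeCV handles multi-output y),
# with the ridge penalty chosen by cross-validation on the training clips.
model = RidgeCV(alphas=np.logspace(-2, 4, 7)).fit(X_train, y_train)
pred = model.predict(X_test)

# Encoding accuracy: per-voxel correlation between predicted and observed
# responses to the held-out clips.
r = [np.corrcoef(pred[:, v], y_test[:, v])[0, 1] for v in range(y_test.shape[1])]
print(f"mean held-out prediction r across voxels: {np.mean(r):.3f}")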
@article{sankaran_decoding_2018, title = {Decoding the dynamic representation of musical pitch from human brain activity}, volume = {8}, copyright = {2018 The Author(s)}, issn = {2045-2322}, url = {https://www.nature.com/articles/s41598-018-19222-3}, doi = {10.1038/s41598-018-19222-3}, abstract = {In music, the perception of pitch is governed largely by its tonal function given the preceding harmonic structure of the music. While behavioral research has advanced our understanding of the perceptual representation of musical pitch, relatively little is known about its representational structure in the brain. Using Magnetoencephalography (MEG), we recorded evoked neural responses to different tones presented within a tonal context. Multivariate Pattern Analysis (MVPA) was applied to “decode” the stimulus that listeners heard based on the underlying neural activity. We then characterized the structure of the brain’s representation using decoding accuracy as a proxy for representational distance, and compared this structure to several well established perceptual and acoustic models. The observed neural representation was best accounted for by a model based on the Standard Tonal Hierarchy, whereby differences in the neural encoding of musical pitches correspond to their differences in perceived stability. By confirming that perceptual differences honor those in the underlying neuronal population coding, our results provide a crucial link in understanding the cognitive foundations of musical pitch across psychological and neural domains.}, language = {en}, number = {1}, urldate = {2021-11-16}, journal = {Scientific Reports}, author = {Sankaran, N. and Thompson, W. F. and Carlile, S. and Carlson, T. A.}, month = jan, year = {2018}, note = {Publisher: Nature Publishing Group}, keywords = {Cortex, Neural decoding, Perception}, pages = {839}, }
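The decoding logic in this abstract, classifying which tone was heard from the neural response and treating cross-validated pairwise decoding accuracy as a proxy for representational distance, can be sketched as follows (synthetic stand-in data; not the authors' MEG pipeline):

import numpy as np
from itertools import combinations
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.model_selection import cross_val_score

rng = np.random.default_rng(0)

# Placeholders: 7 pitch conditions x 40 trials x 60 sensors of evoked activity.
n_cond, n_trials, n_sensors = 7, 40, 60
data = rng.normal(size=(n_cond, n_trials, n_sensors))

# For every pair of conditions, train a classifier to tell them apart;
# cross-validated accuracy is read as representational distance.
distances = np.zeros((n_cond, n_cond))
for i, j in combinations(range(n_cond), 2):
    X = np.vstack([data[i], data[j]])
    y = np.array([0] * n_trials + [1] * n_trials)
    acc = cross_val_score(LinearDiscriminantAnalysis(), X, y, cv=5).mean()
    distances[i, j] = distances[j, i] = acc

print(np.round(distances, 2))  # the condition-by-condition "neural RDM"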
@article{casey_music_2017, title = {Music of the {7Ts}: {Predicting} and {Decoding} {Multivoxel} {fMRI} {Responses} with {Acoustic}, {Schematic}, and {Categorical} {Music} {Features}}, volume = {8}, issn = {1664-1078}, shorttitle = {Music of the {7Ts}}, url = {https://www.frontiersin.org/article/10.3389/fpsyg.2017.01179}, doi = {10.3389/fpsyg.2017.01179}, abstract = {Underlying the experience of listening to music are parallel streams of auditory, categorical, and schematic qualia, whose representations and cortical organization remain largely unresolved. We collected high-field (7T) fMRI data in a music listening task, and analyzed the data using multivariate decoding and stimulus-encoding models. Twenty subjects participated in the experiment, which measured BOLD responses evoked by naturalistic listening to twenty-five music clips from five genres. Our first analysis applied machine classification to the multivoxel patterns that were evoked in temporal cortex. Results yielded above-chance levels for both stimulus identification and genre classification–cross-validated by holding out data from multiple of the stimuli during model training and then testing decoding performance on the held-out data. Genre model misclassifications were significantly correlated with those in a corresponding behavioral music categorization task, supporting the hypothesis that geometric properties of multivoxel pattern spaces underlie observed musical behavior. A second analysis employed a spherical searchlight regression analysis which predicted multivoxel pattern responses to music features representing melody and harmony across a large area of cortex. The resulting prediction-accuracy maps yielded significant clusters in the temporal, frontal, parietal, and occipital lobes, as well as in the parahippocampal gyrus and the cerebellum. These maps provide evidence in support of our hypothesis that geometric properties of music cognition are neurally encoded as multivoxel representational spaces. The maps also reveal a cortical topography that differentially encodes categorical and absolute-pitch information in distributed and overlapping networks, with smaller specialized regions that encode tonal music information in relative-pitch representations.}, urldate = {2021-11-16}, journal = {Frontiers in Psychology}, author = {Casey, Michael A.}, year = {2017}, pages = {1179}, }
@inproceedings{guclu_brains_2016, title = {Brains on {Beats}}, volume = {29}, url = {https://proceedings.neurips.cc/paper/2016/hash/b9d487a30398d42ecff55c228ed5652b-Abstract.html}, urldate = {2021-11-16}, booktitle = {Advances in {Neural} {Information} {Processing} {Systems}}, publisher = {Curran Associates, Inc.}, author = {Güçlü, Umut and Thielen, Jordy and Hanke, Michael and van Gerven, Marcel}, year = {2016}, }
@article{mcdermott_indifference_2016, title = {Indifference to dissonance in native {Amazonians} reveals cultural variation in music perception}, volume = {535}, copyright = {2016 Nature Publishing Group, a division of Macmillan Publishers Limited. All Rights Reserved.}, issn = {1476-4687}, url = {https://www.nature.com/articles/nature18635}, doi = {10.1038/nature18635}, abstract = {A native Amazonian society rated consonant and dissonant chords and vocal harmonies as equally pleasant, whereas Bolivian city- and town-dwellers preferred consonance, indicating that preference for consonance over dissonance is not universal and probably develops from exposure to particular types of polyphonic music.}, language = {en}, number = {7613}, urldate = {2021-11-16}, journal = {Nature}, author = {McDermott, Josh H. and Schultz, Alan F. and Undurraga, Eduardo A. and Godoy, Ricardo A.}, month = jul, year = {2016}, note = {Publisher: Nature Publishing Group}, keywords = {Auditory system, Human behaviour}, pages = {547--550}, }
@book{hallam_oxford_2016, title = {The {Oxford} {Handbook} of {Music} {Psychology}}, isbn = {978-0-19-103445-9}, abstract = {The second edition of The Oxford Handbook of Music Psychology updates the original landmark text and provides a comprehensive review of the latest developments in this fast-growing area of research. Covering both experimental and theoretical perspectives, each of the 11 sections is edited by an internationally recognised authority in the area. The first ten parts present chapters that focus on specific areas of music psychology: the origins and functions of music; music perception, responses to music; music and the brain; musical development; learning musical skills; musical performance; composition and improvisation; the role of music in everyday life; and music therapy. In each part authors critically review the literature, highlight current issues and explore possibilities for the future. The final part examines how, in recent years, the study of music psychology has broadened to include a range of other disciplines. It considers the way that research has developed in relation to technological advances, and points the direction for further development in the field. With contributions from internationally recognised experts across 55 chapters, it is an essential resource for students and researchers in psychology and musicology.}, language = {en}, publisher = {Oxford University Press}, author = {Hallam, Susan and Cross, Ian and Thaut, Michael}, month = jan, year = {2016}, note = {Google-Books-ID: uho2CwAAQBAJ}, keywords = {Education / Educational Psychology, Music / Instruction \& Study / Theory, Philosophy / Aesthetics, Psychology / Developmental / Child, Psychology / Developmental / General, Psychology / Social Psychology}, }
@article{levitin_measuring_2016, title = {Measuring the representational space of music with {fMRI}: a case study with {Sting}}, volume = {22}, issn = {1355-4794}, shorttitle = {Measuring the representational space of music with {fMRI}}, url = {https://doi.org/10.1080/13554794.2016.1216572}, doi = {10.1080/13554794.2016.1216572}, abstract = {Functional brain imaging has revealed much about the neuroanatomical substrates of higher cognition, including music, language, learning, and memory. The technique lends itself to studying groups of individuals. In contrast, the nature of expert performance is typically studied through the examination of exceptional individuals using behavioral case studies and retrospective biography. Here, we combined fMRI and the study of an individual who is a world-class expert musician and composer in order to better understand the neural underpinnings of his music perception and cognition, in particular, his mental representations for music. We used state of the art multivoxel pattern analysis (MVPA) and representational dissimilarity analysis (RDA) in a fixed set of brain regions to test three exploratory hypotheses with the musician Sting: (1) Composing would recruit neural structures that are both unique and distinguishable from other creative acts, such as composing prose or visual art; (2) listening and imagining music would recruit similar neural regions, indicating that musical memory shares anatomical substrates with music listening; (3) the MVPA and RDA results would help us to map the representational space for music, revealing which musical pieces and genres are perceived to be similar in the musician’s mental models for music. Our hypotheses were confirmed. The act of composing, and even of imagining elements of the composed piece separately, such as melody and rhythm, activated a similar cluster of brain regions, and were distinct from prose and visual art. Listened and imagined music showed high similarity, and in addition, notable similarity/dissimilarity patterns emerged among the various pieces used as stimuli: Muzak and Top 100/Pop songs were far from all other musical styles in Mahalanobis distance (Euclidean representational space), whereas jazz, R\&B, tango and rock were comparatively close. Closer inspection revealed principled explanations for the similarity clusters found, based on key, tempo, motif, and orchestration.}, number = {6}, urldate = {2021-11-16}, journal = {Neurocase}, author = {Levitin, Daniel J. and Grafton, Scott T.}, month = nov, year = {2016}, pmid = {27687156}, note = {Publisher: Routledge \_eprint: https://doi.org/10.1080/13554794.2016.1216572}, keywords = {Corrigendum, MVPA, Music cognition, case studies, mental imagery, neuroimaging}, pages = {548--557}, }
@book{ajoodha_single-labelled_2015, title = {Single-labelled music genre classification using content-based features}, author = {Ajoodha, Ritesh and Klein, Richard and Rosman, Benjamin}, month = nov, year = {2015}, doi = {10.1109/RoboMech.2015.7359500}, note = {Pages: 71}, }
@incollection{schlaug_chapter_2015, series = {Music, {Neurology}, and {Neuroscience}: {Evolution}, the {Musical} {Brain}, {Medical} {Conditions}, and {Therapies}}, title = {Chapter 3 - {Musicians} and music making as a model for the study of brain plasticity}, volume = {217}, url = {https://www.sciencedirect.com/science/article/pii/S0079612314000211}, abstract = {Playing a musical instrument is an intense, multisensory, and motor experience that usually commences at an early age and requires the acquisition and maintenance of a range of sensory and motor skills over the course of a musician's lifetime. Thus, musicians offer an excellent human model for studying behavioral-cognitive as well as brain effects of acquiring, practicing, and maintaining these specialized skills. Research has shown that repeatedly practicing the association of motor actions with specific sound and visual patterns (musical notation), while receiving continuous multisensory feedback will strengthen connections between auditory and motor regions (e.g., arcuate fasciculus) as well as multimodal integration regions. Plasticity in this network may explain some of the sensorimotor and cognitive enhancements that have been associated with music training. Furthermore, the plasticity of this system as a result of long term and intense interventions suggest the potential for music making activities (e.g., forms of singing) as an intervention for neurological and developmental disorders to learn and relearn associations between auditory and motor functions such as vocal motor functions.}, language = {en}, urldate = {2021-11-16}, booktitle = {Progress in {Brain} {Research}}, publisher = {Elsevier}, author = {Schlaug, Gottfried}, editor = {Altenmüller, Eckart and Finger, Stanley and Boller, François}, month = jan, year = {2015}, doi = {10.1016/bs.pbr.2014.11.020}, keywords = {Auditory–Motor Mapping Training (AMMT), Melodic Intonation Therapy, auditory, brain plasticity, diffusion tensor imaging, morphometry, motor}, pages = {37--55}, }
@techreport{hanke_high-resolution_2015, title = {High-resolution 7-{Tesla} {fMRI} data on the perception of musical genres – an extension to the \textit{studyforrest} dataset}, copyright = {http://creativecommons.org/licenses/by/4.0/}, url = {https://f1000research.com/articles/4-174}, abstract = {Here we present an extension to the studyforrest dataset – a versatile resource for studying the behavior of the human brain in situations of real-life complexity (http://studyforrest.org). This release adds more high-resolution, ultra high-field (7 Tesla) functional magnetic resonance imaging (fMRI) data from the same individuals. The twenty participants were repeatedly stimulated with a total of 25 music clips, with and without speech content, from five different genres using a slow event-related paradigm. The data release includes raw fMRI data, as well as precomputed structural alignments for within-subject and group analysis. In addition to fMRI, simultaneously recorded cardiac and respiratory traces, as well as the complete implementation of the stimulation paradigm, including stimuli, are provided. An initial quality control analysis reveals distinguishable patterns of response to individual genres throughout a large expanse of areas known to be involved in auditory and speech processing. The present data can be used to, for example, generate encoding models for music perception that can be validated against the previously released fMRI data from stimulation with the “Forrest Gump” audio-movie and its rich musical content. In order to facilitate replicative and derived works, only free and open-source software was utilized.}, language = {en}, number = {4:174}, urldate = {2021-11-16}, institution = {F1000Research}, author = {Hanke, Michael and Dinga, Richard and Häusler, Christian and Guntupalli, J. Swaroop and Casey, Michael and Kaule, Falko R. and Stadler, Jörg}, month = jun, year = {2015}, doi = {10.12688/f1000research.6679.1}, note = {Type: article}, keywords = {7 Tesla, auditory features, functional magnetic resonance imaging, music perception, natural sounds}, }
@book{frances_perception_2014, title = {The {Perception} of {Music}}, isbn = {978-1-317-76754-1}, abstract = {This translation of this classic text contains a balance of cultural and biological considerations. While arguing for the strong influence of exposure and of formal training on the way that music is perceived, Frances draws on the literature concerning the amusias to illustrate his points about the types of cognitive abstraction that are performed by the listener.}, language = {en}, publisher = {Psychology Press}, author = {Frances, Robert and Dowling, W. Jay}, month = mar, year = {2014}, note = {Google-Books-ID: 8GYAAwAAQBAJ}, keywords = {Psychology / Cognitive Psychology \& Cognition, Psychology / General}, }
@article{thaut_human_2014, title = {Human {Brain} {Basis} of {Musical} {Rhythm} {Perception}: {Common} and {Distinct} {Neural} {Substrates} for {Meter}, {Tempo}, and {Pattern}}, volume = {4}, copyright = {http://creativecommons.org/licenses/by/3.0/}, shorttitle = {Human {Brain} {Basis} of {Musical} {Rhythm} {Perception}}, url = {https://www.mdpi.com/2076-3425/4/2/428}, doi = {10.3390/brainsci4020428}, abstract = {Rhythm as the time structure of music is composed of distinct temporal components such as pattern, meter, and tempo. Each feature requires different computational processes: meter involves representing repeating cycles of strong and weak beats; pattern involves representing intervals at each local time point which vary in length across segments and are linked hierarchically; and tempo requires representing frequency rates of underlying pulse structures. We explored whether distinct rhythmic elements engage different neural mechanisms by recording brain activity of adult musicians and non-musicians with positron emission tomography (PET) as they made covert same-different discriminations of (a) pairs of rhythmic, monotonic tone sequences representing changes in pattern, tempo, and meter, and (b) pairs of isochronous melodies. Common to pattern, meter, and tempo tasks were focal activities in right, or bilateral, areas of frontal, cingulate, parietal, prefrontal, temporal, and cerebellar cortices. Meter processing alone activated areas in right prefrontal and inferior frontal cortex associated with more cognitive and abstract representations. Pattern processing alone recruited right cortical areas involved in different kinds of auditory processing. Tempo processing alone engaged mechanisms subserving somatosensory and premotor information (e.g., posterior insula, postcentral gyrus). Melody produced activity different from the rhythm conditions (e.g., right anterior insula and various cerebellar areas). These exploratory findings suggest the outlines of some distinct neural components underlying the components of rhythmic structure.}, language = {en}, number = {2}, urldate = {2021-11-16}, journal = {Brain Sciences}, author = {Thaut, Michael H. and Trimarchi, Pietro Davide and Parsons, Lawrence M.}, month = jun, year = {2014}, note = {Number: 2 Publisher: Multidisciplinary Digital Publishing Institute}, keywords = {brain, music, neuroimaging, perception, rhythm}, pages = {428--452}, }
@article{istok_i_2013, title = {‘{I} love {Rock} ‘n’ {Roll}’—{Music} genre preference modulates brain responses to music}, volume = {92}, issn = {0301-0511}, url = {https://www.sciencedirect.com/science/article/pii/S0301051112002505}, doi = {10.1016/j.biopsycho.2012.11.005}, abstract = {The present study examined the effect of participants’ music genre preference on the neural processes underlying evaluative and cognitive judgements of music using the event-related potential technique. To this aim, two participant groups differing in their preference for Latin American and Heavy Metal music performed a liking judgement and a genre classification task on a variety of excerpts of either music genre. A late positive potential (LPP) was elicited in all conditions between 600 and 900ms after stimulus onset. During the genre classification task, an early negativity was elicited by the preferred compared to the non-preferred music at around 230–370ms whereas the non-preferred genre was characterized by a larger LPP. The findings suggest that evaluative and cognitive judgements of music are accompanied by affective responses and that the valence of music may spontaneously modulate early processes of music categorization even when no overt liking judgement is required.}, language = {en}, number = {2}, urldate = {2021-11-16}, journal = {Biological Psychology}, author = {Istók, Eva and Brattico, Elvira and Jacobsen, Thomas and Ritter, Aileen and Tervaniemi, M.}, month = feb, year = {2013}, keywords = {Early negativity, Evaluative processing, Event-related potential, Late positive potential (LPP), Music genre preference}, pages = {142--151}, }
@article{dunn_toward_2012, title = {Toward a better understanding of the relation between music preference, listening behavior, and personality}, volume = {40}, issn = {0305-7356}, url = {https://doi.org/10.1177/0305735610388897}, doi = {10.1177/0305735610388897}, abstract = {Previous research relating personality and music preferences has often measured such reported preferences according to genre labels. To support previous research, the current paper has expanded investigation of the relation between personality and music preferences to include direct measurement of music listening behavior. A study (N = 395) measured participants’ personality, reported music preferences, and their listening behavior, which was tracked while using a music database for a minimum period of three months. Results indicated that reported music preferences were correlated to listening behavior, and indicated robust positive relations between Neuroticism and Classical music preference, and between Openness to Experience and Jazz music preference. Results also indicated issues when using genre labels to measure music preferences, which are discussed.}, language = {en}, number = {4}, urldate = {2021-11-16}, journal = {Psychology of Music}, author = {Dunn, Peter Gregory and de Ruyter, Boris and Bouwhuis, Don G.}, month = jul, year = {2012}, note = {Publisher: SAGE Publications Ltd}, keywords = {Big Five, genre, listening behavior, music preferences, personal preferences, personality}, pages = {411--428}, }
@article{nieminen_development_2012, title = {The development of the aesthetic experience of music: {Preference}, emotions, and beauty}, volume = {16}, issn = {1029-8649}, shorttitle = {The development of the aesthetic experience of music}, url = {https://doi.org/10.1177/1029864912450454}, doi = {10.1177/1029864912450454}, abstract = {From an early age, children are attracted to the aesthetics of music. Employing a cross-sectional design including school-aged children, the present exploratory study aimed to investigate the effects of age, gender, and music education on three important aspects of the aesthetic experience of music: musical preference, musical emotion recognition, and the use of the aesthetic categories for music. To this aim, we developed an experimental procedure suitable to quantify children’s musical preferences and their judgment of musical emotions and aesthetics. The musical material consisted of three short piano pieces: a piece in major mode, a piece in minor mode, and a free tonal piece. The responses of 78 children were analyzed, whereby the children were assigned to two age groups: 6–7-year-olds (n = 38) and 8–9-year-olds (n = 40). Children preferred the piece in major mode to the one in minor. Except for 6–7-year-olds without music education, children gave the highest happiness ratings for the major piece. Only 8–9-year-olds found the minor piece sadder than the major piece, and the major piece more beautiful than the piece in minor. The ratings of the free tonal piece were mostly indifferent and probably reflect children’s difficulty in judging music that does not yet belong to their short musical history. Taken together, the current data imply that school-aged children are able to make emotional and aesthetic judgments about unfamiliar musical pieces.}, language = {en}, number = {3}, urldate = {2021-11-16}, journal = {Musicae Scientiae}, author = {Nieminen, Sirke and Istók, Eva and Brattico, Elvira and Tervaniemi, Mari}, month = nov, year = {2012}, note = {Publisher: SAGE Publications Ltd}, keywords = {aesthetic experience, beauty, development, emotions, music, preference, tonality}, pages = {372--391}, }
@article{fedorenko_sensitivity_2012, title = {Sensitivity to musical structure in the human brain}, volume = {108}, issn = {0022-3077}, url = {https://journals.physiology.org/doi/full/10.1152/jn.00209.2012}, doi = {10.1152/jn.00209.2012}, abstract = {Evidence from brain-damaged patients suggests that regions in the temporal lobes, distinct from those engaged in lower-level auditory analysis, process the pitch and rhythmic structure in music. In contrast, neuroimaging studies targeting the representation of music structure have primarily implicated regions in the inferior frontal cortices. Combining individual-subject fMRI analyses with a scrambling method that manipulated musical structure, we provide evidence of brain regions sensitive to musical structure bilaterally in the temporal lobes, thus reconciling the neuroimaging and patient findings. We further show that these regions are sensitive to the scrambling of both pitch and rhythmic structure but are insensitive to high-level linguistic structure. Our results suggest the existence of brain regions with representations of musical structure that are distinct from high-level linguistic representations and lower-level acoustic representations. These regions provide targets for future research investigating possible neural specialization for music or its associated mental processes.}, number = {12}, urldate = {2021-11-16}, journal = {Journal of Neurophysiology}, author = {Fedorenko, Evelina and McDermott, Josh H. and Norman-Haignere, Sam and Kanwisher, Nancy}, month = dec, year = {2012}, note = {Publisher: American Physiological Society}, keywords = {brain, fMRI, music}, pages = {3289--3300}, }
@inproceedings{casey_population_2012, address = {Berlin, Heidelberg}, series = {Lecture {Notes} in {Computer} {Science}}, title = {Population {Codes} {Representing} {Musical} {Timbre} for {High}-{Level} {fMRI} {Categorization} of {Music} {Genres}}, isbn = {978-3-642-34713-9}, doi = {10.1007/978-3-642-34713-9_5}, abstract = {We present experimental evidence in support of distributed neural codes for timbre that are implicated in discrimination of musical styles. We used functional magnetic resonance imaging (fMRI) in humans and multivariate pattern analysis (MVPA) to identify activation patterns that encode the perception of rich music audio stimuli from five different musical styles. We show that musical styles can be automatically classified from population codes in bilateral superior temporal sulcus (STS). To investigate the possible link between the acoustic features of the auditory stimuli and neural population codes in STS, we conducted a representational similarity analysis and a multivariate regression-retrieval task. We found that the similarity structure of timbral features of our stimuli resembled the similarity structure of the STS more than any other type of acoustic feature. We also found that a regression model trained on timbral features outperformed models trained on other types of audio features. Our results show that human brain responses to complex, natural music can be differentiated by timbral audio features, emphasizing the importance of timbre in auditory perception.}, language = {en}, booktitle = {Machine {Learning} and {Interpretation} in {Neuroimaging}}, publisher = {Springer}, author = {Casey, Michael and Thompson, Jessica and Kang, Olivia and Raizada, Rajeev and Wheatley, Thalia}, editor = {Langs, Georg and Rish, Irina and Grosse-Wentrup, Moritz and Murphy, Brian}, year = {2012}, keywords = {STS, cepstrum, multivariate analysis, music, timbre code}, pages = {34--41}, }
@article{koelsch_toward_2011, title = {Toward a {Neural} {Basis} of {Music} {Perception} – {A} {Review} and {Updated} {Model}}, volume = {2}, issn = {1664-1078}, url = {https://www.frontiersin.org/article/10.3389/fpsyg.2011.00110}, doi = {10.3389/fpsyg.2011.00110}, abstract = {Music perception involves acoustic analysis, auditory memory, auditory scene analysis, processing of interval relations, of musical syntax and semantics, and activation of (pre)motor representations of actions. Moreover, music perception potentially elicits emotions, thus giving rise to the modulation of emotional effector systems such as the subjective feeling system, the autonomic nervous system, the hormonal, and the immune system. Building on a previous article (Koelsch and Siebel, 2005), this review presents an updated model of music perception and its neural correlates. The article describes processes involved in music perception, and reports EEG and fMRI studies that inform about the time course of these processes, as well as about where in the brain these processes might be located.}, urldate = {2021-11-16}, journal = {Frontiers in Psychology}, author = {Koelsch, Stefan}, year = {2011}, pages = {110}, }
@article{schafer_functions_2009, title = {From the functions of music to music preference}, volume = {37}, issn = {0305-7356}, url = {https://doi.org/10.1177/0305735608097247}, doi = {10.1177/0305735608097247}, abstract = {To date, not much is known about how the functions of music relate to music preference. This article examines the basic hypothesis that the strength of preference for a given kind of music depends on the degree to which that kind of music serves the needs of the listener; that is, how well the respective functions of music are fulfilled. Study 1, a pilot study, identified the best-known musical styles of the participants, yielding 25 styles that were known by at least 10 percent of them. Study 2 used these 25 styles and found that rock, pop and classical music were liked most. A factor analysis yielded six distinct dimensions of music preference. People showed great variation in the strength of preference for their favourite music. This is explained by the impact of different functions of music. The potential of music to express people's identity and values and to bring them together was most closely related to the strength of preference. However, the reasons for liking a particular style are not congruent with the functions that people ascribe to their favourite music in general. A theoretical model of the development of music preferences is suggested.}, language = {en}, number = {3}, urldate = {2021-11-16}, journal = {Psychology of Music}, author = {Schäfer, Thomas and Sedlmeier, Peter}, month = jul, year = {2009}, note = {Publisher: SAGE Publications Ltd}, keywords = {genres, musical taste, styles, uses and gratification approach}, pages = {279--300}, }
@incollection{tervaniemi_musical_2009, title = {Musical {Sounds} in the {Human} {Brain}}, isbn = {978-1-315-22409-1}, abstract = {This chapter aims to describe two traditional empirical approaches to neurocognitive music research and their findings on cortical music-sound encoding, as well as the first attempts to investigate the brain basis of musical emotions. In both viewpoints, music and musicians offer a fruitful means for investigating human behavior at a very high cognitive and creative level in a well-controlled setting. Frequency modifications in music sounds were predominantly encoded in the right auditory cortex, while in speech sounds, they activated the auditory areas bilaterally. The data obtained by Tervaniemi and colleagues indicated that speech and music sound patterns activate separate brain areas in the temporal and frontal lobes. The data indicated a left-hemispheric dominance for speech sounds and a tendency toward a right-hemispheric dominance for musical sounds. In music, the existence of novel, unexpected sound events is in a key position to keep the listener alert and, if present in an appropriate degree, also important in creating positive emotions.}, booktitle = {Neuroaesthetics}, publisher = {Routledge}, author = {Tervaniemi, Mari}, year = {2009}, note = {Num Pages: 11}, }
@article{gjerdingen_scanning_2008, title = {Scanning the {Dial}: {The} {Rapid} {Recognition} of {Music} {Genres}}, volume = {37}, issn = {0929-8215}, shorttitle = {Scanning the {Dial}}, url = {https://doi.org/10.1080/09298210802479268}, doi = {10.1080/09298210802479268}, abstract = {Given brief excerpts of commercially recorded music in one of ten broad genres of music, participants in this study were asked to evaluate each excerpt and to assign it to one of the ten genre labels. It was expected that participants would be good at this task, since they were the very consumers for whom much of this music had been created. But the speed at which participants could perform this task, including above-chance categorizations of excerpts as short as 1/4 second, was quite unexpected.}, number = {2}, urldate = {2021-11-16}, journal = {Journal of New Music Research}, author = {Gjerdingen, Robert O. and Perrott, David}, month = jun, year = {2008}, note = {Publisher: Routledge \_eprint: https://doi.org/10.1080/09298210802479268}, pages = {93--100}, }
@article{caldwell_effects_2007, title = {The effects of music exposure and own genre preference on conscious and unconscious cognitive processes: {A} pilot {ERP} study}, volume = {16}, issn = {1053-8100}, shorttitle = {The effects of music exposure and own genre preference on conscious and unconscious cognitive processes}, url = {https://www.sciencedirect.com/science/article/pii/S1053810006000717}, doi = {10.1016/j.concog.2006.06.015}, abstract = {Did Beethoven and Mozart have more in common with each other than Clapton and Hendrix? The current research demonstrated the widely reported Mozart Effect as only partly significant. Event-related brain potentials (ERPs) were recorded from 16 professional classical and rock musicians during a standard 2 stimulus visual oddball task, while listening to classical and rock music. During the oddball task participants were required to discriminate between an infrequent target stimulus randomly embedded in a train of repetitive background or standard stimuli. Consistent with previous research, the P3 and N2 ERPs were elicited in response to the infrequent target stimuli. Own genre preference resulted in a reduction in amplitude of the P3 for classical musicians exposed to classical music and rock musicians exposed to rock music. Notably, at the pre-attentive stage of processing (N2) beneficial effects of exposure to classical music were observed for both groups of musicians. These data are discussed in terms of short and long-term music benefits on both conscious and unconscious cognitive processes.}, language = {en}, number = {4}, urldate = {2021-11-16}, journal = {Consciousness and Cognition}, author = {Caldwell, George N. and Riby, Leigh M.}, month = dec, year = {2007}, keywords = {Classical, Cognition, Contemporary, Event-related potential (ERP), Familiarity, Mozart, Music, N2, P3, Pre-attention, Preference}, pages = {992--996}, }
@incollection{gfeller_music_2002, address = {St Louis, MO, US}, title = {Music as communication.}, isbn = {1-58106-026-2 (Paperback)}, abstract = {Notes that music, while nondiscursive, does indeed transmit information, including emotional messages. Through association by contiguity, cultural convention, and structural properties (i.e., iconicity), it functions as a symbol capable of evoking feelings. Music's nonreferential nature renders it capable of manifold meaning and flexibility. As a nondiscursive language, music transcends intellectual, rational thought and communicates readily through high levels of redundancy. It communicates human needs and values when words no longer suffice. Because music can reflect, influence, and alter emotional response, it has particular merit as a therapeutic tool in those treatment processes that include identification, awareness, reflection, or expression of feelings and relevant issues. The ease with which music can be used in conjunction with textual or visual information further contributes to its value as a highly flexible therapeutic medium. (PsycINFO Database Record (c) 2016 APA, all rights reserved)}, booktitle = {Music therapy in the treatment of adults with mental disorders: {Theoretical} bases and clinical interventions, 2nd ed.}, publisher = {MMB Music}, author = {Gfeller, Kate E.}, year = {2002}, keywords = {*Emotional Responses, *Emotional States, *Interpersonal Communication, *Music, *Music Therapy, Messages}, pages = {42--59}, }
@incollection{newell_you_1973, title = {You can't play 20 questions with nature and win: Projective comments on the papers of this symposium}, isbn = {978-0-12-170150-5}, shorttitle = {You can't play 20 questions with nature and win}, url = {https://linkinghub.elsevier.com/retrieve/pii/B9780121701505500123}, language = {en}, urldate = {2021-12-07}, booktitle = {Visual {Information} {Processing}}, publisher = {Elsevier}, author = {Newell, Allen}, year = {1973}, doi = {10.1016/B978-0-12-170150-5.50012-3}, pages = {283--308}, }
@article{holt-lunstad_loneliness_2015, title = {Loneliness and {Social} {Isolation} as {Risk} {Factors} for {Mortality}: {A} {Meta}-{Analytic} {Review}}, volume = {10}, number = {2}, journal = {Perspectives on Psychological Science}, author = {Holt-Lunstad, Julianne and Smith, Timothy B. and Baker, Mark and Harris, Tyler and Stephenson, David}, year = {2015}, doi = {10.1177/1745691614568352}, url = {https://journals.sagepub.com/doi/full/10.1177/1745691614568352}, urldate = {2024-07-05}, pages = {227--237}, }
@misc{noauthor_expra_2021_group_3_demo_experiment_in_depth_nodate, title = {{EXPRA}\_2021\_group\_3\_demo\_experiment\_in\_depth}, url = {https://docs.google.com/presentation/d/12l5SAa_ulUlnnsLdqUY2VheISxF4_xfJ0na2MDg04eg}, urldate = {2021-12-02}, }