Representation Learning on Graphs with Jumping Knowledge Networks. Xu, K., Li, C., Tian, Y., Sonobe, T., Kawarabayashi, K., & Jegelka, S. 6 2018. Paper abstract bibtex Recent deep learning approaches for representation learning on graphs follow a neighborhood aggregation procedure. We analyze some important properties of these models, and propose a strategy to overcome those. In particular, the range of "neighboring" nodes that a node's representation draws from strongly depends on the graph structure, analogous to the spread of a random walk. To adapt to local neighborhood properties and tasks, we explore an architecture – jumping knowledge (JK) networks – that flexibly leverages, for each node, different neighborhood ranges to enable better structure-aware representation. In a number of experiments on social, bioinformatics and citation networks, we demonstrate that our model achieves state-of-the-art performance. Furthermore, combining the JK framework with models like Graph Convolutional Networks, GraphSAGE and Graph Attention Networks consistently improves those models' performance.
@unpublished{Xu-2018-ID310,
  title       = {Representation Learning on Graphs with Jumping Knowledge Networks},
  abstract    = {Recent deep learning approaches for representation learning on graphs
                 follow a neighborhood aggregation procedure. We analyze some important
                 properties of these models, and propose a strategy to overcome those. In
                 particular, the range of "neighboring" nodes that a node's representation
                 draws from strongly depends on the graph structure, analogous to the spread
                 of a random walk. To adapt to local neighborhood properties and tasks, we
                 explore an architecture -- jumping knowledge ({JK}) networks -- that
                 flexibly leverages, for each node, different neighborhood ranges to enable
                 better structure-aware representation. In a number of experiments on
                 social, bioinformatics and citation networks, we demonstrate that our model
                 achieves state-of-the-art performance. Furthermore, combining the {JK}
                 framework with models like Graph Convolutional Networks, Graph{SAGE} and
                 Graph Attention Networks consistently improves those models' performance.},
  author      = {Xu, Keyulu and Li, Chengtao and Tian, Yonglong and Sonobe, Tomohiro and
                 Kawarabayashi, Ken-ichi and Jegelka, Stefanie},
  year        = {2018},
  month       = jun,
  note        = {arXiv preprint arXiv:1806.03536},
  url         = {http://arxiv.org/abs/1806.03536},
  eprint      = {1806.03536},
  eprinttype  = {arXiv},
  eprintclass = {cs.LG},
  arxiv       = {1806.03536},
  keywords    = {cs.LG},
  file        = {FULLTEXT:pdfs/000/000/000000310.pdf:PDF}
}
Downloads: 0
{"_id":"972tx9y6q85voKLXS","bibbaseid":"xu-li-tian-sonobe-kawarabayashi-jegelka-representationlearningongraphswithjumpingknowledgenetworks-2018","authorIDs":[],"author_short":["Xu, K.","Li, C.","Tian, Y.","Sonobe, T.","Kawarabayashi, K.","Jegelka, S."],"bibdata":{"bibtype":"unpublished","type":"unpublished","title":"Representation Learning on Graphs with Jumping Knowledge Networks","abstract":"Recent deep learning approaches for representation learning on graphs follow a neighborhood aggregation procedure. We analyze some important properties of these models, and propose a strategy to overcome those. In particular, the range of \"neighboring\" nodes that a node's representation draws from strongly depends on the graph structure, analogous to the spread of a random walk. To adapt to local neighborhood properties and tasks, we explore an architecture – jumping knowledge (JK) networks – that flexibly leverages, for each node, different neighborhood ranges to enable better structure-aware representation. In a number of experiments on social, bioinformatics and citation networks, we demonstrate that our model achieves state-of-the-art performance. 
Furthermore, combining the JK framework with models like Graph Convolutional Networks, GraphSAGE and Graph Attention Networks consistently improves those models' performance.","author":[{"propositions":[],"lastnames":["Xu"],"firstnames":["Keyulu"],"suffixes":[]},{"propositions":[],"lastnames":["Li"],"firstnames":["Chengtao"],"suffixes":[]},{"propositions":[],"lastnames":["Tian"],"firstnames":["Yonglong"],"suffixes":[]},{"propositions":[],"lastnames":["Sonobe"],"firstnames":["Tomohiro"],"suffixes":[]},{"propositions":[],"lastnames":["Kawarabayashi"],"firstnames":["Ken-ichi"],"suffixes":[]},{"propositions":[],"lastnames":["Jegelka"],"firstnames":["Stefanie"],"suffixes":[]}],"year":"2018","month":"6","url":"http://arxiv.org/pdf/1806.03536","arxiv":"1806.03536","keywords":"cs.LG","file":"FULLTEXT:pdfs/000/000/000000310.pdf:PDF","bibtex":"@unpublished{Xu-2018-ID310,\n title = {Representation Learning on Graphs with Jumping Knowledge Networks},\n abstract = {Recent deep learning approaches for representation learning on graphs\n follow a neighborhood aggregation procedure. We analyze some important\n properties of these models, and propose a strategy to overcome those. In\n particular, the range of \"neighboring\" nodes that a node's representation\n draws from strongly depends on the graph structure, analogous to the spread\n of a random walk. To adapt to local neighborhood properties and tasks, we\n explore an architecture -- jumping knowledge ({JK}) networks -- that\n flexibly leverages, for each node, different neighborhood ranges to enable\n better structure-aware representation. In a number of experiments on\n social, bioinformatics and citation networks, we demonstrate that our model\n achieves state-of-the-art performance. 
Furthermore, combining the {JK}\n framework with models like Graph Convolutional Networks, Graph{SAGE} and\n Graph Attention Networks consistently improves those models' performance.},\n author = {Xu, Keyulu and Li, Chengtao and Tian, Yonglong and Sonobe, Tomohiro and\n Kawarabayashi, Ken-ichi and Jegelka, Stefanie},\n year = {2018},\n month = {6},\n url = {http://arxiv.org/abs/1806.03536},\n url = {http://arxiv.org/pdf/1806.03536},\n arxiv = {1806.03536},\n keywords = {cs.{LG}},\n file = {FULLTEXT:pdfs/000/000/000000310.pdf:PDF}\n}\n\n","author_short":["Xu, K.","Li, C.","Tian, Y.","Sonobe, T.","Kawarabayashi, K.","Jegelka, S."],"key":"Xu-2018-ID310","id":"Xu-2018-ID310","bibbaseid":"xu-li-tian-sonobe-kawarabayashi-jegelka-representationlearningongraphswithjumpingknowledgenetworks-2018","role":"author","urls":{"Paper":"http://arxiv.org/pdf/1806.03536"},"keyword":["cs.LG"],"downloads":0,"html":"","metadata":{"authorlinks":{}}},"bibtype":"unpublished","biburl":"http://woowoowoo.com/ideas/test86.bib","creationDate":"2021-02-10T01:28:56.733Z","downloads":0,"keywords":["cs.lg"],"search_terms":["representation","learning","graphs","jumping","knowledge","networks","xu","li","tian","sonobe","kawarabayashi","jegelka"],"title":"Representation Learning on Graphs with Jumping Knowledge Networks","year":2018,"dataSources":["mkMPR3Lfz5vmkF7ur"]}