How Powerful are Spectral Graph Neural Networks. Wang, X. & Zhang, M. In Chaudhuri, K., Jegelka, S., Song, L., Szepesvari, C., Niu, G., & Sabato, S., editors, Proceedings of the 39th International Conference on Machine Learning, volume 162 of Proceedings of Machine Learning Research, pages 23341–23362, 17–23 Jul 2022. PMLR. Paper: https://proceedings.mlr.press/v162/wang22am.html

Abstract: Spectral Graph Neural Network is a kind of Graph Neural Network (GNN) based on graph signal filters. Some models able to learn arbitrary spectral filters have emerged recently. However, few works analyze the expressive power of spectral GNNs. This paper studies spectral GNNs’ expressive power theoretically. We first prove that even spectral GNNs without nonlinearity can produce arbitrary graph signals and give two conditions for reaching universality. They are: 1) no multiple eigenvalues of graph Laplacian, and 2) no missing frequency components in node features. We also establish a connection between the expressive power of spectral GNNs and Graph Isomorphism (GI) testing, the latter of which is often used to characterize spatial GNNs’ expressive power. Moreover, we study the difference in empirical performance among different spectral GNNs with the same expressive power from an optimization perspective, and motivate the use of an orthogonal basis whose weight function corresponds to the graph signal density in the spectrum. Inspired by the analysis, we propose JacobiConv, which uses Jacobi basis due to its orthogonality and flexibility to adapt to a wide range of weight functions. JacobiConv deserts nonlinearity while outperforming all baselines on both synthetic and real-world datasets.
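As a reading aid, here is a minimal sketch of the Jacobi-basis filtering the abstract describes. This is an illustrative reconstruction, not the authors' released code: the function names, the use of the symmetrically normalized adjacency matrix A_hat (whose spectrum lies in [-1, 1], the orthogonality interval of the Jacobi polynomials), and the default a = b = 1 are all assumptions.

import numpy as np

def jacobi_basis(A_hat, X, K, a=1.0, b=1.0):
    # Evaluate P_0(A_hat) @ X, ..., P_K(A_hat) @ X via the standard
    # three-term recurrence for the Jacobi polynomials P_k^{(a,b)},
    # using only matrix-matrix products with A_hat.
    bases = [X]  # P_0(x) = 1
    if K >= 1:   # P_1(x) = (a - b)/2 + (a + b + 2)/2 * x
        bases.append((a - b) / 2 * X + (a + b + 2) / 2 * (A_hat @ X))
    for k in range(2, K + 1):
        c = 2 * k + a + b
        A_k = c * (c - 1) / (2 * k * (k + a + b))
        B_k = (c - 1) * (a * a - b * b) / (2 * k * (k + a + b) * (c - 2))
        C_k = (k + a - 1) * (k + b - 1) * c / (k * (k + a + b) * (c - 2))
        bases.append(A_k * (A_hat @ bases[-1]) + B_k * bases[-1]
                     - C_k * bases[-2])
    return bases

def jacobi_conv(A_hat, X, W, alphas, a=1.0, b=1.0):
    # Linear spectral filter: Z = sum_k alphas[k] * P_k(A_hat) @ X @ W.
    # Deliberately no nonlinearity, mirroring the paper's point that a
    # linear spectral GNN is already universal under the two conditions
    # stated in the abstract (no multiple Laplacian eigenvalues, no
    # missing frequency components in the node features).
    bases = jacobi_basis(A_hat, X, len(alphas) - 1, a, b)
    return sum(al * (B @ W) for al, B in zip(alphas, bases))

In JacobiConv itself the basis coefficients are learned (per output channel), and the Jacobi weight function (1 - x)^a (1 + x)^b is what lets the basis adapt to the graph signal density mentioned in the abstract; plain float coefficients are used here only for brevity.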
@inproceedings{pmlr-v162-wang22am,
abstract = {Spectral Graph Neural Network is a kind of Graph Neural Network (GNN) based on graph signal filters. Some models able to learn arbitrary spectral filters have emerged recently. However, few works analyze the expressive power of spectral GNNs. This paper studies spectral GNNs’ expressive power theoretically. We first prove that even spectral GNNs without nonlinearity can produce arbitrary graph signals and give two conditions for reaching universality. They are: 1) no multiple eigenvalues of graph Laplacian, and 2) no missing frequency components in node features. We also establish a connection between the expressive power of spectral GNNs and Graph Isomorphism (GI) testing, the latter of which is often used to characterize spatial GNNs’ expressive power. Moreover, we study the difference in empirical performance among different spectral GNNs with the same expressive power from an optimization perspective, and motivate the use of an orthogonal basis whose weight function corresponds to the graph signal density in the spectrum. Inspired by the analysis, we propose JacobiConv, which uses Jacobi basis due to its orthogonality and flexibility to adapt to a wide range of weight functions. JacobiConv deserts nonlinearity while outperforming all baselines on both synthetic and real-world datasets.},
author = {Wang, Xiyuan and Zhang, Muhan},
booktitle = {Proceedings of the 39th International Conference on Machine Learning},
editor = {Chaudhuri, Kamalika and Jegelka, Stefanie and Song, Le and Szepesvari, Csaba and Niu, Gang and Sabato, Sivan},
keywords = {graph readinglist},
month = {17--23 Jul},
pages = {23341--23362},
pdf = {https://proceedings.mlr.press/v162/wang22am/wang22am.pdf},
publisher = {PMLR},
series = {Proceedings of Machine Learning Research},
title = {How Powerful are Spectral Graph Neural Networks},
url = {https://proceedings.mlr.press/v162/wang22am.html},
volume = 162,
year = 2022
}
{"_id":"hq7PAKHBfxL66Gi2o","bibbaseid":"wang-zhang-howpowerfularespectralgraphneuralnetworks-2022","author_short":["Wang, X.","Zhang, M."],"bibdata":{"bibtype":"inproceedings","type":"inproceedings","abstract":"Spectral Graph Neural Network is a kind of Graph Neural Network (GNN) based on graph signal filters. Some models able to learn arbitrary spectral filters have emerged recently. However, few works analyze the expressive power of spectral GNNs. This paper studies spectral GNNs’ expressive power theoretically. We first prove that even spectral GNNs without nonlinearity can produce arbitrary graph signals and give two conditions for reaching universality. They are: 1) no multiple eigenvalues of graph Laplacian, and 2) no missing frequency components in node features. We also establish a connection between the expressive power of spectral GNNs and Graph Isomorphism (GI) testing, the latter of which is often used to characterize spatial GNNs’ expressive power. Moreover, we study the difference in empirical performance among different spectral GNNs with the same expressive power from an optimization perspective, and motivate the use of an orthogonal basis whose weight function corresponds to the graph signal density in the spectrum. Inspired by the analysis, we propose JacobiConv, which uses Jacobi basis due to its orthogonality and flexibility to adapt to a wide range of weight functions. JacobiConv deserts nonlinearity while outperforming all baselines on both synthetic and real-world datasets.","added-at":"2023-08-21T15:42:18.000+0200","author":[{"propositions":[],"lastnames":["Wang"],"firstnames":["Xiyuan"],"suffixes":[]},{"propositions":[],"lastnames":["Zhang"],"firstnames":["Muhan"],"suffixes":[]}],"biburl":"https://www.bibsonomy.org/bibtex/24b27d7bfdc5e42fb0fe6232d1d20bc67/tobias.koopmann","booktitle":"Proceedings of the 39th International Conference on Machine Learning","editor":[{"propositions":[],"lastnames":["Chaudhuri"],"firstnames":["Kamalika"],"suffixes":[]},{"propositions":[],"lastnames":["Jegelka"],"firstnames":["Stefanie"],"suffixes":[]},{"propositions":[],"lastnames":["Song"],"firstnames":["Le"],"suffixes":[]},{"propositions":[],"lastnames":["Szepesvari"],"firstnames":["Csaba"],"suffixes":[]},{"propositions":[],"lastnames":["Niu"],"firstnames":["Gang"],"suffixes":[]},{"propositions":[],"lastnames":["Sabato"],"firstnames":["Sivan"],"suffixes":[]}],"interhash":"5d0683a696eb468206d5ae9c5dec0edc","intrahash":"4b27d7bfdc5e42fb0fe6232d1d20bc67","keywords":"graph readinglist","month":"17–23 Jul","pages":"23341–23362","pdf":"https://proceedings.mlr.press/v162/wang22am/wang22am.pdf","publisher":"PMLR","series":"Proceedings of Machine Learning Research","timestamp":"2023-08-21T15:42:18.000+0200","title":"How Powerful are Spectral Graph Neural Networks","url":"https://proceedings.mlr.press/v162/wang22am.html","volume":"162","year":"2022","bibtex":"@inproceedings{pmlr-v162-wang22am,\n abstract = {Spectral Graph Neural Network is a kind of Graph Neural Network (GNN) based on graph signal filters. Some models able to learn arbitrary spectral filters have emerged recently. However, few works analyze the expressive power of spectral GNNs. This paper studies spectral GNNs’ expressive power theoretically. We first prove that even spectral GNNs without nonlinearity can produce arbitrary graph signals and give two conditions for reaching universality. They are: 1) no multiple eigenvalues of graph Laplacian, and 2) no missing frequency components in node features. 
We also establish a connection between the expressive power of spectral GNNs and Graph Isomorphism (GI) testing, the latter of which is often used to characterize spatial GNNs’ expressive power. Moreover, we study the difference in empirical performance among different spectral GNNs with the same expressive power from an optimization perspective, and motivate the use of an orthogonal basis whose weight function corresponds to the graph signal density in the spectrum. Inspired by the analysis, we propose JacobiConv, which uses Jacobi basis due to its orthogonality and flexibility to adapt to a wide range of weight functions. JacobiConv deserts nonlinearity while outperforming all baselines on both synthetic and real-world datasets.},\n added-at = {2023-08-21T15:42:18.000+0200},\n author = {Wang, Xiyuan and Zhang, Muhan},\n biburl = {https://www.bibsonomy.org/bibtex/24b27d7bfdc5e42fb0fe6232d1d20bc67/tobias.koopmann},\n booktitle = {Proceedings of the 39th International Conference on Machine Learning},\n editor = {Chaudhuri, Kamalika and Jegelka, Stefanie and Song, Le and Szepesvari, Csaba and Niu, Gang and Sabato, Sivan},\n interhash = {5d0683a696eb468206d5ae9c5dec0edc},\n intrahash = {4b27d7bfdc5e42fb0fe6232d1d20bc67},\n keywords = {graph readinglist},\n month = {17--23 Jul},\n pages = {23341--23362},\n pdf = {https://proceedings.mlr.press/v162/wang22am/wang22am.pdf},\n publisher = {PMLR},\n series = {Proceedings of Machine Learning Research},\n timestamp = {2023-08-21T15:42:18.000+0200},\n title = {How Powerful are Spectral Graph Neural Networks},\n url = {https://proceedings.mlr.press/v162/wang22am.html},\n volume = 162,\n year = 2022\n}\n\n","author_short":["Wang, X.","Zhang, M."],"editor_short":["Chaudhuri, K.","Jegelka, S.","Song, L.","Szepesvari, C.","Niu, G.","Sabato, S."],"key":"pmlr-v162-wang22am","id":"pmlr-v162-wang22am","bibbaseid":"wang-zhang-howpowerfularespectralgraphneuralnetworks-2022","role":"author","urls":{"Paper":"https://proceedings.mlr.press/v162/wang22am.html"},"keyword":["graph readinglist"],"metadata":{"authorlinks":{}},"html":""},"bibtype":"inproceedings","biburl":"http://www.bibsonomy.org/bib/author/zhang?items=1000","dataSources":["6yXn8CtuzyEbCSr2m"],"keywords":["graph readinglist"],"search_terms":["powerful","spectral","graph","neural","networks","wang","zhang"],"title":"How Powerful are Spectral Graph Neural Networks","year":2022}