A learning result for continuous-time recurrent neural networks. Sontag, E. Systems Control Lett., 34(3):151–158, Elsevier Science Publishers B. V., Amsterdam, The Netherlands, 1998. doi abstract bibtex The following learning problem is considered, for continuous-time recurrent neural networks having sigmoidal activation functions. Given a "black box" representing an unknown system, measurements of output derivatives are collected, for a set of randomly generated inputs, and a network is used to approximate the observed behavior. It is shown that the number of inputs needed for reliable generalization (the sample complexity of the learning problem) is upper bounded by an expression that grows polynomially with the dimension of the network and logarithmically with the number of output derivatives being matched.
@ARTICLE{MR1632338,
AUTHOR = {E.D. Sontag},
JOURNAL = {Systems Control Lett.},
TITLE = {A learning result for continuous-time recurrent neural
networks},
YEAR = {1998},
OPTMONTH = {},
OPTNOTE = {},
NUMBER = {3},
PAGES = {151--158},
VOLUME = {34},
ADDRESS = {Amsterdam, The Netherlands},
KEYWORDS = {neural networks, VC dimension,
recurrent neural networks},
PUBLISHER = {Elsevier Science Publishers B. V.},
PDF = {../../FTPDIR/recur-learn.pdf},
ABSTRACT = { The following learning problem is considered, for
continuous-time recurrent neural networks having sigmoidal activation
functions. Given a ``black box'' representing an unknown system,
measurements of output derivatives are collected, for a set of
randomly generated inputs, and a network is used to approximate the
observed behavior. It is shown that the number of inputs needed for
reliable generalization (the sample complexity of the learning
problem) is upper bounded by an expression that grows polynomially
with the dimension of the network and logarithmically with the number
of output derivatives being matched. },
DOI = {10.1016/S0167-6911(98)00006-1}
}
Downloads: 0
{"_id":"ypQdi3sdEBGp5GwRM","bibbaseid":"sontag-alearningresultforcontinuoustimerecurrentneuralnetworks-1998","downloads":0,"creationDate":"2018-10-18T05:07:06.409Z","title":"A learning result for continuous-time recurrent neural networks","author_short":["Sontag, E."],"year":1998,"bibtype":"article","biburl":"http://www.sontaglab.org/PUBDIR/Biblio/complete-bibliography.bib","bibdata":{"bibtype":"article","type":"article","author":[{"firstnames":["E.D."],"propositions":[],"lastnames":["Sontag"],"suffixes":[]}],"journal":"Systems Control Lett.","title":"A learning result for continuous-time recurrent neural networks","year":"1998","optmonth":"","optnote":"","number":"3","pages":"151–158","volume":"34","address":"Amsterdam, The Netherlands, The Netherlands","keywords":"neural networks, VC dimension, recurrent neural networks","publisher":"Elsevier Science Publishers B. V.","pdf":"../../FTPDIR/recur-learn.pdf","abstract":"The following learning problem is considered, for continuous-time recurrent neural networks having sigmoidal activation functions. Given a ``black box'' representing an unknown system, measurements of output derivatives are collected, for a set of randomly generated inputs, and a network is used to approximate the observed behavior. It is shown that the number of inputs needed for reliable generalization (the sample complexity of the learning problem) is upper bounded by an expression that grows polynomially with the dimension of the network and logarithmically with the number of output derivatives being matched. ","doi":"http://dx.doi.org/10.1016/S0167-6911(98)00006-1","bibtex":"@ARTICLE{MR1632338,\n AUTHOR = {E.D. 
Sontag},\n JOURNAL = {Systems Control Lett.},\n TITLE = {A learning result for continuous-time recurrent neural \n networks},\n YEAR = {1998},\n OPTMONTH = {},\n OPTNOTE = {},\n NUMBER = {3},\n PAGES = {151--158},\n VOLUME = {34},\n ADDRESS = {Amsterdam, The Netherlands, The Netherlands},\n KEYWORDS = {neural networks, VC dimension, \n recurrent neural networks},\n PUBLISHER = {Elsevier Science Publishers B. V.},\n PDF = {../../FTPDIR/recur-learn.pdf},\n ABSTRACT = { The following learning problem is considered, for \n continuous-time recurrent neural networks having sigmoidal activation \n functions. Given a ``black box'' representing an unknown system, \n measurements of output derivatives are collected, for a set of \n randomly generated inputs, and a network is used to approximate the \n observed behavior. It is shown that the number of inputs needed for \n reliable generalization (the sample complexity of the learning \n problem) is upper bounded by an expression that grows polynomially \n with the dimension of the network and logarithmically with the number \n of output derivatives being matched. },\n DOI = {http://dx.doi.org/10.1016/S0167-6911(98)00006-1}\n}\n\n","author_short":["Sontag, E."],"key":"MR1632338","id":"MR1632338","bibbaseid":"sontag-alearningresultforcontinuoustimerecurrentneuralnetworks-1998","role":"author","urls":{},"keyword":["neural networks","VC dimension","recurrent neural networks"],"downloads":0,"html":""},"search_terms":["learning","result","continuous","time","recurrent","neural","networks","sontag"],"keywords":["neural networks","vc dimension","recurrent neural networks"],"authorIDs":["5bc814f9db768e100000015a"],"dataSources":["DKqZbTmd7peqE4THw"]}