Sardy, S. On the practice of rescaling covariates. *Int Stat Rev*, 76:285–297, 2008.

Abstract: Whether doing parametric or nonparametric regression with shrinkage, thresholding, penalized likelihood, Bayesian posterior estimators (e.g., ridge regression, lasso, principal component regression, waveshrink or Markov random field), it is common practice to rescale covariates by dividing by their respective standard errors ρ. The stated goal of this operation is to provide unitless covariates to compare like with like, especially when penalized likelihood or prior distributions are used. We contend that this vision is too simplistic. Instead, we propose to take into account a more essential component of the structure of the regression matrix by rescaling the covariates based on the diagonal elements of the covariance matrix Σ of the maximum-likelihood estimator. We illustrate the differences between the standard ρ and proposed Σ-rescalings with various estimators and data sets.

@article{sar08pra,
  title    = {On the Practice of Rescaling Covariates},
  author   = {Sardy, Sylvain},
  journal  = {International Statistical Review},
  volume   = {76},
  pages    = {285--297},
  year     = {2008},
  abstract = {Whether doing parametric or nonparametric regression with shrinkage, thresholding, penalized likelihood, Bayesian posterior estimators (e.g., ridge regression, lasso, principal component regression, waveshrink or Markov random field), it is common practice to rescale covariates by dividing by their respective standard errors ρ. The stated goal of this operation is to provide unitless covariates to compare like with like, especially when penalized likelihood or prior distributions are used. We contend that this vision is too simplistic. Instead, we propose to take into account a more essential component of the structure of the regression matrix by rescaling the covariates based on the diagonal elements of the covariance matrix Σ of the maximum-likelihood estimator. We illustrate the differences between the standard ρ and proposed Σ-rescalings with various estimators and data sets.},
  keywords = {lasso, lnu-prior, penalization, principal-component-regression, ridge-regression, scaling, standardization, wavelets},
}

Downloads: 0

{"_id":"ESvjipYfNFvJjbhHX","bibbaseid":"sardy-onthepracticeofrescalingcovariates-2008","downloads":0,"creationDate":"2018-06-23T20:06:34.501Z","title":"On the practice of rescaling covariates","author_short":["Sardy, S."],"year":2008,"bibtype":"article","biburl":"https://api.zotero.org/groups/2199991/items?key=lvYOaZITy5xmJTj2Chg8pvSV&format=bibtex&limit=100","bibdata":{"bibtype":"article","type":"article","title":"On the practice of rescaling covariates","volume":"76","abstract":"Whether doing parametric or nonparametric regression with shrinkage, thresholding, penalized likelihood, Bayesian posterior estimators (e.g., ridge regression, lasso, principal component regression, waveshrink or Markov random field), it is common practice to rescale covariates by dividing by their respective standard errors ρ. The stated goal of this operation is to provide unitless covariates to compare like with like, especially when penalized likelihood or prior distributions are used. We contend that this vision is too simplistic. Instead, we propose to take into account a more essential component of the structure of the regression matrix by rescaling the covariates based on the diagonal elements of the covariance matrix Σ of the maximum-likelihood estimator. 
We illustrate the differences between the standard ρ and proposed Σ-rescalings with various estimators and data sets.","journal":"Int Stat Rev","author":[{"propositions":[],"lastnames":["Sardy"],"firstnames":["Sylvain"],"suffixes":[]}],"year":"2008","note":"tex.citeulike-article-id= 13265780 tex.posted-at= 2014-07-14 14:10:04 tex.priority= 0","keywords":"lasso, lnu-prior, penalization, principal-component-regression, ridge-regression, scaling, standardization, wavelets","pages":"285–297","bibtex":"@article{sar08pra,\n\ttitle = {On the practice of rescaling covariates},\n\tvolume = {76},\n\tabstract = {Whether doing parametric or nonparametric regression with shrinkage, thresholding, penalized likelihood, Bayesian posterior estimators (e.g., ridge regression, lasso, principal component regression, waveshrink or Markov random field), it is common practice to rescale covariates by dividing by their respective standard errors ρ. The stated goal of this operation is to provide unitless covariates to compare like with like, especially when penalized likelihood or prior distributions are used. We contend that this vision is too simplistic. Instead, we propose to take into account a more essential component of the structure of the regression matrix by rescaling the covariates based on the diagonal elements of the covariance matrix Σ of the maximum-likelihood estimator. 
We illustrate the differences between the standard ρ and proposed Σ-rescalings with various estimators and data sets.},\n\tjournal = {Int Stat Rev},\n\tauthor = {Sardy, Sylvain},\n\tyear = {2008},\n\tnote = {tex.citeulike-article-id= 13265780\ntex.posted-at= 2014-07-14 14:10:04\ntex.priority= 0},\n\tkeywords = {lasso, lnu-prior, penalization, principal-component-regression, ridge-regression, scaling, standardization, wavelets},\n\tpages = {285--297},\n}\n\n","author_short":["Sardy, S."],"key":"sar08pra","id":"sar08pra","bibbaseid":"sardy-onthepracticeofrescalingcovariates-2008","role":"author","urls":{},"keyword":["lasso","lnu-prior","penalization","principal-component-regression","ridge-regression","scaling","standardization","wavelets"],"downloads":0},"search_terms":["practice","rescaling","covariates","sardy"],"keywords":["lasso","lnu-prior","markov-random-field","penalization","principal-component-regression","ridge-regression","scaling","standardization","wavelets","*import"],"authorIDs":[],"dataSources":["fGxngGYXSjSNcMccW"]}