FLARE: Defending Federated Learning against Model Poisoning Attacks via Latent Space Representations. Wang, N., Xiao, Y., Chen, Y., Hu, Y., Lou, W., & Hou, Y. T. In Proceedings of the 2022 ACM on Asia Conference on Computer and Communications Security (ASIA CCS '22), pages 946–958, New York, NY, USA, May 2022. Association for Computing Machinery. doi: 10.1145/3488932.3517395

Abstract: Federated learning (FL) has been shown vulnerable to a new class of adversarial attacks, known as model poisoning attacks (MPA), where one or more malicious clients try to poison the global model by sending carefully crafted local model updates to the central parameter server. Existing defenses that have been fixated on analyzing model parameters show limited effectiveness in detecting such carefully crafted poisonous models. In this work, we propose FLARE, a robust model aggregation mechanism for FL, which is resilient against state-of-the-art MPAs. Instead of solely depending on model parameters, FLARE leverages the penultimate layer representations (PLRs) of the model for characterizing the adversarial influence on each local model update. PLRs demonstrate a better capability to differentiate malicious models from benign ones than model parameter-based solutions. We further propose a trust evaluation method that estimates a trust score for each model update based on pairwise PLR discrepancies among all model updates. Under the assumption that honest clients make up the majority, FLARE assigns a trust score to each model update in a way that those far from the benign cluster are assigned low scores. FLARE then aggregates the model updates weighted by their trust scores and finally updates the global model. Extensive experimental results demonstrate the effectiveness of FLARE in defending FL against various MPAs, including semantic backdoor attacks, trojan backdoor attacks, and untargeted attacks, and safeguarding the accuracy of FL.
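The abstract outlines a trust-weighted aggregation rule driven by pairwise discrepancies between clients' penultimate layer representations (PLRs). Below is a minimal Python sketch of that idea, for orientation only; it assumes a server-side auxiliary dataset for extracting PLRs, uses Euclidean distance between mean PLRs as a stand-in for the paper's discrepancy measure, and derives trust scores from nearest-neighbour vote counts followed by a softmax. None of these specifics come from the entry itself, and the function name trust_weighted_aggregate is hypothetical.

import numpy as np

def trust_weighted_aggregate(plrs, updates, k=None):
    # plrs:    list of (n_samples, d) arrays, one PLR matrix per client,
    #          computed by the server on a small auxiliary dataset (assumed).
    # updates: list of flattened model-update vectors, one per client.
    m = len(plrs)

    # Pairwise discrepancy between clients' PLRs; Euclidean distance between
    # mean representations is an illustrative stand-in, not the paper's metric.
    centers = np.stack([p.mean(axis=0) for p in plrs])            # (m, d)
    dist = np.linalg.norm(centers[:, None] - centers[None, :], axis=-1)

    # Count how often each client appears among other clients' k nearest
    # neighbours; updates far from the benign cluster collect few votes.
    k = k or max(1, m // 2)
    nearest = np.argsort(dist, axis=1)[:, 1:k + 1]                # skip self
    votes = np.bincount(nearest.ravel(), minlength=m).astype(float)

    # Turn vote counts into trust scores (softmax) and aggregate updates.
    trust = np.exp(votes - votes.max())
    trust /= trust.sum()
    return np.average(np.stack(updates), axis=0, weights=trust)

Under an honest-majority assumption, outlying updates receive few neighbour votes and therefore near-zero aggregation weight, which mirrors the behaviour the abstract describes; the authors' actual discrepancy measure and scoring details are given in the paper itself.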
@inproceedings{wang_flare_2022,
address = {New York, NY, USA},
series = {{ASIA} {CCS} '22},
title = {{FLARE}: {Defending} {Federated} {Learning} against {Model} {Poisoning} {Attacks} via {Latent} {Space} {Representations}},
isbn = {9781450391405},
shorttitle = {{FLARE}},
url = {https://dl.acm.org/doi/10.1145/3488932.3517395},
doi = {10.1145/3488932.3517395},
abstract = {Federated learning (FL) has been shown vulnerable to a new class of adversarial attacks, known as model poisoning attacks (MPA), where one or more malicious clients try to poison the global model by sending carefully crafted local model updates to the central parameter server. Existing defenses that have been fixated on analyzing model parameters show limited effectiveness in detecting such carefully crafted poisonous models. In this work, we propose FLARE, a robust model aggregation mechanism for FL, which is resilient against state-of-the-art MPAs. Instead of solely depending on model parameters, FLARE leverages the penultimate layer representations (PLRs) of the model for characterizing the adversarial influence on each local model update. PLRs demonstrate a better capability to differentiate malicious models from benign ones than model parameter-based solutions. We further propose a trust evaluation method that estimates a trust score for each model update based on pairwise PLR discrepancies among all model updates. Under the assumption that honest clients make up the majority, FLARE assigns a trust score to each model update in a way that those far from the benign cluster are assigned low scores. FLARE then aggregates the model updates weighted by their trust scores and finally updates the global model. Extensive experimental results demonstrate the effectiveness of FLARE in defending FL against various MPAs, including semantic backdoor attacks, trojan backdoor attacks, and untargeted attacks, and safeguarding the accuracy of FL.},
urldate = {2024-02-08},
booktitle = {Proceedings of the 2022 {ACM} on {Asia} {Conference} on {Computer} and {Communications} {Security}},
publisher = {Association for Computing Machinery},
author = {Wang, Ning and Xiao, Yang and Chen, Yimin and Hu, Yang and Lou, Wenjing and Hou, Y. Thomas},
month = may,
year = {2022},
keywords = {defense, federated learning, model poisoning attack},
pages = {946--958},
}
{"_id":"xm2GTtFMhWCzBBhNq","bibbaseid":"wang-xiao-chen-hu-lou-hou-flaredefendingfederatedlearningagainstmodelpoisoningattacksvialatentspacerepresentations-2022","author_short":["Wang, N.","Xiao, Y.","Chen, Y.","Hu, Y.","Lou, W.","Hou, Y. T."],"bibdata":{"bibtype":"inproceedings","type":"inproceedings","address":"New York, NY, USA","series":"ASIA CCS '22","title":"FLARE: Defending Federated Learning against Model Poisoning Attacks via Latent Space Representations","isbn":"9781450391405","shorttitle":"FLARE","url":"https://dl.acm.org/doi/10.1145/3488932.3517395","doi":"10.1145/3488932.3517395","abstract":"Federated learning (FL) has been shown vulnerable to a new class of adversarial attacks, known as model poisoning attacks (MPA), where one or more malicious clients try to poison the global model by sending carefully crafted local model updates to the central parameter server. Existing defenses that have been fixated on analyzing model parameters show limited effectiveness in detecting such carefully crafted poisonous models. In this work, we propose FLARE, a robust model aggregation mechanism for FL, which is resilient against state-of-the-art MPAs. Instead of solely depending on model parameters, FLARE leverages the penultimate layer representations (PLRs) of the model for characterizing the adversarial influence on each local model update. PLRs demonstrate a better capability to differentiate malicious models from benign ones than model parameter-based solutions. We further propose a trust evaluation method that estimates a trust score for each model update based on pairwise PLR discrepancies among all model updates. Under the assumption that honest clients make up the majority, FLARE assigns a trust score to each model update in a way that those far from the benign cluster are assigned low scores. FLARE then aggregates the model updates weighted by their trust scores and finally updates the global model. 
Extensive experimental results demonstrate the effectiveness of FLARE in defending FL against various MPAs, including semantic backdoor attacks, trojan backdoor attacks, and untargeted attacks, and safeguarding the accuracy of FL.","urldate":"2024-02-08","booktitle":"Proceedings of the 2022 ACM on Asia Conference on Computer and Communications Security","publisher":"Association for Computing Machinery","author":[{"propositions":[],"lastnames":["Wang"],"firstnames":["Ning"],"suffixes":[]},{"propositions":[],"lastnames":["Xiao"],"firstnames":["Yang"],"suffixes":[]},{"propositions":[],"lastnames":["Chen"],"firstnames":["Yimin"],"suffixes":[]},{"propositions":[],"lastnames":["Hu"],"firstnames":["Yang"],"suffixes":[]},{"propositions":[],"lastnames":["Lou"],"firstnames":["Wenjing"],"suffixes":[]},{"propositions":[],"lastnames":["Hou"],"firstnames":["Y.","Thomas"],"suffixes":[]}],"month":"May","year":"2022","keywords":"defense, federated learning, model poisoning attack","pages":"946–958","bibtex":"@inproceedings{wang_flare_2022,\n\taddress = {New York, NY, USA},\n\tseries = {{ASIA} {CCS} '22},\n\ttitle = {{FLARE}: {Defending} {Federated} {Learning} against {Model} {Poisoning} {Attacks} via {Latent} {Space} {Representations}},\n\tisbn = {9781450391405},\n\tshorttitle = {{FLARE}},\n\turl = {https://dl.acm.org/doi/10.1145/3488932.3517395},\n\tdoi = {10.1145/3488932.3517395},\n\tabstract = {Federated learning (FL) has been shown vulnerable to a new class of adversarial attacks, known as model poisoning attacks (MPA), where one or more malicious clients try to poison the global model by sending carefully crafted local model updates to the central parameter server. Existing defenses that have been fixated on analyzing model parameters show limited effectiveness in detecting such carefully crafted poisonous models. In this work, we propose FLARE, a robust model aggregation mechanism for FL, which is resilient against state-of-the-art MPAs. Instead of solely depending on model parameters, FLARE leverages the penultimate layer representations (PLRs) of the model for characterizing the adversarial influence on each local model update. PLRs demonstrate a better capability to differentiate malicious models from benign ones than model parameter-based solutions. We further propose a trust evaluation method that estimates a trust score for each model update based on pairwise PLR discrepancies among all model updates. Under the assumption that honest clients make up the majority, FLARE assigns a trust score to each model update in a way that those far from the benign cluster are assigned low scores. FLARE then aggregates the model updates weighted by their trust scores and finally updates the global model. Extensive experimental results demonstrate the effectiveness of FLARE in defending FL against various MPAs, including semantic backdoor attacks, trojan backdoor attacks, and untargeted attacks, and safeguarding the accuracy of FL.},\n\turldate = {2024-02-08},\n\tbooktitle = {Proceedings of the 2022 {ACM} on {Asia} {Conference} on {Computer} and {Communications} {Security}},\n\tpublisher = {Association for Computing Machinery},\n\tauthor = {Wang, Ning and Xiao, Yang and Chen, Yimin and Hu, Yang and Lou, Wenjing and Hou, Y. Thomas},\n\tmonth = may,\n\tyear = {2022},\n\tkeywords = {defense, federated learning, model poisoning attack},\n\tpages = {946--958},\n}\n\n","author_short":["Wang, N.","Xiao, Y.","Chen, Y.","Hu, Y.","Lou, W.","Hou, Y. 
T."],"key":"wang_flare_2022","id":"wang_flare_2022","bibbaseid":"wang-xiao-chen-hu-lou-hou-flaredefendingfederatedlearningagainstmodelpoisoningattacksvialatentspacerepresentations-2022","role":"author","urls":{"Paper":"https://dl.acm.org/doi/10.1145/3488932.3517395"},"keyword":["defense","federated learning","model poisoning attack"],"metadata":{"authorlinks":{}}},"bibtype":"inproceedings","biburl":"https://api.zotero.org/users/8994433/collections/66SDI5S7/items?key=ozCmKMmrE26WwEN9PAdOLaiV&format=bibtex&limit=100","dataSources":["Rxy8umoNP78qdq56v","8BDe79PKmLEoGEieK","kQ6oEMED6p4RGbihm","yM68t35RzDgJZajbn","nXz2cN22CbQDukEnJ"],"keywords":["defense","federated learning","model poisoning attack"],"search_terms":["flare","defending","federated","learning","against","model","poisoning","attacks","via","latent","space","representations","wang","xiao","chen","hu","lou","hou"],"title":"FLARE: Defending Federated Learning against Model Poisoning Attacks via Latent Space Representations","year":2022}