Generated by bibbase.org
You can create a new website with this list, or embed it in an existing web page by copying and pasting any of the following snippets.
JavaScript (easiest):
<script src="https://bibbase.org/service/mendeley/dfdf22e7-6c4d-36b8-ad03-fb429b4617f1/group/942f5ce8-4375-3faa-b63c-efca8f649cba?jsonp=1"></script>
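The script renders the list at the exact point in the page where the tag appears (it writes the markup via document.write). A minimal sketch of embedding it in an existing page, assuming a plain HTML page (the surrounding markup and headings are placeholders, not part of BibBase):

<!DOCTYPE html>
<html>
  <head>
    <meta charset="utf-8">
    <title>Publications</title> <!-- placeholder page title -->
  </head>
  <body>
    <h1>Publications</h1> <!-- placeholder heading -->
    <!-- The publication list is written here, where the script tag is placed. -->
    <script src="https://bibbase.org/service/mendeley/dfdf22e7-6c4d-36b8-ad03-fb429b4617f1/group/942f5ce8-4375-3faa-b63c-efca8f649cba?jsonp=1"></script>
  </body>
</html>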
PHP:
<?php
$contents = file_get_contents("https://bibbase.org/service/mendeley/dfdf22e7-6c4d-36b8-ad03-fb429b4617f1/group/942f5ce8-4375-3faa-b63c-efca8f649cba?jsonp=1");
print_r($contents);
?>
iFrame (not recommended):
<iframe src="https://bibbase.org/service/mendeley/dfdf22e7-6c4d-36b8-ad03-fb429b4617f1/group/942f5ce8-4375-3faa-b63c-efca8f649cba?jsonp=1"></iframe>
For more details, see the documentation.

To the site owner:


Action required! Mendeley is changing its API. In order to keep using Mendeley with BibBase past April 14th, you need to:

1. renew the authorization for BibBase on Mendeley, and
2. update the BibBase URL in your page the same way you did when you initially set up this page (see the sketch below).
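Updating the URL amounts to swapping the src value in the embed snippet for the new one BibBase gives you after re-authorizing. A hypothetical before/after sketch (the new URL below is a placeholder, not the real replacement):

<!-- Before: old Mendeley service URL -->
<script src="https://bibbase.org/service/mendeley/dfdf22e7-6c4d-36b8-ad03-fb429b4617f1/group/942f5ce8-4375-3faa-b63c-efca8f649cba?jsonp=1"></script>

<!-- After: placeholder; use the URL BibBase shows you after renewing authorization -->
<script src="https://bibbase.org/service/mendeley/NEW-PROFILE-ID/group/NEW-GROUP-ID?jsonp=1"></script>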

2022 (1)
Delivering Fairness in Human Resources AI: Mutual Information to the Rescue. Hemamou, L.; and Coleman, W. In Proceedings of the 2nd Conference of the Asia-Pacific Chapter of the Association for Computational Linguistics and the 12th International Joint Conference on Natural Language Processing (Volume 1: Long Papers), pages 867–882, November 2022. Association for Computational Linguistics.
@inproceedings{hemamou2022fairness,
  title     = {Delivering Fairness in Human Resources AI: Mutual Information to the Rescue},
  author    = {Hemamou, Leo and Coleman, William},
  booktitle = {Proceedings of the 2nd Conference of the Asia-Pacific Chapter of the Association for Computational Linguistics and the 12th International Joint Conference on Natural Language Processing (Volume 1: Long Papers)},
  year      = {2022},
  month     = {11},
  pages     = {867--882},
  publisher = {Association for Computational Linguistics},
  address   = {Online only},
  url       = {https://aclanthology.org/2022.aacl-main.64}
}
Automatic language processing is used frequently in the Human Resources (HR) sector for automated candidate sourcing and evaluation of resumes. These models often use pre-trained language models where it is difficult to know if possible biases exist. Recently, Mutual Information (MI) methods have demonstrated notable performance in obtaining representations agnostic to sensitive variables such as gender or ethnicity. However, accessing these variables can sometimes be challenging, and their use is prohibited in some jurisdictions. These factors can make detecting and mitigating biases challenging. In this context, we propose to minimize the MI between a candidate's name and a latent representation of their CV or short biography. This method may mitigate bias from sensitive variables without requiring the collection of these variables. We evaluate this methodology by first projecting the name representation into a smaller space to prevent potential MI minimization problems in high dimensions.
2021 (2)
Multimodal Hierarchical Attention Neural Network: Looking for Candidates Behaviour which Impact Recruiter's Decision. Hemamou, L.; Guillon, A.; Martin, J.-C.; and Clavel, C. IEEE Transactions on Affective Computing, 2021.
@article{hemamou2021multimodal,
  title    = {Multimodal Hierarchical Attention Neural Network: Looking for Candidates Behaviour which Impact Recruiter's Decision},
  author   = {Hemamou, Leo and Guillon, Arthur and Martin, Jean-Claude and Clavel, Chloe},
  journal  = {IEEE Transactions on Affective Computing},
  year     = {2021},
  doi      = {10.1109/TAFFC.2021.3113159},
  keywords = {Databases, Deep learning, Face recognition, Feature extraction, Interviews, Neural networks, Nonverbal signals, Visualization, deep learning, employment, human resources, interpretability, job interviews, multimodal systems, neural nets}
}
Automatic analysis of job interviews has gained in interest amongst academic and industrial research. The particular case of asynchronous video interviews allows to collect vast corpora of videos where candidates answer standardized questions in monologue videos, enabling the use of deep learning algorithms. On the other hand, state-of-the-art approaches still face some obstacles, among which the fusion of information from multiple modalities and the interpretability of the predictions. We study the task of predicting candidates performance in asynchronous video interviews using three modalities (verbal content, prosody and facial expressions) independently or simultaneously, using data from real interviews which take place in real conditions. We propose a sequential and multimodal deep neural network model, called Multimodal HireNet. We compare this model to state-of-the-art approaches and show a clear improvement of the performance. Moreover, the architecture we propose is based on attention mechanism, which provides interpretability about which questions, moments and modalities contribute the most to the output of the network. While other deep learning systems use attention mechanisms to offer a visualization of moments with attention values, the proposed methodology enables an in-depth interpretation of the predictions by an overall analysis of the features of social signals contained in these moments.
Don’t Judge Me by My Face: An Indirect Adversarial Approach to Remove Sensitive Information From Multimodal Neural Representation in Asynchronous Job Video Interviews. Hemamou, L.; Guillon, A.; Martin, J.-C.; and Clavel, C. In 2021 9th International Conference on Affective Computing and Intelligent Interaction (ACII), pages 1–8, September 2021. IEEE.
@inproceedings{hemamou2021judge,
  title     = {Don’t Judge Me by My Face: An Indirect Adversarial Approach to Remove Sensitive Information From Multimodal Neural Representation in Asynchronous Job Video Interviews},
  author    = {Hemamou, Leo and Guillon, Arthur and Martin, Jean-Claude and Clavel, Chloe},
  booktitle = {2021 9th International Conference on Affective Computing and Intelligent Interaction (ACII)},
  year      = {2021},
  month     = {9},
  day       = {28},
  pages     = {1--8},
  publisher = {IEEE},
  doi       = {10.1109/ACII52823.2021.9597443},
  url       = {https://ieeexplore.ieee.org/document/9597443/}
}
2020 (1)
Attention Slices dans les Entretiens d'Embauche Vidéo Différés. Hemamou, L.; Guillon, A.; Martin, J.-C.; and Clavel, C. In Workshop sur les “Affects, Compagnons Artificiels et Interactions” (ACAI), pages 1–9, 2020.
@inproceedings{hemamou2020attention,
  title     = {Attention Slices dans les Entretiens d'Embauche Vidéo Différés},
  author    = {Hemamou, Léo and Guillon, Arthur and Martin, Jean-Claude and Clavel, Chloé},
  booktitle = {Workshop sur les “Affects, Compagnons Artificiels et Interactions” (ACAI)},
  year      = {2020},
  pages     = {1--9},
  keywords  = {apprentissage profond, entretiens d'embauche, entretiens vidéo}
}
2019 (2)
Slices of Attention in Asynchronous Video Job Interviews. Hemamou, L.; Felhi, G.; Martin, J.-C.; and Clavel, C. In 2019 8th International Conference on Affective Computing and Intelligent Interaction (ACII), pages 1–7, September 2019. IEEE.
@inproceedings{hemamou2019slices,
  title     = {Slices of Attention in Asynchronous Video Job Interviews},
  author    = {Hemamou, Leo and Felhi, Ghazi and Martin, Jean-Claude and Clavel, Chloe},
  booktitle = {2019 8th International Conference on Affective Computing and Intelligent Interaction (ACII)},
  year      = {2019},
  month     = {9},
  pages     = {1--7},
  publisher = {IEEE},
  doi       = {10.1109/ACII.2019.8925439},
  url       = {https://ieeexplore.ieee.org/document/8925439/}
}
HireNet: A Hierarchical Attention Model for the Automatic Analysis of Asynchronous Video Job Interviews. Hemamou, L.; Felhi, G.; Vandenbussche, V.; Martin, J.-C.; and Clavel, C. Proceedings of the AAAI Conference on Artificial Intelligence, 33: 573–581, July 2019.
@article{hemamou2019hirenet,
  title   = {HireNet: A Hierarchical Attention Model for the Automatic Analysis of Asynchronous Video Job Interviews},
  author  = {Hemamou, Léo and Felhi, Ghazi and Vandenbussche, Vincent and Martin, Jean-Claude and Clavel, Chloé},
  journal = {Proceedings of the AAAI Conference on Artificial Intelligence},
  year    = {2019},
  month   = {7},
  day     = {17},
  volume  = {33},
  pages   = {573--581},
  doi     = {10.1609/aaai.v33i01.3301573},
  url     = {https://aaai.org/ojs/index.php/AAAI/article/view/3832}
}
New technologies drastically change recruitment techniques. Some research projects aim at designing interactive systems that help candidates practice job interviews. Other studies aim at the automatic detection of social signals (e.g. smile, turn of speech, etc...) in videos of job interviews. These studies are limited with respect to the number of interviews they process, but also by the fact that they only analyze simulated job interviews (e.g. students pretending to apply for a fake position). Asynchronous video interviewing tools have become mature products on the human resources market, and thus, a popular step in the recruitment process. As part of a project to help recruiters, we collected a corpus of more than 7000 candidates having asynchronous video job interviews for real positions and recording videos of themselves answering a set of questions. We propose a new hierarchical attention model called HireNet that aims at predicting the hirability of the candidates as evaluated by recruiters. In HireNet, an interview is considered as a sequence of questions and answers containing salient socials signals. Two contextual sources of information are modeled in HireNet: the words contained in the question and in the job position. Our model achieves better F1-scores than previous approaches for each modality (verbal content, audio and video). Results from early and late multimodal fusion suggest that more sophisticated fusion schemes are needed to improve on the monomodal results. Finally, some examples of moments captured by the attention mechanisms suggest our model could potentially be used to help finding key moments in an asynchronous job interview.
2018 (1)
Entretien vidéo différé : modèle prédictif pour pré-sélection de candidats sur la base du contenu verbal. Hemamou, L.; Wajntrob, G.; Martin, J.-C.; and Clavel, C. In Workshop sur les “Affects, Compagnons Artificiels et Interactions” (ACAI), pages 1–8, 2018.
@inproceedings{Hemamou,
  title     = {Entretien vidéo différé : modèle prédictif pour pré-sélection de candidats sur la base du contenu verbal},
  author    = {Hemamou, Léo and Wajntrob, Grégory and Martin, Jean-Claude and Clavel, Chloé},
  booktitle = {Workshop sur les “Affects, Compagnons Artificiels et Interactions” (ACAI)},
  year      = {2018},
  pages     = {1--8}
}