From 24ade7f6d4acb6a0de5bec11c3dd348814117f44 Mon Sep 17 00:00:00 2001 From: BirkhoffG <26811230+BirkhoffG@users.noreply.github.com> Date: Sun, 20 Oct 2024 12:20:23 -0400 Subject: [PATCH] Update reference DOI --- paper/paper.bib | 1624 +++++++---------------------------------------- 1 file changed, 225 insertions(+), 1399 deletions(-) diff --git a/paper/paper.bib b/paper/paper.bib index f4124f4..61dc9c3 100644 --- a/paper/paper.bib +++ b/paper/paper.bib @@ -1,1055 +1,91 @@ -% bi-level -@inproceedings{shaban2019truncated, - title={Truncated back-propagation for bilevel optimization}, - author={Shaban, Amirreza and Cheng, Ching-An and Hatch, Nathan and Boots, Byron}, - booktitle={The 22nd International Conference on Artificial Intelligence and Statistics}, - pages={1723--1732}, - year={2019}, - organization={PMLR} -} - -@article{gdpr, -author = {Chris Jay Hoofnagle and Bart van der Sloot and Frederik Zuiderveen Borgesius}, -title = {The European Union general data protection regulation: what it is and what it means}, -journal = {Information \& Communications Technology Law}, -volume = {28}, -number = {1}, -pages = {65-98}, -year = {2019}, -publisher = {Routledge} - -} - - -@article{gu2022min, - title={Min-Max Bilevel Multi-objective Optimization with Applications in Machine Learning}, - author={Gu, Alex and Lu, Songtao and Ram, Parikshit and Weng, Lily}, - journal={arXiv preprint arXiv:2203.01924}, - year={2022} -} - - -% HPO -@inproceedings{maclaurin2015gradient, - title={Gradient-based hyperparameter optimization through reversible learning}, - author={Maclaurin, Dougal and Duvenaud, David and Adams, Ryan}, - booktitle={International conference on machine learning}, - pages={2113--2122}, - year={2015}, - organization={PMLR} -} - -%German Credit -@misc{asuncion2007uci, - title={UCI machine learning repository}, - author={Asuncion, Arthur and Newman, David}, - year={2007}, - publisher={Irvine, CA, USA} -} - -% student -@article{cortez2008using, - title={Using data mining to predict secondary school student performance}, - author={Cortez, Paulo and Silva, Alice Maria Gon{\c{c}}alves}, - year={2008}, - publisher={EUROSIS-ETI} -} - - -%=========================Adv Training - -@article{goodfellow2014explaining, - title={Explaining and harnessing adversarial examples}, - author={Goodfellow, Ian J and Shlens, Jonathon and Szegedy, Christian}, - journal={arXiv preprint arXiv:1412.6572}, - year={2014} -} - - -% backdoor -@inproceedings{saha2020hidden, - title={Hidden trigger backdoor attacks}, - author={Saha, Aniruddha and Subramanya, Akshayvarun and Pirsiavash, Hamed}, - booktitle={Proceedings of the AAAI conference on artificial intelligence}, - volume={34}, - number={07}, - pages={11957--11965}, - year={2020} -} -% patch -@article{gu2019badnets, - title={Badnets: Evaluating backdooring attacks on deep neural networks}, - author={Gu, Tianyu and Liu, Kang and Dolan-Gavitt, Brendan and Garg, Siddharth}, - journal={IEEE Access}, - volume={7}, - pages={47230--47244}, - year={2019}, - publisher={IEEE} -} - -% Posioning attack: Meta Poison -@article{huang2020metapoison, - title={Metapoison: Practical general-purpose clean-label data poisoning}, - author={Huang, W Ronny and Geiping, Jonas and Fowl, Liam and Taylor, Gavin and Goldstein, Tom}, - journal={Advances in Neural Information Processing Systems}, - volume={33}, - pages={12080--12091}, - year={2020} -} -% Gradient Matching -@inproceedings{ -geiping2021witches, -title={Witches' Brew: Industrial Scale Data Poisoning via Gradient Matching}, 
-author={Jonas Geiping and Liam H Fowl and W. Ronny Huang and Wojciech Czaja and Gavin Taylor and Michael Moeller and Tom Goldstein}, -booktitle={International Conference on Learning Representations}, -year={2021}, -url={https://openreview.net/forum?id=01olnfLIbD} -} -% Poisoning defense -@article{geiping2021doesn, - title={What Doesn't Kill You Makes You Robust (er): Adversarial Training against Poisons and Backdoors}, - author={Geiping, Jonas and Fowl, Liam and Somepalli, Gowthami and Goldblum, Micah and Moeller, Michael and Goldstein, Tom}, - journal={arXiv preprint arXiv:2102.13624}, - year={2021} -} - -@article{gao2022effectiveness, - title={On the Effectiveness of Adversarial Training against Backdoor Attacks}, - author={Gao, Yinghua and Wu, Dongxian and Zhang, Jingfeng and Gan, Guanhao and Xia, Shu-Tao and Niu, Gang and Sugiyama, Masashi}, - journal={arXiv preprint arXiv:2202.10627}, - year={2022} -} - -@inproceedings{chen2022efficient, - title={Efficient robust training via backward smoothing}, - author={Chen, Jinghui and Cheng, Yu and Gan, Zhe and Gu, Quanquan and Liu, Jingjing}, - booktitle={Proceedings of the AAAI Conference on Artificial Intelligence (AAAI)}, - year={2022} -} - - -@article{madry2017towards, - title={Towards deep learning models resistant to adversarial attacks}, - author={Madry, Aleksander and Makelov, Aleksandar and Schmidt, Ludwig and Tsipras, Dimitris and Vladu, Adrian}, - journal={arXiv preprint arXiv:1706.06083}, - year={2017} -} - -@article{ilyas2019adversarial, - title={Adversarial Examples Are Not Bugs, They Are Features}, - author={Ilyas, Andrew and Santurkar, Shibani and Tsipras, Dimitris and Engstrom, Logan and Tran, Brandon and Madry, Aleksander}, - journal={Advances in Neural Information Processing Systems}, - volume={32}, - pages={125--136}, - year={2019} -} - -@inproceedings{cai2018curriculum, - title={Curriculum adversarial training}, - author={Cai, Qi-Zhi and Liu, Chang and Song, Dawn}, - booktitle={Proceedings of the 27th International Joint Conference on Artificial Intelligence}, - pages={3740--3747}, - year={2018} -} - -@inproceedings{wang2019convergence, - title={On the Convergence and Robustness of Adversarial Training}, - author={Wang, Yisen and Ma, Xingjun and Bailey, James and Yi, Jinfeng and Zhou, Bowen and Gu, Quanquan}, - booktitle={International Conference on Machine Learning}, - pages={6586--6595}, - year={2019}, - organization={PMLR} -} -@article{shafahi2019free, - title={Adversarial training for free!}, - author={Shafahi, Ali and Najibi, Mahyar and Ghiasi, Mohammad Amin and Xu, Zheng and Dickerson, John and Studer, Christoph and Davis, Larry S and Taylor, Gavin and Goldstein, Tom}, - journal={Advances in Neural Information Processing Systems}, - volume={32}, - year={2019} -} - -@inproceedings{wong2019fast, - title={Fast is better than free: Revisiting adversarial training}, - author={Wong, Eric and Rice, Leslie and Kolter, J Zico}, - booktitle={International Conference on Learning Representations}, - year={2019} -} - -@inproceedings{rice2020overfitting, - title={Overfitting in adversarially robust deep learning}, - author={Rice, Leslie and Wong, Eric and Kolter, Zico}, - booktitle={International Conference on Machine Learning}, - pages={8093--8104}, - year={2020}, - organization={PMLR} -} - -@inproceedings{slack2020fooling, - title={Fooling lime and shap: Adversarial attacks on post hoc explanation methods}, - author={Slack, Dylan and Hilgard, Sophie and Jia, Emily and Singh, Sameer and Lakkaraju, Himabindu}, - 
booktitle={Proceedings of the AAAI/ACM Conference on AI, Ethics, and Society}, - pages={180--186}, - year={2020} -} - - -@inproceedings{finn2017model, - title={Model-agnostic meta-learning for fast adaptation of deep networks}, - author={Finn, Chelsea and Abbeel, Pieter and Levine, Sergey}, - booktitle={International conference on machine learning}, - pages={1126--1135}, - year={2017}, - organization={PMLR} -} - - -%=========================Recourse -@article{stepin2021survey, - title={A survey of contrastive and counterfactual explanation generation methods for explainable artificial intelligence}, - author={Stepin, Ilia and Alonso, Jose M and Catala, Alejandro and Pereira-Fari{\~n}a, Mart{\'\i}n}, - journal={IEEE Access}, - volume={9}, - pages={11974--12001}, - year={2021}, - publisher={IEEE} -} - -@article{slack2021counterfactual, - title={Counterfactual explanations can be manipulated}, - author={Slack, Dylan and Hilgard, Anna and Lakkaraju, Himabindu and Singh, Sameer}, - journal={Advances in Neural Information Processing Systems}, - volume={34}, - year={2021} -} - -@article{pawelczyk2022algorithmic, - title={Algorithmic Recourse in the Face of Noisy Human Responses}, - author={Pawelczyk, Martin and Datta, Teresa and van-den-Heuvel, Johannes and Kasneci, Gjergji and Lakkaraju, Himabindu}, - journal={arXiv preprint arXiv:2203.06768}, - year={2022} -} - -@inproceedings{pawelczyk2020counterfactual, - title={On counterfactual explanations under predictive multiplicity}, - author={Pawelczyk, Martin and Broelemann, Klaus and Kasneci, Gjergji}, - booktitle={Conference on Uncertainty in Artificial Intelligence}, - pages={809--818}, - year={2020}, - organization={PMLR} -} - -@inproceedings{barocas2020hidden, - title={The hidden assumptions behind counterfactual explanations and principal reasons}, - author={Barocas, Solon and Selbst, Andrew D and Raghavan, Manish}, - booktitle={Proceedings of the 2020 Conference on Fairness, Accountability, and Transparency}, - pages={80--89}, - year={2020} -} - -@article{upadhyay2021towards, - title={Towards Robust and Reliable Algorithmic Recourse}, - author={Upadhyay, Sohini and Joshi, Shalmali and Lakkaraju, Himabindu}, - journal={arXiv preprint arXiv:2102.13620}, - year={2021} -} - -@article{rawal2020can, - title={Algorithmic Recourse in the Wild: Understanding the Impact of Data and Model Shifts}, - author={Rawal, Kaivalya and Kamar, Ece and Lakkaraju, Himabindu}, - journal={arXiv preprint arXiv:2012.11788}, - year={2020} -} - -@inproceedings{gilpin2018explaining, - title={Explaining explanations: An overview of interpretability of machine learning}, - author={Gilpin, Leilani H and Bau, David and Yuan, Ben Z and Bajwa, Ayesha and Specter, Michael and Kagal, Lalana}, - booktitle={2018 IEEE 5th International Conference on data science and advanced analytics (DSAA)}, - pages={80--89}, - year={2018}, - organization={IEEE} -} - -@article{murdoch2019definitions, - title={Definitions, methods, and applications in interpretable machine learning}, - author={Murdoch, W James and Singh, Chandan and Kumbier, Karl and Abbasi-Asl, Reza and Yu, Bin}, - journal={Proceedings of the National Academy of Sciences}, - volume={116}, - number={44}, - pages={22071--22080}, - year={2019}, - publisher={National Acad Sciences} -} - -@article{yadav2020kdd, - title={Identifying {H}omeless {Y}outh at-risk of {S}ubstance {U}se {D}isorder: {D}ata {D}riven {I}nsights for {P}olicymakers}, - author={Tabar, Maryam and Park, Heesoo and Winkler, Stephanie and Lee, Dongwon and 
Barman-Adhikari, Anamika and Yadav, Amulya}, - journal={Proc. 26th ACM SIGKDD Conference on Knowledge Discovery and Data Mining (KDD)}, - pages={3092--3100}, - year={2020}, - publisher={ACM} -} - -@book{molnar2019, - title = {Interpretable Machine Learning}, - author = {Christoph Molnar}, - note = {\url{https://christophm.github.io/interpretable-ml-book/}}, - year = {2019}, - subtitle = {A Guide for Making Black Box Models Explainable} -} - -@article{sushant2020, - title={Trade-Offs between Fairness and Interpretability in Machine Learning}, - author={Agarwal, Sushant}, - journal={Proc. 3rd International Workshop on AI for Social Good}, - year={2020}, - publisher={IJCAI} -} - -@article{blake1998uci, - title={UCI repository of machine learning databases}, - author={Blake, Catherine}, - journal={http://www. ics. uci. edu/\~{} mlearn/MLRepository. html}, - year={1998}, - publisher={University of California, Department of Information and Computer Science} -} - -@article{longoni2019resistance, - title={Resistance to medical artificial intelligence}, - author={Longoni, Chiara and Bonezzi, Andrea and Morewedge, Carey K}, - journal={Journal of Consumer Research}, - volume={46}, - number={4}, - pages={629--650}, - year={2019}, - publisher={Oxford University Press} -} - -@article{kleinberg2018human, - title={Human decisions and machine predictions}, - author={Kleinberg, Jon and Lakkaraju, Himabindu and Leskovec, Jure and Ludwig, Jens and Mullainathan, Sendhil}, - journal={The quarterly journal of economics}, - volume={133}, - number={1}, - pages={237--293}, - year={2018}, - publisher={Oxford University Press} -} - -@article{lipton2018mythos, - title={The mythos of model interpretability}, - author={Lipton, Zachary C}, - journal={Queue}, - volume={16}, - number={3}, - pages={31--57}, - year={2018}, - publisher={ACM New York, NY, USA} -} - -@article{rudin2019stop, - title={Stop explaining black box machine learning models for high stakes decisions and use interpretable models instead}, - author={Rudin, Cynthia}, - journal={Nature Machine Intelligence}, - volume={1}, - number={5}, - pages={206--215}, - year={2019}, - publisher={Nature Publishing Group} -} - -@inproceedings{lou2013accurate, - title={Accurate intelligible models with pairwise interactions}, - author={Lou, Yin and Caruana, Rich and Gehrke, Johannes and Hooker, Giles}, - booktitle={Proceedings of the 19th ACM SIGKDD international conference on Knowledge discovery and data mining}, - pages={623--631}, - year={2013} -} - -@inproceedings{caruana2015intelligible, - title={Intelligible models for healthcare: Predicting pneumonia risk and hospital 30-day readmission}, - author={Caruana, Rich and Lou, Yin and Gehrke, Johannes and Koch, Paul and Sturm, Marc and Elhadad, Noemie}, - booktitle={Proceedings of the 21th ACM SIGKDD international conference on knowledge discovery and data mining}, - pages={1721--1730}, - year={2015} -} - -@article{letham2015interpretable, - title={Interpretable classifiers using rules and bayesian analysis: Building a better stroke prediction model}, - author={Letham, Benjamin and Rudin, Cynthia and McCormick, Tyler H and Madigan, David and others}, - journal={The Annals of Applied Statistics}, - volume={9}, - number={3}, - pages={1350--1371}, - year={2015}, - publisher={Institute of Mathematical Statistics} -} - -@inproceedings{lakkaraju2016interpretable, - title={Interpretable decision sets: A joint framework for description and prediction}, - author={Lakkaraju, Himabindu and Bach, Stephen H and Leskovec, Jure}, - 
booktitle={Proceedings of the 22nd ACM SIGKDD international conference on knowledge discovery and data mining}, - pages={1675--1684}, - year={2016} -} - -@inproceedings{tan2018distill, - title={Distill-and-compare: Auditing black-box models using transparent model distillation}, - author={Tan, Sarah and Caruana, Rich and Hooker, Giles and Lou, Yin}, - booktitle={Proceedings of the 2018 AAAI/ACM Conference on AI, Ethics, and Society}, - pages={303--310}, - year={2018} -} - -@inproceedings{ribeiro2016lime, - title={" Why should I trust you?" Explaining the predictions of any classifier}, - author={Ribeiro, Marco Tulio and Singh, Sameer and Guestrin, Carlos}, - booktitle={Proceedings of the 22nd ACM SIGKDD international conference on knowledge discovery and data mining}, - pages={1135--1144}, - year={2016} -} - -@inproceedings{lundberg2017unified, - title={A unified approach to interpreting model predictions}, - author={Lundberg, Scott M and Lee, Su-In}, - booktitle={Advances in neural information processing systems}, - pages={4765--4774}, - year={2017} -} - -@inproceedings{kim2018interpretability, - title={Interpretability beyond feature attribution: Quantitative testing with concept activation vectors (tcav)}, - author={Kim, Been and Wattenberg, Martin and Gilmer, Justin and Cai, Carrie and Wexler, James and Viegas, Fernanda and others}, - booktitle={International conference on machine learning}, - pages={2668--2677}, - year={2018}, - organization={PMLR} -} - -@inproceedings{koh2017understanding, - title={Understanding black-box predictions via influence functions}, - author={Koh, Pang Wei and Liang, Percy}, - booktitle={International conference on machine learning}, - pages={1885--1894}, - year={2017}, - organization={PMLR} -} - -@inproceedings{lakkaraju2019faithful, - title={Faithful and customizable explanations of black box models}, - author={Lakkaraju, Himabindu and Kamar, Ece and Caruana, Rich and Leskovec, Jure}, - booktitle={Proceedings of the 2019 AAAI/ACM Conference on AI, Ethics, and Society}, - pages={131--138}, - year={2019} +@inproceedings{ustun2019actionable, + author = {Ustun, Berk and Spangher, Alexander and Liu, Yang}, + title = {Actionable Recourse in Linear Classification}, + year = {2019}, + isbn = {9781450361255}, + publisher = {Association for Computing Machinery}, + address = {New York, NY, USA}, + url = {https://doi.org/10.1145/3287560.3287566}, + doi = {10.1145/3287560.3287566}, + booktitle = {Proceedings of the Conference on Fairness, Accountability, and Transparency}, + pages = {10–19}, + numpages = {10}, + keywords = {accountability, audit, classification, credit scoring, integer programming, recourse}, + location = {Atlanta, GA, USA}, + series = {FAT* '19} } - @article{wachter2017counterfactual, - title={Counterfactual explanations without opening the black box: Automated decisions and the GDPR}, + title={Counterfactual Explanations Without Opening the Black Box: Automated Decisions and the GDPR}, author={Wachter, Sandra and Mittelstadt, Brent and Russell, Chris}, - journal={Harv. 
JL \& Tech.},
-  volume={31},
-  pages={841},
+  journal={Harvard Journal of Law \& Technology},
+  volume={31},
+  pages={841},
   year={2017},
-  publisher={HeinOnline}
-}
-
-@article{altmann2010permutation,
-  title={Permutation importance: a corrected feature importance measure},
-  author={Altmann, Andr{\'e} and Tolo{\c{s}}i, Laura and Sander, Oliver and Lengauer, Thomas},
-  journal={Bioinformatics},
-  volume={26},
-  number={10},
-  pages={1340--1347},
-  year={2010},
-  publisher={Oxford University Press}
-}
-
-@inproceedings{Oscar2018casebased,
-  author = {Oscar Li and
-            Hao Liu and
-            Chaofan Chen and
-            Cynthia Rudin},
-  title = {Deep Learning for Case-Based Reasoning Through Prototypes: {A} Neural
-           Network That Explains Its Predictions},
-  booktitle = {{AAAI}},
-  pages = {3530--3537},
-  publisher = {{AAAI} Press},
-  year = {2018}
-}
-
-@inproceedings{melis2018towards,
-  title={Towards robust interpretability with self-explaining neural networks},
-  author={Melis, David Alvarez and Jaakkola, Tommi},
-  booktitle={Advances in Neural Information Processing Systems},
-  pages={7775--7784},
-  year={2018}
+  url={https://api.semanticscholar.org/CorpusID:3995299}
 }
-
-@article{narayanan2018humans,
-  title={How do humans understand explanations from machine learning systems? an evaluation of the human-interpretability of explanation},
-  author={Narayanan, Menaka and Chen, Emily and He, Jeffrey and Kim, Been and Gershman, Sam and Doshi-Velez, Finale},
-  journal={arXiv preprint arXiv:1802.00682},
-  year={2018}
-}
-
-
-@inproceedings{kaur2020interpreting,
-  title={Interpreting Interpretability: Understanding Data Scientists' Use of Interpretability Tools for Machine Learning},
-  author={Kaur, Harmanpreet and Nori, Harsha and Jenkins, Samuel and Caruana, Rich and Wallach, Hanna and Wortman Vaughan, Jennifer},
-  booktitle={Proceedings of the 2020 CHI Conference on Human Factors in Computing Systems},
-  pages={1--14},
+@article{verma2020counterfactual,
+  title={Counterfactual Explanations for Machine Learning: A Review},
+  author={Verma, Sahil and Dickerson, John and Hines, Keegan},
+  journal={arXiv preprint arXiv:2010.10596},
   year={2020}
 }
-
-@inproceedings{dodge2019explaining,
-  title={Explaining models: an empirical study of how explanations impact fairness judgment},
-  author={Dodge, Jonathan and Liao, Q Vera and Zhang, Yunfeng and Bellamy, Rachel KE and Dugan, Casey},
-  booktitle={Proceedings of the 24th International Conference on Intelligent User Interfaces},
-  pages={275--285},
-  year={2019}
-}
-
-@inproceedings{cai2019human,
-  title={Human-centered tools for coping with imperfect algorithms during medical decision-making},
-  author={Cai, Carrie J and Reif, Emily and Hegde, Narayan and Hipp, Jason and Kim, Been and Smilkov, Daniel and Wattenberg, Martin and Viegas, Fernanda and Corrado, Greg S and Stumpe, Martin C and others},
-  booktitle={Proceedings of the 2019 CHI Conference on Human Factors in Computing Systems},
-  pages={1--14},
-  year={2019}
-}
-
-@inproceedings{wang2019designing,
-  title={Designing theory-driven user-centric explainable AI},
-  author={Wang, Danding and Yang, Qian and Abdul, Ashraf and Lim, Brian Y},
-  booktitle={Proceedings of the 2019 CHI conference on human factors in computing systems},
-  pages={1--15},
-  year={2019}
-}
-
-@inproceedings{krause2016interacting,
-  title={Interacting with predictions: Visual inspection of black-box machine learning models},
-  author={Krause, Josua and Perer, Adam and Ng, Kenney},
-  booktitle={Proceedings of the 2016 CHI Conference on Human Factors in Computing Systems},
-  pages={5686--5697},
-  year={2016}
-}
-
-@inproceedings{lage2019human,
-  title={Human evaluation of models built for interpretability},
-  author={Lage, Isaac and Chen, Emily and He, Jeffrey and Narayanan, Menaka and Kim, Been and Gershman, Samuel J and Doshi-Velez, Finale},
-  booktitle={Proceedings of the AAAI Conference on Human Computation and Crowdsourcing},
-  volume={7},
-  number={1},
-  pages={59--67},
-  year={2019}
-}
-
-
-@article{shmueli2010explain,
-  title={To explain or to predict?},
-  author={Shmueli, Galit and others},
-  journal={Statistical science},
-  volume={25},
-  number={3},
-  pages={289--310},
-  year={2010},
-  publisher={Institute of Mathematical Statistics}
-}
-
-@article{miller2019explanation,
-  title={Explanation in artificial intelligence: Insights from the social sciences},
-  author={Miller, Tim},
-  journal={Artificial Intelligence},
-  volume={267},
-  pages={1--38},
-  year={2019},
-  publisher={Elsevier}
+@article{stepin2021survey,
+  author={Stepin, Ilia and Alonso, Jose M. and Catala, Alejandro and Pereira-Fariña, Martín},
+  journal={IEEE Access},
+  title={A Survey of Contrastive and Counterfactual Explanation Generation Methods for Explainable Artificial Intelligence},
+  year={2021},
+  volume={9},
+  pages={11974--12001},
+  keywords={Cognition;Artificial intelligence;Training;Terminology;Taxonomy;Systematics;Signal to noise ratio;Computational intelligence;contrastive explanations;counterfactuals;explainable artificial intelligence;systematic literature review},
+  doi={10.1109/ACCESS.2021.3051315}
}
-
-@inproceedings{kim2016examples,
-  title={Examples are not enough, learn to criticize! criticism for interpretability},
-  author={Kim, Been and Khanna, Rajiv and Koyejo, Oluwasanmi O},
-  booktitle={Advances in neural information processing systems},
-  pages={2280--2288},
-  year={2016}
+@article{karimi2020survey,
+  author = {Karimi, Amir-Hossein and Barthe, Gilles and Sch\"{o}lkopf, Bernhard and Valera, Isabel},
+  title = {A Survey of Algorithmic Recourse: Contrastive Explanations and Consequential Recommendations},
+  year = {2022},
+  issue_date = {May 2023},
+  publisher = {Association for Computing Machinery},
+  address = {New York, NY, USA},
+  volume = {55},
+  number = {5},
+  issn = {0360-0300},
+  url = {https://doi.org/10.1145/3527848},
+  doi = {10.1145/3527848},
+  abstract = {Machine learning is increasingly used to inform decision making in sensitive situations where decisions have consequential effects on individuals’ lives. In these settings, in addition to requiring models to be accurate and robust, socially relevant values such as fairness, privacy, accountability, and explainability play an important role in the adoption and impact of said technologies. In this work, we focus on algorithmic recourse, which is concerned with providing explanations and recommendations to individuals who are unfavorably treated by automated decision-making systems. We first perform an extensive literature review, and align the efforts of many authors by presenting unified definitions, formulations, and solutions to recourse. Then, we provide an overview of the prospective research directions toward which the community may engage, challenging existing assumptions and making explicit connections to other ethical challenges such as security, privacy, and fairness.},
+  journal = {ACM Comput. 
Surv.}, + month = dec, + articleno = {95}, + numpages = {29}, + keywords = {contrastive explanations and consequential recommendations, Algorithmic recourse} } - @inproceedings{binns2018s, - title={'It's Reducing a Human Being to a Percentage' Perceptions of Justice in Algorithmic Decisions}, - author={Binns, Reuben and Van Kleek, Max and Veale, Michael and Lyngs, Ulrik and Zhao, Jun and Shadbolt, Nigel}, - booktitle={Proceedings of the 2018 Chi conference on human factors in computing systems}, - pages={1--14}, - year={2018} -} - -@inproceedings{ghorbani2019interpretation, - title={Interpretation of neural networks is fragile}, - author={Ghorbani, Amirata and Abid, Abubakar and Zou, James}, - booktitle={Proceedings of the AAAI Conference on Artificial Intelligence}, - volume={33}, - pages={3681--3688}, - year={2019} -} - -@inproceedings{mittelstadt2019explaining, - title={Explaining explanations in AI}, - author={Mittelstadt, Brent and Russell, Chris and Wachter, Sandra}, - booktitle={Proceedings of the conference on fairness, accountability, and transparency}, - pages={279--288}, - year={2019} -} - -@article{vandewiele2016genesim, - title={Genesim: genetic extraction of a single, interpretable model}, - author={Vandewiele, Gilles and Janssens, Olivier and Ongenae, Femke and De Turck, Filip and Van Hoecke, Sofie}, - journal={arXiv preprint arXiv:1611.05722}, - year={2016} -} - -@inproceedings{ma2020adacare, - title={AdaCare: Explainable Clinical Health Status Representation Learning via Scale-Adaptive Feature Extraction and Recalibration}, - author={Ma, Liantao and Gao, Junyi and Wang, Yasha and Zhang, Chaohe and Wang, Jiangtao and Ruan, Wenjie and Tang, Wen and Gao, Xin and Ma, Xinyu}, - booktitle={Proceedings of the AAAI Conference on Artificial Intelligence}, - volume={34}, - number={01}, - pages={825--832}, - year={2020} -} - -@inproceedings{chouldechova2018case, - title={A case study of algorithm-assisted decision making in child maltreatment hotline screening decisions}, - author={Chouldechova, Alexandra and Benavides-Prado, Diana and Fialko, Oleksandr and Vaithianathan, Rhema}, - booktitle={Conference on Fairness, Accountability and Transparency}, - pages={134--148}, - year={2018} -} - -@inproceedings{yadav2016using, - title={Using Social Networks to Aid Homeless Shelters: Dynamic Influence Maximization under Uncertainty.}, - author={Yadav, Amulya and Chan, Hau and Jiang, Albert Xin and Xu, Haifeng and Rice, Eric and Tambe, Milind}, - booktitle={AAMAS}, - volume={16}, - pages={740--748}, - year={2016} -} - -@inproceedings{rice2020using, - title={Using artificial intelligence to augment network-based, HIV prevention for youth experiencing homelessness}, - author={Rice, Eric and Onasch-Vera, Laura and Diguiseppi, Graham and Hill, Chyna and Petering, Robin and Wilson, Nicole and Woo, Darlene and Thompson, Nicole and Tambe, Milind and Wilder, Bryan and others}, - booktitle={APHA's 2020 VIRTUAL Annual Meeting and Expo (Oct. 
24-28)}, - year={2020}, - organization={American Public Health Association} -} - -@book{tambe2018artificial, - title={Artificial Intelligence and Social Work}, - author={Tambe, Milind and Rice, Eric}, - year={2018}, - publisher={Cambridge University Press} -} - -@article{floridi2020design, - title={How to Design AI for Social Good: Seven Essential Factors}, - author={Floridi, Luciano and Cowls, Josh and King, Thomas C and Taddeo, Mariarosaria}, - journal={Sci Eng Ethics}, - year={2020}, - publisher={Springer} -} - -@inproceedings{fang2016deploying, - title={Deploying PAWS: Field Optimization of the Protection Assistant for Wildlife Security.}, - author={Fang, Fei and Nguyen, Thanh Hong and Pickles, Rob and Lam, Wai Y and Clements, Gopalasamy R and An, Bo and Singh, Amandeep and Tambe, Milind and Lemieux, Andrew and others}, - booktitle={AAAI}, - volume={16}, - pages={3966--3973}, - year={2016} -} - -@inproceedings{lakkaraju2015machine, - title={A machine learning framework to identify students at risk of adverse academic outcomes}, - author={Lakkaraju, Himabindu and Aguiar, Everaldo and Shan, Carl and Miller, David and Bhanpuri, Nasir and Ghani, Rayid and Addison, Kecia L}, - booktitle={Proceedings of the 21th ACM SIGKDD international conference on knowledge discovery and data mining}, - pages={1909--1918}, - year={2015} -} - -@inproceedings{mothilal2020explaining, - title={Explaining machine learning classifiers through diverse counterfactual explanations}, - author={Mothilal, Ramaravind K and Sharma, Amit and Tan, Chenhao}, - booktitle={Proceedings of the 2020 Conference on Fairness, Accountability, and Transparency}, - pages={607--617}, - year={2020} -} - -@InProceedings{Dandl2020multi, -author="Dandl, Susanne -and Molnar, Christoph -and Binder, Martin -and Bischl, Bernd", -editor="B{\"a}ck, Thomas -and Preuss, Mike -and Deutz, Andr{\'e} -and Wang, Hao -and Doerr, Carola -and Emmerich, Michael -and Trautmann, Heike", -title="Multi-Objective Counterfactual Explanations", -booktitle="Parallel Problem Solving from Nature -- PPSN XVI", -year="2020", -publisher="Springer International Publishing", -address="Cham", -pages="448--469", -abstract="Counterfactual explanations are one of the most popular methods to make predictions of black box machine learning models interpretable by providing explanations in the form of `what-if scenarios'. Most current approaches optimize a collapsed, weighted sum of multiple objectives, which are naturally difficult to balance a-priori. We propose the Multi-Objective Counterfactuals (MOC) method, which translates the counterfactual search into a multi-objective optimization problem. Our approach not only returns a diverse set of counterfactuals with different trade-offs between the proposed objectives, but also maintains diversity in feature space. This enables a more detailed post-hoc analysis to facilitate better understanding and also more options for actionable user responses to change the predicted outcome. Our approach is also model-agnostic and works for numerical and categorical input features. 
We show the usefulness of MOC in concrete cases and compare our approach with state-of-the-art methods for counterfactual explanations.", -isbn="978-3-030-58112-1" -} - -@inproceedings{sarwar2001item, - title={Item-based collaborative filtering recommendation algorithms}, - author={Sarwar, Badrul and Karypis, George and Konstan, Joseph and Riedl, John}, - booktitle={Proceedings of the 10th international conference on World Wide Web}, - pages={285--295}, - year={2001} -} - -@article{weinberger2009distance, - title={Distance metric learning for large margin nearest neighbor classification.}, - author={Weinberger, Kilian Q and Saul, Lawrence K}, - journal={Journal of Machine Learning Research}, - volume={10}, - number={2}, - year={2009} -} - -@inproceedings{caruana1999case, - title={Case-based explanation of non-case-based learning methods.}, - author={Caruana, Rich and Kangarloo, Hooshang and Dionisio, John David and Sinha, Usha and Johnson, David}, - booktitle={Proceedings of the AMIA Symposium}, - pages={212}, - year={1999}, - organization={American Medical Informatics Association} -} - -@inproceedings{kanehira2019learning, - title={Learning to explain with complemental examples}, - author={Kanehira, Atsushi and Harada, Tatsuya}, - booktitle={Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition}, - pages={8603--8611}, - year={2019} -} - -@inproceedings{chen2019looks, - title={This looks like that: deep learning for interpretable image recognition}, - author={Chen, Chaofan and Li, Oscar and Tao, Daniel and Barnett, Alina and Rudin, Cynthia and Su, Jonathan K}, - booktitle={Advances in neural information processing systems}, - pages={8930--8941}, - year={2019} -} - -@inproceedings{ribera2019can, - title={Can we do better explanations? 
A proposal of user-centered explainable AI.}, - author={Ribera, Mireia and Lapedriza, Agata}, - booktitle={IUI Workshops}, - year={2019} -} - -@inproceedings{yadav2018bridging, - title={Bridging the Gap Between Theory and Practice in Influence Maximization: Raising Awareness about HIV among Homeless Youth.}, - author={Yadav, Amulya and Wilder, Bryan and Rice, Eric and Petering, Robin and Craddock, Jaih and Yoshioka-Maxwell, Amanda and Hemler, Mary and Onasch-Vera, Laura and Tambe, Milind and Woo, Darlene}, - booktitle={IJCAI}, - pages={5399--5403}, - year={2018} -} - -@inproceedings{yadav2017explanation, - title={Explanation Systems for Influence Maximization Algorithms.}, - author={Yadav, Amulya and Rahmattalabi, Aida and Kamar, Ece and Vayanos, Phebe and Tambe, Milind and Noronha, Venil Loyd}, - booktitle={SocInf@ IJCAI}, - pages={8--19}, - year={2017} -} - -@article{schneider2019personalized, - title={Personalized explanation in machine learning: A conceptualization}, - author={Schneider, Johanes and Handali, Joshua}, - journal={arXiv preprint arXiv:1901.00770}, - year={2019} -} - -@article{breiman2001random, - title={Random forests}, - author={Breiman, Leo}, - journal={Machine learning}, - volume={45}, - number={1}, - pages={5--32}, - year={2001}, - publisher={Springer} -} - -@article{lakkaraju2020robust, - title={Robust and stable black box explanations}, - author={Lakkaraju, Himabindu and Arsov, Nino and Bastani, Osbert}, - journal={arXiv preprint arXiv:2011.06169}, - year={2020} -} - -@inproceedings{li2010contextual, - title={A contextual-bandit approach to personalized news article recommendation}, - author={Li, Lihong and Chu, Wei and Langford, John and Schapire, Robert E}, - booktitle={Proceedings of the 19th international conference on World wide web}, - pages={661--670}, - year={2010} + author = {Binns, Reuben and Van Kleek, Max and Veale, Michael and Lyngs, Ulrik and Zhao, Jun and Shadbolt, Nigel}, + title = { 'It's Reducing a Human Being to a Percentage': Perceptions of Justice in Algorithmic Decisions}, + year = {2018}, + isbn = {9781450356206}, + publisher = {Association for Computing Machinery}, + address = {New York, NY, USA}, + url = {https://doi.org/10.1145/3173574.3173951}, + doi = {10.1145/3173574.3173951}, + abstract = {Data-driven decision-making consequential to individuals raises important questions of accountability and justice. Indeed, European law provides individuals limited rights to 'meaningful information about the logic' behind significant, autonomous decisions such as loan approvals, insurance quotes, and CV filtering. We undertake three experimental studies examining people's perceptions of justice in algorithmic decision-making under different scenarios and explanation styles. Dimensions of justice previously observed in response to human decision-making appear similarly engaged in response to algorithmic decisions. Qualitative analysis identified several concerns and heuristics involved in justice perceptions including arbitrariness, generalisation, and (in)dignity. Quantitative analysis indicates that explanation styles primarily matter to justice perceptions only when subjects are exposed to multiple different styles---under repeated exposure of one style, scenario effects obscure any explanation effects. 
Our results suggests there may be no 'best' approach to explaining algorithmic decisions, and that reflection on their automated nature both implicates and mitigates justice dimensions.}, + booktitle = {Proceedings of the 2018 CHI Conference on Human Factors in Computing Systems}, + pages = {1–14}, + numpages = {14}, + keywords = {transparency, machine learning, justice, fairness, explanation, algorithmic decision-making}, + location = {Montreal QC, Canada}, + series = {CHI '18} } - -@article{bobadilla2012collaborative, - title={A collaborative filtering approach to mitigate the new user cold start problem}, - author={Bobadilla, Jes{\'u}S and Ortega, Fernando and Hernando, Antonio and Bernal, Jes{\'u}S}, - journal={Knowledge-based systems}, - volume={26}, - pages={225--238}, - year={2012}, - publisher={Elsevier} -} - -@article{shi2020artificial, - title={Artificial intelligence for social good: A survey}, - author={Shi, Zheyuan Ryan and Wang, Claire and Fang, Fei}, - journal={arXiv preprint arXiv:2001.01818}, - year={2020} -} - -@inproceedings{agrawal1993mining, - title={Mining association rules between sets of items in large databases}, - author={Agrawal, Rakesh and Imieli{\'n}ski, Tomasz and Swami, Arun}, - booktitle={Proceedings of the 1993 ACM SIGMOD international conference on Management of data}, - pages={207--216}, - year={1993} -} - -@article{voigt2017eu, - title={The eu general data protection regulation (gdpr)}, - author={Voigt, Paul and Von dem Bussche, Axel}, - journal={A Practical Guide, 1st Ed., Cham: Springer International Publishing}, - year={2017}, - publisher={Springer} -} - -@article{van2019interpretable, - title={Interpretable counterfactual explanations guided by prototypes}, - author={Van Looveren, Arnaud and Klaise, Janis}, - journal={arXiv preprint arXiv:1907.02584}, - year={2019} -} - -@article{mahajan2019preserving, - title={Preserving causal constraints in counterfactual explanations for machine learning classifiers}, - author={Mahajan, Divyat and Tan, Chenhao and Sharma, Amit}, - journal={arXiv preprint arXiv:1912.03277}, - year={2019} -} - - -@inproceedings{ribeiro2018anchors, - title={Anchors: High-Precision Model-Agnostic Explanations.}, - author={Ribeiro, Marco Tulio and Singh, Sameer and Guestrin, Carlos}, - booktitle={AAAI}, - volume={18}, - pages={1527--1535}, - year={2018} -} - -@article{pedregosa2011scikit, - title={Scikit-learn: Machine learning in Python}, - author={Pedregosa, Fabian and Varoquaux, Ga{\"e}l and Gramfort, Alexandre and Michel, Vincent and Thirion, Bertrand and Grisel, Olivier and Blondel, Mathieu and Prettenhofer, Peter and Weiss, Ron and Dubourg, Vincent and others}, - journal={the Journal of machine Learning research}, - volume={12}, - pages={2825--2830}, - year={2011}, - publisher={JMLR. 
org} -} - -@inproceedings{he2016deep, - title={Deep residual learning for image recognition}, - author={He, Kaiming and Zhang, Xiangyu and Ren, Shaoqing and Sun, Jian}, - booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, - pages={770--778}, - year={2016} -} - -@article{xu2015empirical, - title={Empirical evaluation of rectified activations in convolutional network}, - author={Xu, Bing and Wang, Naiyan and Chen, Tianqi and Li, Mu}, - journal={arXiv preprint arXiv:1505.00853}, - year={2015} -} - -@article{ioffe2015batch, - title={Batch normalization: Accelerating deep network training by reducing internal covariate shift}, - author={Ioffe, Sergey and Szegedy, Christian}, - journal={arXiv preprint arXiv:1502.03167}, - year={2015} -} - -@article{salimans2016improved, - title={Improved techniques for training gans}, - author={Salimans, Tim and Goodfellow, Ian and Zaremba, Wojciech and Cheung, Vicki and Radford, Alec and Chen, Xi}, - journal={arXiv preprint arXiv:1606.03498}, - year={2016} -} - -@article{hazan2017adversarial, - title={Adversarial Perturbations of Deep Neural Networks}, - author={Hazan, Tamir and Papandreou, George and Tarlow, Daniel}, - year={2017}, - publisher={MIT Press} -} - -@article{yeh2009comparisons, - title={The comparisons of data mining techniques for the predictive accuracy of probability of default of credit card clients}, - author={Yeh, I-Cheng and Lien, Che-hui}, - journal={Expert Systems with Applications}, - volume={36}, - number={2}, - pages={2473--2480}, - year={2009}, - publisher={Elsevier} -} - - -@misc{kohavi1996uci, - title={UCI Machine Learning Repository: Adult Data Set}, - author={Kohavi, R and Becker, B}, - year={1996}, - journal={1996-05-01)[2014-10-01]. http: ff archive, ies. uci. edu/ml/data-sets/Adult} -} - -@misc{heloc, -title = {Explainable Machine Learning Challenge}, -author = {FICO}, -month = sep, -year = {2018}, -howpublished = "\url{https://community.fico.com/s/explainable-machine-learning-challenge}" -} - -@misc{titanic, -title = {Titanic - Machine Learning from Disaster}, -author = {Kaggle}, -month = sep, -year = {2018}, -howpublished = "\url{https://www.kaggle.com/c/titanic/overview}" -} - -@article{muller2019does, - title={When does label smoothing help?}, - author={M{\"u}ller, Rafael and Kornblith, Simon and Hinton, Geoffrey}, - journal={arXiv preprint arXiv:1906.02629}, - year={2019} -} - -@misc{accesslex, -title = {US Law School Disclosures to the ABA: Standard 509 disclosures from all US ABA-approved law schools}, -author = {AccessLex}, -month = sep, -year = {2017}, -howpublished = "\url{https://www.kaggle.com/orrisis/us-law-schools}" -} - -@article{kuzilek2017open, - title={Open university learning analytics dataset}, - author={Kuzilek, Jakub and Hlosta, Martin and Zdrahal, Zdenek}, - journal={Scientific data}, - volume={4}, - pages={170171}, - year={2017}, - publisher={Nature Publishing Group} -} - -@article{srivastava2014dropout, - title={Dropout: a simple way to prevent neural networks from overfitting}, - author={Srivastava, Nitish and Hinton, Geoffrey and Krizhevsky, Alex and Sutskever, Ilya and Salakhutdinov, Ruslan}, - journal={The journal of machine learning research}, - volume={15}, - number={1}, - pages={1929--1958}, - year={2014}, - publisher={JMLR. 
org} -} - -@article{ng2011sparse, - title={Sparse autoencoder}, - author={Ng, Andrew and others}, - journal={CS294A Lecture notes}, - volume={72}, - number={2011}, - pages={1--19}, - year={2011} -} - -@article{He2015DelvingDI, - title={Delving Deep into Rectifiers: Surpassing Human-Level Performance on ImageNet Classification}, - author={Kaiming He and X. Zhang and Shaoqing Ren and Jian Sun}, - journal={2015 IEEE International Conference on Computer Vision (ICCV)}, - year={2015}, - pages={1026-1034} -} - -@inproceedings{Goodfellow2014gan, - title={Generative Adversarial Nets}, - author={Ian J. Goodfellow and Jean Pouget-Abadie and M. Mirza and Bing Xu and David Warde-Farley and Sherjil Ozair and Aaron C. Courville and Yoshua Bengio}, - booktitle={NIPS}, - year={2014} -} - -@inproceedings{ustun2019actionable, - title={Actionable recourse in linear classification}, - author={Ustun, Berk and Spangher, Alexander and Liu, Yang}, - booktitle={Proceedings of the Conference on Fairness, Accountability, and Transparency}, - pages={10--19}, - year={2019} -} - -@article{aggarwal2010inverse, - title={The inverse classification problem}, - author={Aggarwal, Charu C and Chen, Chen and Han, Jiawei}, - journal={Journal of Computer Science and Technology}, - volume={25}, - number={3}, - pages={458--468}, - year={2010}, - publisher={Springer} -} - - -@inproceedings{Dhurandhar2018explanations, -author = {Dhurandhar, Amit and Chen, Pin-Yu and Luss, Ronny and Tu, Chun-Chen and Ting, Paishun and Shanmugam, Karthikeyan and Das, Payel}, -title = {Explanations Based on the Missing: Towards Contrastive Explanations with Pertinent Negatives}, -year = {2018}, -publisher = {Curran Associates Inc.}, -address = {Red Hook, NY, USA}, -abstract = {In this paper we propose a novel method that provides contrastive explanations justifying the classification of an input by a black box classifier such as a deep neural network. Given an input we find what should be minimally and sufficiently present (viz. important object pixels in an image) to justify its classification and analogously what should be minimally and necessarily absent (viz. certain background pixels). We argue that such explanations are natural for humans and are used commonly in domains such as health care and criminology. What is minimally but critically absent is an important part of an explanation, which to the best of our knowledge, has not been explicitly identified by current explanation methods that explain predictions of neural networks. We validate our approach on three real datasets obtained from diverse domains; namely, a handwritten digits dataset MNIST, a large procurement fraud dataset and a brain activity strength dataset. 
In all three cases, we witness the power of our approach in generating precise explanations that are also easy for human experts to understand and evaluate.},
-booktitle = {Proceedings of the 32nd International Conference on Neural Information Processing Systems},
-pages = {590–601},
-numpages = {12},
-location = {Montr\'{e}al, Canada},
-series = {NIPS'18}
-}
-
-@inproceedings{10.1145/3375627.3375812,
-author = {Sharma, Shubham and Henderson, Jette and Ghosh, Joydeep},
-title = {CERTIFAI: A Common Framework to Provide Explanations and Analyse the Fairness and Robustness of Black-Box Models},
-year = {2020},
-isbn = {9781450371100},
-publisher = {Association for Computing Machinery},
-address = {New York, NY, USA},
-url = {https://doi.org/10.1145/3375627.3375812},
-doi = {10.1145/3375627.3375812},
-abstract = {Concerns within the machine learning community and external pressures from regulators over the vulnerabilities of machine learning algorithms have spurred on the fields of explainability, robustness, and fairness. Often, issues in explainability, robustness, and fairness are confined to their specific sub-fields and few tools exist for model developers to use to simultaneously build their modeling pipelines in a transparent, accountable, and fair way. This can lead to a bottleneck on the model developer's side as they must juggle multiple methods to evaluate their algorithms. In this paper, we present a single framework for analyzing the robustness, fairness, and explainability of a classifier. The framework, which is based on the generation of counterfactual explanations through a custom genetic algorithm, is flexible, model-agnostic, and does not require access to model internals. The framework allows the user to calculate robustness and fairness scores for individual models and generate explanations for individual predictions which provide a means for actionable recourse (changes to an input to help get a desired outcome). This is the first time that a unified tool has been developed to address three key issues pertaining towards building a responsible artificial intelligence system.},
-booktitle = {Proceedings of the AAAI/ACM Conference on AI, Ethics, and Society},
-pages = {166–172},
-numpages = {7},
-keywords = {robust-ness, machine learning, fairness, explainability, responsible artificial intelligence},
-location = {New York, NY, USA},
-series = {AIES '20}
-}
+@article{miller2019explanation,
+  title = {Explanation in artificial intelligence: Insights from the social sciences},
+  journal = {Artificial Intelligence},
+  volume = {267},
+  pages = {1--38},
+  year = {2019},
+  issn = {0004-3702},
+  doi = {10.1016/j.artint.2018.07.007},
+  url = {https://www.sciencedirect.com/science/article/pii/S0004370218305988},
+  author = {Tim Miller},
+  keywords = {Explanation, Explainability, Interpretability, Explainable AI, Transparency},
+}
-
@inproceedings{Bhatt20explainable,
author = {Bhatt, Umang and Xiang, Alice and Sharma, Shubham and Weller, Adrian and Taly, Ankur and Jia, Yunhan and Ghosh, Joydeep and Puri, Ruchir and Moura, Jos\'{e} M. F. 
and Eckersley, Peter}, title = {Explainable Machine Learning in Deployment}, @@ -1067,169 +103,94 @@ @inproceedings{Bhatt20explainable location = {Barcelona, Spain}, series = {FAT* '20} } - -@article{verma2020counterfactual, - title={Counterfactual Explanations for Machine Learning: A Review}, - author={Verma, Sahil and Dickerson, John and Hines, Keegan}, - journal={arXiv preprint arXiv:2010.10596}, - year={2020} -} - -@article{karimi2020survey, - title={A survey of algorithmic recourse: definitions, formulations, solutions, and prospects}, - author={Karimi, Amir-Hossein and Barthe, Gilles and Sch{\"o}lkopf, Bernhard and Valera, Isabel}, - journal={arXiv preprint arXiv:2010.04050}, - year={2020} -} - -@article{miller2018contrastive, - title={Contrastive explanation: A structural-model approach}, - author={Miller, Tim}, - journal={arXiv preprint arXiv:1811.03163}, - year={2018} -} - -@inproceedings{yang2021model, -author = {Yang, Fan and Alva, Sahan Suresh and Chen, Jiahao and Hu, Xia}, -title = {Model-Based Counterfactual Synthesizer for Interpretation}, -year = {2021}, -isbn = {9781450383325}, -publisher = {Association for Computing Machinery}, -address = {New York, NY, USA}, -url = {https://doi.org/10.1145/3447548.3467333}, -doi = {10.1145/3447548.3467333}, -booktitle = {Proceedings of the 27th ACM SIGKDD Conference on Knowledge Discovery and Data Mining}, -pages = {1964–1974}, -numpages = {11}, -keywords = {counterfactual sample, model interpretation, causal explanation}, -location = {Virtual Event, Singapore}, -series = {KDD '21} -} - -@inproceedings{sharma2020CERTIFAI, -author = {Sharma, Shubham and Henderson, Jette and Ghosh, Joydeep}, -title = {CERTIFAI: A Common Framework to Provide Explanations and Analyse the Fairness and Robustness of Black-Box Models}, -year = {2020}, -isbn = {9781450371100}, -publisher = {Association for Computing Machinery}, -address = {New York, NY, USA}, -url = {https://doi.org/10.1145/3375627.3375812}, -doi = {10.1145/3375627.3375812}, -booktitle = {Proceedings of the AAAI/ACM Conference on AI, Ethics, and Society}, -pages = {166–172}, -numpages = {7}, -keywords = {explainability, responsible artificial intelligence, robust-ness, fairness, machine learning}, -location = {New York, NY, USA}, -series = {AIES '20} +@inproceedings{mothilal2020explaining, + author = {Mothilal, Ramaravind K. and Sharma, Amit and Tan, Chenhao}, + title = {Explaining machine learning classifiers through diverse counterfactual explanations}, + year = {2020}, + isbn = {9781450369367}, + publisher = {Association for Computing Machinery}, + address = {New York, NY, USA}, + url = {https://doi.org/10.1145/3351095.3372850}, + doi = {10.1145/3351095.3372850}, + abstract = {Post-hoc explanations of machine learning models are crucial for people to understand and act on algorithmic predictions. An intriguing class of explanations is through counterfactuals, hypothetical examples that show people how to obtain a different prediction. We posit that effective counterfactual explanations should satisfy two properties: feasibility of the counterfactual actions given user context and constraints, and diversity among the counterfactuals presented. To this end, we propose a framework for generating and evaluating a diverse set of counterfactual explanations based on determinantal point processes. To evaluate the actionability of counterfactuals, we provide metrics that enable comparison of counterfactual-based methods to other local explanation methods. 
We further address necessary tradeoffs and point to causal implications in optimizing for counterfactuals. Our experiments on four real-world datasets show that our framework can generate a set of counterfactuals that are diverse and well approximate local decision boundaries, outperforming prior approaches to generating diverse counterfactuals. We provide an implementation of the framework at https://github.com/microsoft/DiCE.},
+  booktitle = {Proceedings of the 2020 Conference on Fairness, Accountability, and Transparency},
+  pages = {607–617},
+  numpages = {11},
+  location = {Barcelona, Spain},
+  series = {FAT* '20}
+}
+@article{upadhyay2021towards,
+  author = {Upadhyay, Sohini and Joshi, Shalmali and Lakkaraju, Himabindu},
+  title = {Towards robust and reliable algorithmic recourse},
+  year = {2021},
+  isbn = {9781713845393},
+  publisher = {Curran Associates Inc.},
+  address = {Red Hook, NY, USA},
+  abstract = {As predictive models are increasingly being deployed in high-stakes decision making (e.g., loan approvals), there has been growing interest in post-hoc techniques which provide recourse to affected individuals. These techniques generate recourses under the assumption that the underlying predictive model does not change. However, in practice, models are often regularly updated for a variety of reasons (e.g., dataset shifts), thereby rendering previously prescribed recourses ineffective. To address this problem, we propose a novel framework, RObust Algorithmic Recourse (ROAR), that leverages adversarial training for finding recourses that are robust to model shifts. To the best of our knowledge, this work proposes the first ever solution to this critical problem. We also carry out theoretical analysis which underscores the importance of constructing recourses that are robust to model shifts: 1) We quantify the probability of invalidation for recourses generated without accounting for model shifts. 2) We prove that the additional cost incurred due to the robust recourses output by our framework is bounded. Experimental evaluation on multiple synthetic and real-world datasets demonstrates the efficacy of the proposed framework.},
+  booktitle = {Proceedings of the 35th International Conference on Neural Information Processing Systems},
+  articleno = {1294},
+  numpages = {12},
+  series = {NIPS '21}
+}
+@inproceedings{vo2023feature,
+  author = {Vo, Vy and Le, Trung and Nguyen, Van and Zhao, He and Bonilla, Edwin V. 
and Haffari, Gholamreza and Phung, Dinh}, + title = {Feature-based Learning for Diverse and Privacy-Preserving Counterfactual Explanations}, + year = {2023}, + isbn = {9798400701030}, + publisher = {Association for Computing Machinery}, + address = {New York, NY, USA}, + url = {https://doi.org/10.1145/3580305.3599343}, + doi = {10.1145/3580305.3599343}, + booktitle = {Proceedings of the 29th ACM SIGKDD Conference on Knowledge Discovery and Data Mining}, + pages = {2211–2222}, + numpages = {12}, + keywords = {algorithmic recourse, explainable ai, privacy}, + location = {Long Beach, CA, USA}, + series = {KDD '23} } - @inproceedings{guo2021counternet, - title={CounterNet: End-to-End Training of Prediction Aware Counterfactual Explanation}, - author={Guo, Hangzhi and Nguyen, Thanh and Yadav, Amulya}, - booktitle={Proceedings of the 29th ACM SIGKDD Conference on Knowledge Discovery and Data Mining (KDD '23), August 6--10, 2023, Long Beach, CA, USA}, - year={2023}, - doi={10.1145/3580305.3599290} + author = {Guo, Hangzhi and Nguyen, Thanh H. and Yadav, Amulya}, + title = {CounterNet: End-to-End Training of Prediction Aware Counterfactual Explanations}, + year = {2023}, + isbn = {9798400701030}, + publisher = {Association for Computing Machinery}, + address = {New York, NY, USA}, + url = {https://doi.org/10.1145/3580305.3599290}, + doi = {10.1145/3580305.3599290}, + abstract = {This work presents CounterNet, a novel end-to-end learning framework which integrates Machine Learning (ML) model training and the generation of corresponding counterfactual (CF) explanations into a single end-to-end pipeline. Counterfactual explanations offer a contrastive case, i.e., they attempt to find the smallest modification to the feature values of an instance that changes the prediction of the ML model on that instance to a predefined output. Prior techniques for generating CF explanations suffer from two major limitations: (i) all of them are post-hoc methods designed for use with proprietary ML models --- as a result, their procedure for generating CF explanations is uninformed by the training of the ML model, which leads to misalignment between model predictions and explanations; and (ii) most of them rely on solving separate time-intensive optimization problems to find CF explanations for each input data point (which negatively impacts their runtime). This work makes a novel departure from the prevalent post-hoc paradigm (of generating CF explanations) by presenting CounterNet, an end-to-end learning framework which integrates predictive model training and the generation of counterfactual (CF) explanations into a single pipeline. Unlike post-hoc methods, CounterNet enables the optimization of the CF explanation generation only once together with the predictive model. We adopt a block-wise coordinate descent procedure which helps in effectively training CounterNet's network. 
Our extensive experiments on multiple real-world datasets show that CounterNet generates high-quality predictions, and consistently achieves 100\% CF validity and low proximity scores (thereby achieving a well-balanced cost-invalidity trade-off) for any new input instance, and runs 3X faster than existing state-of-the-art baselines.}, + booktitle = {Proceedings of the 29th ACM SIGKDD Conference on Knowledge Discovery and Data Mining}, + pages = {577–589}, + numpages = {13}, + keywords = {algorithmic recourse, counterfactual explanation, explainable artificial intelligence, interpretability}, + location = {Long Beach, CA, USA}, + series = {KDD '23} } @inproceedings{guo2023rocoursenet, - title={RoCourseNet: Robust Training of a Prediction Aware Recourse Model}, - author={Guo, Hangzhi and Jia, Feiran and Chen, Jinghui and Squicciarini, Anna and Yadav, Amulya}, - booktitle={Proceedings of the 32nd ACM International Conference on Information and Knowledge Management}, - pages={619--628}, - year={2023} -} -@article{li2018should, - title={“Should This Loan be Approved or Denied?”: A Large Dataset with Class Assignment Guidelines}, - author={Li, Min and Mickel, Amy and Taylor, Stanley}, - journal={Journal of Statistics Education}, - volume={26}, - number={1}, - pages={55--66}, - year={2018}, - publisher={Taylor \& Francis} -} - - -@misc{Dua:2019 , -author = "Dua, Dheeru and Graff, Casey", -year = "2017", -title = "{UCI} Machine Learning Repository", -url = "http://archive.ics.uci.edu/ml", -institution = "University of California, Irvine, School of Information and Computer Sciences" } - - -@article{dhurandhar2019model, - title={Model agnostic contrastive explanations for structured data}, - author={Dhurandhar, Amit and Pedapati, Tejaswini and Balakrishnan, Avinash and Chen, Pin-Yu and Shanmugam, Karthikeyan and Puri, Ruchir}, - journal={arXiv preprint arXiv:1906.00117}, - year={2019} -} - -@inproceedings{leino2018influence, - title={Influence-directed explanations for deep convolutional networks}, - author={Leino, Klas and Sen, Shayak and Datta, Anupam and Fredrikson, Matt and Li, Linyi}, - booktitle={2018 IEEE International Test Conference (ITC)}, - pages={1--8}, - year={2018}, - organization={IEEE} -} - -@article{dhamdhere2018important, - title={How important is a neuron?}, - author={Dhamdhere, Kedar and Sundararajan, Mukund and Yan, Qiqi}, - journal={arXiv preprint arXiv:1805.12233}, - year={2018} -} - -@article{grefenstette2019generalized, - title={Generalized Inner Loop Meta-Learning}, - author={Grefenstette, Edward and Amos, Brandon and Yarats, Denis and Htut, Phu Mon and Molchanov, Artem and Meier, Franziska and Kiela, Douwe and Cho, Kyunghyun and Chintala, Soumith}, - journal={arXiv preprint arXiv:1910.01727}, - year={2019} -} - -@article{frostig2018jax, - title={Compiling machine learning programs via high-level tracing}, - author={Frostig, Roy and Johnson, Matthew James and Leary, Chris}, - journal={Systems for Machine Learning}, - volume={4}, - number={9}, - year={2018}, - publisher={SysML} -} - -@software{jax2018github, - author = {James Bradbury and Roy Frostig and Peter Hawkins and Matthew James Johnson and Chris Leary and Dougal Maclaurin and George Necula and Adam Paszke and Jake Vander{P}las and Skye Wanderman-{M}ilne and Qiao Zhang}, - title = {{JAX}: composable transformations of {P}ython+{N}um{P}y programs}, - url = {http://github.com/google/jax}, - version = {0.4.10}, - year = {2018}, +author = {Guo, Hangzhi and Jia, Feiran and Chen, Jinghui and Squicciarini, Anna and Yadav, 
+title = {RoCourseNet: Robust Training of a Prediction Aware Recourse Model},
+year = {2023},
+isbn = {9798400701245},
+publisher = {Association for Computing Machinery},
+address = {New York, NY, USA},
+url = {https://doi.org/10.1145/3583780.3615040},
+doi = {10.1145/3583780.3615040},
+abstract = {Counterfactual (CF) explanations for machine learning (ML) models are preferred by end-users, as they explain the predictions of ML models by providing a recourse (or contrastive) case to individuals who are adversely impacted by predicted outcomes. Existing CF explanation methods generate recourses under the assumption that the underlying target ML model remains stationary over time. However, due to commonly occurring distributional shifts in training data, ML models constantly get updated in practice, which might render previously generated recourses invalid and diminish end-users' trust in our algorithmic framework. To address this problem, we propose RoCourseNet, a training framework that jointly optimizes predictions and recourses that are robust to future data shifts. This work contains four key contributions: (1) We formulate the robust recourse generation problem as a tri-level optimization problem which consists of two sub-problems: (i) a bi-level problem that finds the worst-case adversarial shift in the training data, and (ii) an outer minimization problem to generate robust recourses against this worst-case shift. (2) We leverage adversarial training to solve this tri-level optimization problem by: (i) proposing a novel virtual data shift (VDS) algorithm to find worst-case shifted ML models via explicitly considering the worst-case data shift in the training dataset, and (ii) a block-wise coordinate descent procedure to optimize for prediction and corresponding robust recourses. (3) We evaluate RoCourseNet's performance on three real-world datasets, and show that RoCourseNet consistently achieves more than 96\% robust validity and outperforms state-of-the-art baselines by at least 10\% in generating robust CF explanations. (4) Finally, we generalize the RoCourseNet framework to accommodate any parametric post-hoc methods for improving robust validity.},
+booktitle = {Proceedings of the 32nd ACM International Conference on Information and Knowledge Management},
+pages = {619–628},
+numpages = {10},
+keywords = {interpretability, explainable artificial intelligence, counterfactual explanation, algorithmic recourse, adversarial machine learning},
+location = {Birmingham, United Kingdom},
+series = {CIKM '23}
}
-
-@software{haiku2020github,
- author = {Tom Hennigan and Trevor Cai and Tamara Norman and Igor Babuschkin},
- title = {{H}aiku: {S}onnet for {JAX}},
- url = {http://github.com/deepmind/dm-haiku},
- version = {0.0.9},
- year = {2020},
+@article{pawelczyk2021carla,
+ title={CARLA: A Python Library to Benchmark Algorithmic Recourse and Counterfactual Explanation Algorithms},
+ author={Martin Pawelczyk and Sascha Bielawski and Johannes van den Heuvel and Tobias Richter and Gjergji Kasneci},
+ journal={arXiv preprint arXiv:2108.00783},
+ year={2021},
+ url={https://api.semanticscholar.org/CorpusID:236772193}
}
-
@article{klaise2021alibi,
 author = {Janis Klaise and Arnaud Van Looveren and Giovanni Vacanti and Alexandru Coca},
 title = {Alibi Explain: Algorithms for Explaining Machine Learning Models},
@@ -1240,222 +201,87 @@ @article{klaise2021alibi
 pages = {1-7},
 url = {http://jmlr.org/papers/v22/21-0017.html}
}
-
-@article{pawelczyk2021carla,
- title={CARLA: A Python Library to Benchmark Algorithmic Recourse and Counterfactual Explanation Algorithms},
- author={Pawelczyk, Martin and Bielawski, Sascha and van den Heuvel, Johannes and Richter, Tobias and Kasneci, Gjergji},
- year={2021},
- journal={Advances in Neural Information Processing Systems Track on Datasets and Benchmarks},
-}
-
-@inproceedings{alexnet2012,
- author = {Krizhevsky, Alex and Sutskever, Ilya and Hinton, Geoffrey E},
- booktitle = {Advances in Neural Information Processing Systems},
- editor = {F. Pereira and C.J. Burges and L. Bottou and K.Q. Weinberger},
- pages = {},
- publisher = {Curran Associates, Inc.},
- title = {ImageNet Classification with Deep Convolutional Neural Networks},
- url = {https://proceedings.neurips.cc/paper_files/paper/2012/file/c399862d3b9d6b76c8436e924a68c45b-Paper.pdf},
- volume = {25},
- year = {2012}
-}
-
-@inproceedings{imagenet2009,
- author={Deng, Jia and Dong, Wei and Socher, Richard and Li, Li-Jia and Kai Li and Li Fei-Fei},
- booktitle={2009 IEEE Conference on Computer Vision and Pattern Recognition},
- title={ImageNet: A large-scale hierarchical image database},
- year={2009},
- volume={},
- number={},
- pages={248-255},
- doi={10.1109/CVPR.2009.5206848}
-}
-@inproceedings{devlin2019bert,
- title = "{BERT}: Pre-training of Deep Bidirectional Transformers for Language Understanding",
- author = "Devlin, Jacob and
- Chang, Ming-Wei and
- Lee, Kenton and
- Toutanova, Kristina",
- booktitle = "Proceedings of the 2019 Conference of the North {A}merican Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers)",
- month = jun,
- year = "2019",
- address = "Minneapolis, Minnesota",
- publisher = "Association for Computational Linguistics",
- url = "https://aclanthology.org/N19-1423",
- doi = "10.18653/v1/N19-1423",
- pages = "4171--4186",
-}
-@article{brown2020language,
- title={Language models are few-shot learners},
- author={Brown, Tom and Mann, Benjamin and Ryder, Nick and Subbiah, Melanie and Kaplan, Jared D and Dhariwal, Prafulla and Neelakantan, Arvind and Shyam, Pranav and Sastry, Girish and Askell, Amanda and others},
- journal={Advances in neural information processing systems},
- volume={33},
- pages={1877--1901},
- year={2020}
-}
-
-@article{ouyang2022training,
- title={Training language models to follow instructions with human feedback},
- author={Ouyang, Long and Wu, Jeffrey and Jiang, Xu and Almeida, Diogo and Wainwright, Carroll and Mishkin, Pamela and Zhang, Chong and Agarwal, Sandhini and Slama, Katarina and Ray, Alex and others},
- journal={Advances in Neural Information Processing Systems},
- volume={35},
- pages={27730--27744},
- year={2022}
-}
-
-@article{hopkins1999spambase,
- title={Spambase data set},
- author={Hopkins, Mark and Reeber, Erik and Forman, George and Suermondt, Jaap},
- journal={Hewlett-Packard Labs},
- volume={1},
- number={7},
- year={1999}
-}
-
-% ozone
-@article{zhang2008forecasting,
- title={Forecasting skewed biased stochastic ozone days: analyses, solutions and beyond},
- author={Zhang, Kun and Fan, Wei},
- journal={Knowledge and Information Systems},
- volume={14},
- pages={299--326},
- year={2008},
- publisher={Springer}
-}
-
-% qsar
-@article{mansouri2013quantitative,
- title={Quantitative structure--activity relationship models for ready biodegradability of chemicals},
- author={Mansouri, Kamel and Ringsted, Tine and Ballabio, Davide and Todeschini, Roberto and Consonni, Viviana},
- journal={Journal of chemical information and modeling},
- volume={53},
- number={4},
- pages={867--878},
- year={2013},
- publisher={ACS Publications}
+@misc{kohavi1996uci,
+ title={UCI Machine Learning Repository: Adult Data Set},
+ author={Kohavi, R and Becker, B},
+ year={1996},
+ url={http://archive.ics.uci.edu/ml/datasets/Adult}
}
+@software{jax2018github,
+ author = {James Bradbury and Roy Frostig and Peter Hawkins and Matthew James Johnson and Chris Leary and Dougal Maclaurin and George Necula and Adam Paszke and Jake Vander{P}las and Skye Wanderman-{M}ilne and Qiao Zhang},
+ title = {{JAX}: composable transformations of {P}ython+{N}um{P}y programs},
+ url = {http://github.com/google/jax},
+ version = {0.4.10},
+ year = {2018},
}
+@inproceedings{frostig2018jax,
+ title = {Compiling machine learning programs via high-level tracing},
+ author = {Roy Frostig and Matthew Johnson and Chris Leary},
+ booktitle = {Systems for Machine Learning (SysML)},
+ year = {2018},
+ url = {https://mlsys.org/Conferences/doc/2018/146.pdf}
}
+@article{laugel2017inverse,
+ title={Inverse classification for comparison-based interpretability in machine learning},
+ author={Laugel, Thibault and Lesot, Marie-Jeanne and Marsala, Christophe and Renard, Xavier and Detyniecki, Marcin},
+ journal={arXiv preprint arXiv:1712.08443},
+ year={2017}
}
+@inproceedings{van2019interpretable,
+ author = {Van Looveren, Arnaud and Klaise, Janis},
+ title = {Interpretable Counterfactual Explanations Guided by Prototypes},
+ year = {2021},
+ isbn = {978-3-030-86519-1},
+ publisher = {Springer-Verlag},
+ address = {Berlin, Heidelberg},
+ url = {https://doi.org/10.1007/978-3-030-86520-7_40},
+ doi = {10.1007/978-3-030-86520-7_40},
+ abstract = {We propose a fast, model agnostic method for finding interpretable counterfactual explanations of classifier predictions by using class prototypes. We show that class prototypes, obtained using either an encoder or through class specific k-d trees, significantly speed up the search for counterfactual instances and result in more interpretable explanations. We quantitatively evaluate interpretability of the generated counterfactuals to illustrate the effectiveness of our method on an image and tabular dataset, respectively MNIST and Breast Cancer Wisconsin (Diagnostic). Additionally, we propose a principled approach to handle categorical variables and illustrate our method on the Adult (Census) dataset. Our method also eliminates the computational bottleneck that arises because of numerical gradient evaluation for black box models.},
+ booktitle = {Machine Learning and Knowledge Discovery in Databases. Research Track: European Conference, ECML PKDD 2021, Bilbao, Spain, September 13–17, 2021, Proceedings, Part II},
+ pages = {650–665},
+ numpages = {16},
+ keywords = {Interpretation, Transparency/Explainability, Counterfactual explanations},
+ location = {Bilbao, Spain}
}
-
-@inproceedings{
-antoran2021clue,
+@inproceedings{pawelczyk2020learning,
+ author = {Pawelczyk, Martin and Broelemann, Klaus and Kasneci, Gjergji},
+ title = {Learning Model-Agnostic Counterfactual Explanations for Tabular Data},
+ year = {2020},
+ isbn = {9781450370233},
+ publisher = {Association for Computing Machinery},
+ address = {New York, NY, USA},
+ url = {https://doi.org/10.1145/3366423.3380087},
+ doi = {10.1145/3366423.3380087},
+ abstract = {Counterfactual explanations can be obtained by identifying the smallest change made to an input vector to influence a prediction in a positive way from a user’s viewpoint; for example, from ’loan rejected’ to ’awarded’ or from ’high risk of cardiovascular disease’ to ’low risk’. Previous approaches would not ensure that the produced counterfactuals be proximate (i.e., not local outliers) and connected to regions with substantial data density (i.e., close to correctly classified observations), two requirements known as counterfactual faithfulness. Our contribution is twofold. First, drawing ideas from the manifold learning literature, we develop a framework, called C-CHVAE, that generates faithful counterfactuals. Second, we suggest to complement the catalog of counterfactual quality measures using a criterion to quantify the degree of difficulty for a certain counterfactual suggestion. Our real world experiments suggest that faithful counterfactuals come at the cost of higher degrees of difficulty.},
+ booktitle = {Proceedings of The Web Conference 2020},
+ pages = {3126–3132},
+ numpages = {7},
+ keywords = {Counterfactual explanations, Interpretability, Transparency},
+ location = {Taipei, Taiwan},
+ series = {WWW '20}
+}
+@inproceedings{antoran2021clue,
 title={Getting a {\{}CLUE{\}}: A Method for Explaining Uncertainty Estimates},
 author={Javier Antoran and Umang Bhatt and Tameem Adel and Adrian Weller and Jos{\'e} Miguel Hern{\'a}ndez-Lobato},
 booktitle={International Conference on Learning Representations},
 year={2021},
 url={https://openreview.net/forum?id=XSLF1XFq5h}
}
-
-
-@article{joshi2019towards,
- title={Towards realistic individual recourse and actionable explanations in black-box decision making systems},
- author={Joshi, Shalmali and Koyejo, Oluwasanmi and Vijitbenjaronk, Warut and Kim, Been and Ghosh, Joydeep},
- journal={arXiv preprint arXiv:1907.09615},
- year={2019}
-}
-
-@inproceedings{nemirovsky2022countergan,
- title={CounteRGAN: Generating counterfactuals for real-time recourse and interpretability using residual GANs},
- author={Nemirovsky, Daniel and Thiebaut, Nicolas and Xu, Ye and Gupta, Abhishek},
- booktitle={Uncertainty in Artificial Intelligence},
- pages={1488--1497},
- year={2022},
- organization={PMLR}
-}
-
-@inproceedings{rolf2022rax,
-title = {Rax: Composable Learning-to-Rank using JAX},
-author = {Rolf Jagerman and Xuanhui Wang and Honglei Zhuang and Zhen Qin and Mike Bendersky and Marc Najork},
-year = {2022},
-booktitle = {Proceedings of the 28th ACM SIGKDD Conference on Knowledge Discovery and Data Mining},
-pages = {3051–3060}
-}
-
-@inproceedings{dehghani2022scenic,
- title={Scenic: A JAX library for computer vision research and beyond},
- author={Dehghani, Mostafa and Gritsenko, Alexey and Arnab, Anurag and Minderer, Matthias and Tay, Yi},
- booktitle={Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition},
- pages={21393--21398},
- year={2022}
-}
-
-@article{subramani2021enabling,
- title={Enabling fast differentially private sgd via just-in-time compilation and vectorization},
- author={Subramani, Pranav and Vadivelu, Nicholas and Kamath, Gautam},
- journal={Advances in Neural Information Processing Systems},
- volume={34},
- pages={26409--26421},
- year={2021}
-}
-
-@software{deepmind2020jax,
- title = {The {D}eep{M}ind {JAX} {E}cosystem},
- author = {Babuschkin, Igor and Baumli, Kate and Bell, Alison and Bhupatiraju, Surya and Bruce, Jake and Buchlovsky, Peter and Budden, David and Cai, Trevor and Clark, Aidan and Danihelka, Ivo and Dedieu, Antoine and Fantacci, Claudio and Godwin, Jonathan and Jones, Chris and Hemsley, Ross and Hennigan, Tom and Hessel, Matteo and Hou, Shaobo and Kapturowski, Steven and Keck, Thomas and Kemaev, Iurii and King, Michael and Kunesch, Markus and Martens, Lena and Merzic, Hamza and Mikulik, Vladimir and Norman, Tamara and Papamakarios, George and Quan, John and Ring, Roman and Ruiz, Francisco and Sanchez, Alvaro and Schneider, Rosalia and Sezener, Eren and Spencer, Stephen and Srinivasan, Srivatsan and Stokowiec, Wojciech and Wang, Luyu and Zhou, Guangyao and Viola, Fabio},
- url = {http://github.com/deepmind},
- year = {2020},
-}
-
-@phdthesis{kidger2021on,
- title={{O}n {N}eural {D}ifferential {E}quations},
- author={Patrick Kidger},
- year={2021},
- school={University of Oxford},
-}
-
-@article{phan2019composable,
- title={Composable Effects for Flexible and Accelerated Probabilistic Programming in NumPyro},
- author={Phan, Du and Pradhan, Neeraj and Jankowiak, Martin},
- journal={arXiv preprint arXiv:1912.11554},
+@article{mahajan2019preserving,
+ title={Preserving causal constraints in counterfactual explanations for machine learning classifiers},
+ author={Mahajan, Divyat and Tan, Chenhao and Sharma, Amit},
+ journal={arXiv preprint arXiv:1912.03277},
 year={2019}
}
-
-@article{laugel2017inverse,
- title={Inverse classification for comparison-based interpretability in machine learning},
- author={Laugel, Thibault and Lesot, Marie-Jeanne and Marsala, Christophe and Renard, Xavier and Detyniecki, Marcin},
- journal={arXiv preprint arXiv:1712.08443},
- year={2017}
-}
-
-@software{altmeyer2022CounterfactualExplanations,
- author = {Patrick Altmeyer},
- title = {{CounterfactualExplanations.jl - a Julia package for Counterfactual Explanations and Algorithmic Recourse}},
- url = {https://github.com/juliatrustworthyai/CounterfactualExplanations.jl},
- year = {2022}
+@article{ding2021retiring,
+author = {Ding, Frances and Hardt, Moritz and Miller, John and Schmidt, Ludwig},
+title = {Retiring adult: new datasets for fair machine learning},
+year = {2021},
+isbn = {9781713845393},
+publisher = {Curran Associates Inc.},
+address = {Red Hook, NY, USA},
+abstract = {Although the fairness community has recognized the importance of data, researchers in the area primarily rely on UCI Adult when it comes to tabular data. Derived from a 1994 US Census survey, this dataset has appeared in hundreds of research papers where it served as the basis for the development and comparison of many algorithmic fairness interventions. We reconstruct a superset of the UCI Adult data from available US Census sources and reveal idiosyncrasies of the UCI Adult dataset that limit its external validity. Our primary contribution is a suite of new datasets derived from US Census surveys that extend the existing data ecosystem for research on fair machine learning. We create prediction tasks relating to income, employment, health, transportation, and housing. The data span multiple years and all states of the United States, allowing researchers to study temporal shift and geographic variation. We highlight a broad initial sweep of new empirical insights relating to trade-offs between fairness criteria, performance of algorithmic interventions, and the role of distribution shift based on our new datasets. Our findings inform ongoing debates, challenge some existing narratives, and point to future research directions.},
+booktitle = {Proceedings of the 35th International Conference on Neural Information Processing Systems},
+articleno = {496},
+numpages = {13},
+series = {NIPS '21}
}
-@inproceedings{vo2023feature,
- title={Feature-based Learning for Diverse and Privacy-Preserving Counterfactual Explanations},
- author={Vo, Vy and Le, Trung and Nguyen, Van and Zhao, He and Bonilla, Edwin V and Haffari, Gholamreza and Phung, Dinh},
- booktitle={Proceedings of the 29th ACM SIGKDD Conference on Knowledge Discovery and Data Mining},
- pages={2211--2222},
- year={2023}
-}
\ No newline at end of file