diff --git a/paper/paper.tex b/paper/paper.tex index 6c8231d1dd09662c81c1080af2e123838f98a372..4184aaa81fdabb28fef826ec3d180b87cbce3286 100644 --- a/paper/paper.tex +++ b/paper/paper.tex @@ -1,589 +1,569 @@ -\documentclass{article} - - -% if you need to pass options to natbib, use, e.g.: -% \PassOptionsToPackage{numbers, compress}{natbib} -% before loading neurips_2022 - - -% ready for submission -\usepackage{neurips_2022} - - -% to compile a preprint version, e.g., for submission to arXiv, add add the -% [preprint] option: -% \usepackage[preprint]{neurips_2022} - - -% to compile a camera-ready version, add the [final] option, e.g.: -% \usepackage[final]{neurips_2022} - - -% to avoid loading the natbib package, add option nonatbib: -% \usepackage[nonatbib]{neurips_2022} - - -\usepackage[utf8]{inputenc} % allow utf-8 input -\usepackage[T1]{fontenc} % use 8-bit T1 fonts -\usepackage{hyperref} % hyperlinks -\usepackage{url} % simple URL typesetting -\usepackage{booktabs} % professional-quality tables -\usepackage{amsfonts} % blackboard math symbols -\usepackage{nicefrac} % compact symbols for 1/2, etc. -\usepackage{microtype} % microtypography -\usepackage{xcolor} % colors - - -\title{High-Fidelity Counterfactual Explanations through Conformal Prediction} - - -% The \author macro works with any number of authors. There are two commands -% used to separate the names and addresses of multiple authors: \And and \AND. -% -% Using \And between authors leaves it to LaTeX to determine where to break the -% lines. Using \AND forces a line break at that point. So, if LaTeX puts 3 of 4 -% authors names on the first line, and the last on the second line, try using -% \AND instead of \And before the third author name. 
- - -\author{% - David S.~Hippocampus\thanks{Use footnote for providing further information - about author (webpage, alternative address)---\emph{not} for acknowledging - funding agencies.} \\ - Department of Computer Science\\ - Cranberry-Lemon University\\ - Pittsburgh, PA 15213 \\ - \texttt{hippo@cs.cranberry-lemon.edu} \\ - % examples of more authors - % \And - % Coauthor \\ - % Affiliation \\ - % Address \\ - % \texttt{email} \\ - % \AND - % Coauthor \\ - % Affiliation \\ - % Address \\ - % \texttt{email} \\ - % \And - % Coauthor \\ - % Affiliation \\ - % Address \\ - % \texttt{email} \\ - % \And - % Coauthor \\ - % Affiliation \\ - % Address \\ - % \texttt{email} \\ -} - - -\begin{document} - - -\maketitle - - -\begin{abstract} - The abstract paragraph should be indented \nicefrac{1}{2}~inch (3~picas) on - both the left- and right-hand margins. Use 10~point type, with a vertical - spacing (leading) of 11~points. The word \textbf{Abstract} must be centered, - bold, and in point size 12. Two line spaces precede the abstract. The abstract - must be limited to one paragraph. -\end{abstract} - - -\section{Submission of papers to NeurIPS 2022} - - -Please read the instructions below carefully and follow them faithfully. - - -\subsection{Style} - - -Papers to be submitted to NeurIPS 2022 must be prepared according to the -instructions presented here. Papers may only be up to {\bf nine} pages long, -including figures. Additional pages \emph{containing only acknowledgments and -references} are allowed. Papers that exceed the page limit will not be -reviewed, or in any other way considered for presentation at the conference. - - -The margins in 2022 are the same as those in 2007, which allow for $\sim$$15\%$ -more words in the paper compared to earlier years. - - -Authors are required to use the NeurIPS \LaTeX{} style files obtainable at the -NeurIPS website as indicated below. Please make sure you use the current files -and not previous versions. 
Tweaking the style files may be grounds for -rejection. - - -\subsection{Retrieval of style files} - - -The style files for NeurIPS and other conference information are available on -the World Wide Web at -\begin{center} - \url{http://www.neurips.cc/} -\end{center} -The file \verb+neurips_2022.pdf+ contains these instructions and illustrates the -various formatting requirements your NeurIPS paper must satisfy. - - -The only supported style file for NeurIPS 2022 is \verb+neurips_2022.sty+, -rewritten for \LaTeXe{}. \textbf{Previous style files for \LaTeX{} 2.09, - Microsoft Word, and RTF are no longer supported!} - - -The \LaTeX{} style file contains three optional arguments: \verb+final+, which -creates a camera-ready copy, \verb+preprint+, which creates a preprint for -submission to, e.g., arXiv, and \verb+nonatbib+, which will not load the -\verb+natbib+ package for you in case of package clash. - - -\paragraph{Preprint option} -If you wish to post a preprint of your work online, e.g., on arXiv, using the -NeurIPS style, please use the \verb+preprint+ option. This will create a -nonanonymized version of your work with the text ``Preprint. Work in progress.'' -in the footer. This version may be distributed as you see fit. Please \textbf{do - not} use the \verb+final+ option, which should \textbf{only} be used for -papers accepted to NeurIPS. - - -At submission time, please omit the \verb+final+ and \verb+preprint+ -options. This will anonymize your submission and add line numbers to aid -review. Please do \emph{not} refer to these line numbers in your paper as they -will be removed during generation of camera-ready copies. - - -The file \verb+neurips_2022.tex+ may be used as a ``shell'' for writing your -paper. All you have to do is replace the author, title, abstract, and text of -the paper with your own. - - -The formatting instructions contained in these style files are summarized in -Sections \ref{gen_inst}, \ref{headings}, and \ref{others} below. 
- - -\section{General formatting instructions} -\label{gen_inst} - - -The text must be confined within a rectangle 5.5~inches (33~picas) wide and -9~inches (54~picas) long. The left margin is 1.5~inch (9~picas). Use 10~point -type with a vertical spacing (leading) of 11~points. Times New Roman is the -preferred typeface throughout, and will be selected for you by default. -Paragraphs are separated by \nicefrac{1}{2}~line space (5.5 points), with no -indentation. - - -The paper title should be 17~point, initial caps/lower case, bold, centered -between two horizontal rules. The top rule should be 4~points thick and the -bottom rule should be 1~point thick. Allow \nicefrac{1}{4}~inch space above and -below the title to rules. All pages should start at 1~inch (6~picas) from the -top of the page. - - -For the final version, authors' names are set in boldface, and each name is -centered above the corresponding address. The lead author's name is to be listed -first (left-most), and the co-authors' names (if different address) are set to -follow. If there is only one co-author, list both author and co-author side by -side. - - -Please pay special attention to the instructions in Section \ref{others} -regarding figures, tables, acknowledgments, and references. - - -\section{Headings: first level} -\label{headings} - - -All headings should be lower case (except for first word and proper nouns), -flush left, and bold. - - -First-level headings should be in 12-point type. - - -\subsection{Headings: second level} - - -Second-level headings should be in 10-point type. - - -\subsubsection{Headings: third level} - - -Third-level headings should be in 10-point type. - - -\paragraph{Paragraphs} - - -There is also a \verb+\paragraph+ command available, which sets the heading in -bold, flush left, and inline with the text, with the heading followed by 1\,em -of space. - - -\section{Citations, figures, tables, references} -\label{others} - - -These instructions apply to everyone. 
- - -\subsection{Citations within the text} - - -The \verb+natbib+ package will be loaded for you by default. Citations may be -author/year or numeric, as long as you maintain internal consistency. As to the -format of the references themselves, any style is acceptable as long as it is -used consistently. - - -The documentation for \verb+natbib+ may be found at -\begin{center} - \url{http://mirrors.ctan.org/macros/latex/contrib/natbib/natnotes.pdf} -\end{center} -Of note is the command \verb+\citet+, which produces citations appropriate for -use in inline text. For example, -\begin{verbatim} - \citet{hasselmo} investigated\dots -\end{verbatim} -produces -\begin{quote} - Hasselmo, et al.\ (1995) investigated\dots -\end{quote} - - -If you wish to load the \verb+natbib+ package with options, you may add the -following before loading the \verb+neurips_2022+ package: -\begin{verbatim} - \PassOptionsToPackage{options}{natbib} -\end{verbatim} - - -If \verb+natbib+ clashes with another package you load, you can add the optional -argument \verb+nonatbib+ when loading the style file: -\begin{verbatim} - \usepackage[nonatbib]{neurips_2022} -\end{verbatim} - - -As submission is double blind, refer to your own published work in the third -person. That is, use ``In the previous work of Jones et al.\ [4],'' not ``In our -previous work [4].'' If you cite your other papers that are not widely available -(e.g., a journal paper under review), use anonymous author names in the -citation, e.g., an author of the form ``A.\ Anonymous.'' - - -\subsection{Footnotes} - - -Footnotes should be used sparingly. If you do require a footnote, indicate -footnotes with a number\footnote{Sample of the first footnote.} in the -text. Place the footnotes at the bottom of the page on which they appear. -Precede the footnote with a horizontal rule of 2~inches (12~picas). 
- - -Note that footnotes are properly typeset \emph{after} punctuation -marks.\footnote{As in this example.} - - -\subsection{Figures} - - -\begin{figure} - \centering - \fbox{\rule[-.5cm]{0cm}{4cm} \rule[-.5cm]{4cm}{0cm}} - \caption{Sample figure caption.} -\end{figure} - - -All artwork must be neat, clean, and legible. Lines should be dark enough for -purposes of reproduction. The figure number and caption always appear after the -figure. Place one line space before the figure caption and one line space after -the figure. The figure caption should be lower case (except for first word and -proper nouns); figures are numbered consecutively. - - -You may use color figures. However, it is best for the figure captions and the -paper body to be legible if the paper is printed in either black/white or in -color. - - -\subsection{Tables} - - -All tables must be centered, neat, clean and legible. The table number and -title always appear before the table. See Table~\ref{sample-table}. - - -Place one line space before the table title, one line space after the -table title, and one line space after the table. The table title must -be lower case (except for first word and proper nouns); tables are -numbered consecutively. - - -Note that publication-quality tables \emph{do not contain vertical rules.} We -strongly suggest the use of the \verb+booktabs+ package, which allows for -typesetting high-quality, professional tables: -\begin{center} - \url{https://www.ctan.org/pkg/booktabs} -\end{center} -This package was used to typeset Table~\ref{sample-table}. 
- - -\begin{table} - \caption{Sample table title} - \label{sample-table} - \centering - \begin{tabular}{lll} - \toprule - \multicolumn{2}{c}{Part} \\ - \cmidrule(r){1-2} - Name & Description & Size ($\mu$m) \\ - \midrule - Dendrite & Input terminal & $\sim$100 \\ - Axon & Output terminal & $\sim$10 \\ - Soma & Cell body & up to $10^6$ \\ - \bottomrule - \end{tabular} -\end{table} - - -\section{Final instructions} - - -Do not change any aspects of the formatting parameters in the style files. In -particular, do not modify the width or length of the rectangle the text should -fit into, and do not change font sizes (except perhaps in the -\textbf{References} section; see below). Please note that pages should be -numbered. - - -\section{Preparing PDF files} - - -Please prepare submission files with paper size ``US Letter,'' and not, for -example, ``A4.'' - - -Fonts were the main cause of problems in the past years. Your PDF file must only -contain Type 1 or Embedded TrueType fonts. Here are a few instructions to -achieve this. - - -\begin{itemize} - - -\item You should directly generate PDF files using \verb+pdflatex+. - - -\item You can check which fonts a PDF files uses. In Acrobat Reader, select the - menu Files$>$Document Properties$>$Fonts and select Show All Fonts. You can - also use the program \verb+pdffonts+ which comes with \verb+xpdf+ and is - available out-of-the-box on most Linux machines. - - -\item The IEEE has recommendations for generating PDF files whose fonts are also - acceptable for NeurIPS. Please see - \url{http://www.emfield.org/icuwb2010/downloads/IEEE-PDF-SpecV32.pdf} - - -\item \verb+xfig+ "patterned" shapes are implemented with bitmap fonts. Use - "solid" shapes instead. - - -\item The \verb+\bbold+ package almost always uses bitmap fonts. 
You should use - the equivalent AMS Fonts: -\begin{verbatim} - \usepackage{amsfonts} -\end{verbatim} -followed by, e.g., \verb+\mathbb{R}+, \verb+\mathbb{N}+, or \verb+\mathbb{C}+ -for $\mathbb{R}$, $\mathbb{N}$ or $\mathbb{C}$. You can also use the following -workaround for reals, natural and complex: -\begin{verbatim} - \newcommand{\RR}{I\!\!R} %real numbers - \newcommand{\Nat}{I\!\!N} %natural numbers - \newcommand{\CC}{I\!\!\!\!C} %complex numbers -\end{verbatim} -Note that \verb+amsfonts+ is automatically loaded by the \verb+amssymb+ package. - - -\end{itemize} - - -If your file contains type 3 fonts or non embedded TrueType fonts, we will ask -you to fix it. - - -\subsection{Margins in \LaTeX{}} - - -Most of the margin problems come from figures positioned by hand using -\verb+\special+ or other commands. We suggest using the command -\verb+\includegraphics+ from the \verb+graphicx+ package. Always specify the -figure width as a multiple of the line width as in the example below: -\begin{verbatim} - \usepackage[pdftex]{graphicx} ... - \includegraphics[width=0.8\linewidth]{myfile.pdf} -\end{verbatim} -See Section 4.4 in the graphics bundle documentation -(\url{http://mirrors.ctan.org/macros/latex/required/graphics/grfguide.pdf}) - - -A number of width problems arise when \LaTeX{} cannot properly hyphenate a -line. Please give LaTeX hyphenation hints using the \verb+\-+ command when -necessary. - - -\begin{ack} -Use unnumbered first level headings for the acknowledgments. All acknowledgments -go at the end of the paper before the list of references. Moreover, you are required to declare -funding (financial activities supporting the submitted work) and competing interests (related financial activities outside the submitted work). -More information about this disclosure can be found at: \url{https://neurips.cc/Conferences/2022/PaperInformation/FundingDisclosure}. - - -Do {\bf not} include this section in the anonymized submission, only in the final paper. 
You can use the \texttt{ack} environment provided in the style file to autmoatically hide this section in the anonymized submission. -\end{ack} - - -\section*{References} - - -References follow the acknowledgments. Use unnumbered first-level heading for -the references. Any choice of citation style is acceptable as long as you are -consistent. It is permissible to reduce the font size to \verb+small+ (9 point) -when listing the references. -Note that the Reference section does not count towards the page limit. -\medskip - - -{ -\small - - -[1] Alexander, J.A.\ \& Mozer, M.C.\ (1995) Template-based algorithms for -connectionist rule extraction. In G.\ Tesauro, D.S.\ Touretzky and T.K.\ Leen -(eds.), {\it Advances in Neural Information Processing Systems 7}, -pp.\ 609--616. Cambridge, MA: MIT Press. - - -[2] Bower, J.M.\ \& Beeman, D.\ (1995) {\it The Book of GENESIS: Exploring - Realistic Neural Models with the GEneral NEural SImulation System.} New York: -TELOS/Springer--Verlag. - - -[3] Hasselmo, M.E., Schnell, E.\ \& Barkai, E.\ (1995) Dynamics of learning and -recall at excitatory recurrent synapses and cholinergic modulation in rat -hippocampal region CA3. {\it Journal of Neuroscience} {\bf 15}(7):5249-5262. -} - - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -\section*{Checklist} - - -%%% BEGIN INSTRUCTIONS %%% -The checklist follows the references. Please -read the checklist guidelines carefully for information on how to answer these -questions. For each question, change the default \answerTODO{} to \answerYes{}, -\answerNo{}, or \answerNA{}. You are strongly encouraged to include a {\bf -justification to your answer}, either by referencing the appropriate section of -your paper or providing a brief inline description. For example: -\begin{itemize} - \item Did you include the license to the code and datasets? \answerYes{See Section~\ref{gen_inst}.} - \item Did you include the license to the code and datasets? 
\answerNo{The code and the data are proprietary.} - \item Did you include the license to the code and datasets? \answerNA{} -\end{itemize} -Please do not modify the questions and only use the provided macros for your -answers. Note that the Checklist section does not count towards the page -limit. In your paper, please delete this instructions block and only keep the -Checklist section heading above along with the questions/answers below. -%%% END INSTRUCTIONS %%% - - -\begin{enumerate} - - -\item For all authors... -\begin{enumerate} - \item Do the main claims made in the abstract and introduction accurately reflect the paper's contributions and scope? - \answerTODO{} - \item Did you describe the limitations of your work? - \answerTODO{} - \item Did you discuss any potential negative societal impacts of your work? - \answerTODO{} - \item Have you read the ethics review guidelines and ensured that your paper conforms to them? - \answerTODO{} -\end{enumerate} - - -\item If you are including theoretical results... -\begin{enumerate} - \item Did you state the full set of assumptions of all theoretical results? - \answerTODO{} - \item Did you include complete proofs of all theoretical results? - \answerTODO{} -\end{enumerate} - - -\item If you ran experiments... -\begin{enumerate} - \item Did you include the code, data, and instructions needed to reproduce the main experimental results (either in the supplemental material or as a URL)? - \answerTODO{} - \item Did you specify all the training details (e.g., data splits, hyperparameters, how they were chosen)? - \answerTODO{} - \item Did you report error bars (e.g., with respect to the random seed after running experiments multiple times)? - \answerTODO{} - \item Did you include the total amount of compute and the type of resources used (e.g., type of GPUs, internal cluster, or cloud provider)? 
- \answerTODO{} -\end{enumerate} - - -\item If you are using existing assets (e.g., code, data, models) or curating/releasing new assets... -\begin{enumerate} - \item If your work uses existing assets, did you cite the creators? - \answerTODO{} - \item Did you mention the license of the assets? - \answerTODO{} - \item Did you include any new assets either in the supplemental material or as a URL? - \answerTODO{} - \item Did you discuss whether and how consent was obtained from people whose data you're using/curating? - \answerTODO{} - \item Did you discuss whether the data you are using/curating contains personally identifiable information or offensive content? - \answerTODO{} -\end{enumerate} - - -\item If you used crowdsourcing or conducted research with human subjects... -\begin{enumerate} - \item Did you include the full text of instructions given to participants and screenshots, if applicable? - \answerTODO{} - \item Did you describe any potential participant risks, with links to Institutional Review Board (IRB) approvals, if applicable? - \answerTODO{} - \item Did you include the estimated hourly wage paid to participants and the total amount spent on participant compensation? - \answerTODO{} -\end{enumerate} - - -\end{enumerate} - - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% - - -\appendix - - -\section{Appendix} - - -Optionally include extra information (complete proofs, additional experiments and plots) in the appendix. -This section will often be part of the supplemental material. 
- - +\documentclass{article} + +% if you need to pass options to natbib, use, e.g.: +% \PassOptionsToPackage{numbers, compress}{natbib} +% before loading neurips_2022 + + +% ready for submission +\usepackage{neurips_2022} + + +% to compile a preprint version, e.g., for submission to arXiv, add the +% [preprint] option: +% \usepackage[preprint]{neurips_2022} + + +% to compile a camera-ready version, add the [final] option, e.g.: +% \usepackage[final]{neurips_2022} + + +% to avoid loading the natbib package, add option nonatbib: +% \usepackage[nonatbib]{neurips_2022} + + +\usepackage[utf8]{inputenc} % allow utf-8 input +\usepackage[T1]{fontenc} % use 8-bit T1 fonts +\usepackage{hyperref} % hyperlinks +\usepackage{url} % simple URL typesetting +\usepackage{booktabs} % professional-quality tables +\usepackage{amsfonts} % blackboard math symbols +\usepackage{nicefrac} % compact symbols for 1/2, etc. +\usepackage{microtype} % microtypography +\usepackage{xcolor} % colors + + +\title{High-Fidelity Counterfactual Explanations through Conformal Prediction} + + +% The \author macro works with any number of authors. There are two commands +% used to separate the names and addresses of multiple authors: \And and \AND. +% +% Using \And between authors leaves it to LaTeX to determine where to break the +% lines. Using \AND forces a line break at that point. So, if LaTeX puts 3 of 4 +% authors names on the first line, and the last on the second line, try using +% \AND instead of \And before the third author name. 
+ + +\author{% + David S.~Hippocampus\thanks{Use footnote for providing further information + about author (webpage, alternative address)---\emph{not} for acknowledging + funding agencies.} \\ + Department of Computer Science\\ + Cranberry-Lemon University\\ + Pittsburgh, PA 15213 \\ + \texttt{hippo@cs.cranberry-lemon.edu} \\ + % examples of more authors + % \And + % Coauthor \\ + % Affiliation \\ + % Address \\ + % \texttt{email} \\ + % \AND + % Coauthor \\ + % Affiliation \\ + % Address \\ + % \texttt{email} \\ + % \And + % Coauthor \\ + % Affiliation \\ + % Address \\ + % \texttt{email} \\ + % \And + % Coauthor \\ + % Affiliation \\ + % Address \\ + % \texttt{email} \\ +} + + +\begin{document} + +\maketitle + + +\begin{abstract} + The abstract paragraph should be indented \nicefrac{1}{2}~inch (3~picas) on + both the left- and right-hand margins. Use 10~point type, with a vertical + spacing (leading) of 11~points. The word \textbf{Abstract} must be centered, + bold, and in point size 12. Two line spaces precede the abstract. The abstract + must be limited to one paragraph. +\end{abstract} + + +\section{Submission of papers to NeurIPS 2022} + + +Please read the instructions below carefully and follow them faithfully. + + +\subsection{Style} + + +Papers to be submitted to NeurIPS 2022 must be prepared according to the +instructions presented here. Papers may only be up to {\bf nine} pages long, +including figures. Additional pages \emph{containing only acknowledgments and +references} are allowed. Papers that exceed the page limit will not be +reviewed, or in any other way considered for presentation at the conference. + + +The margins in 2022 are the same as those in 2007, which allow for $\sim$$15\%$ +more words in the paper compared to earlier years. + +\cite{abadie2002instrumental} + +Authors are required to use the NeurIPS \LaTeX{} style files obtainable at the +NeurIPS website as indicated below. 
Please make sure you use the current files +and not previous versions. Tweaking the style files may be grounds for +rejection. + + +\subsection{Retrieval of style files} + + +The style files for NeurIPS and other conference information are available on +the World Wide Web at +\begin{center} + \url{http://www.neurips.cc/} +\end{center} +The file \verb+neurips_2022.pdf+ contains these instructions and illustrates the +various formatting requirements your NeurIPS paper must satisfy. + + +The only supported style file for NeurIPS 2022 is \verb+neurips_2022.sty+, +rewritten for \LaTeXe{}. \textbf{Previous style files for \LaTeX{} 2.09, + Microsoft Word, and RTF are no longer supported!} + + +The \LaTeX{} style file contains three optional arguments: \verb+final+, which +creates a camera-ready copy, \verb+preprint+, which creates a preprint for +submission to, e.g., arXiv, and \verb+nonatbib+, which will not load the +\verb+natbib+ package for you in case of package clash. + + +\paragraph{Preprint option} +If you wish to post a preprint of your work online, e.g., on arXiv, using the +NeurIPS style, please use the \verb+preprint+ option. This will create a +nonanonymized version of your work with the text ``Preprint. Work in progress.'' +in the footer. This version may be distributed as you see fit. Please \textbf{do + not} use the \verb+final+ option, which should \textbf{only} be used for +papers accepted to NeurIPS. + + +At submission time, please omit the \verb+final+ and \verb+preprint+ +options. This will anonymize your submission and add line numbers to aid +review. Please do \emph{not} refer to these line numbers in your paper as they +will be removed during generation of camera-ready copies. + + +The file \verb+neurips_2022.tex+ may be used as a ``shell'' for writing your +paper. All you have to do is replace the author, title, abstract, and text of +the paper with your own. 
+ + +The formatting instructions contained in these style files are summarized in +Sections \ref{gen_inst}, \ref{headings}, and \ref{others} below. + + +\section{General formatting instructions} +\label{gen_inst} + + +The text must be confined within a rectangle 5.5~inches (33~picas) wide and +9~inches (54~picas) long. The left margin is 1.5~inch (9~picas). Use 10~point +type with a vertical spacing (leading) of 11~points. Times New Roman is the +preferred typeface throughout, and will be selected for you by default. +Paragraphs are separated by \nicefrac{1}{2}~line space (5.5 points), with no +indentation. + + +The paper title should be 17~point, initial caps/lower case, bold, centered +between two horizontal rules. The top rule should be 4~points thick and the +bottom rule should be 1~point thick. Allow \nicefrac{1}{4}~inch space above and +below the title to rules. All pages should start at 1~inch (6~picas) from the +top of the page. + + +For the final version, authors' names are set in boldface, and each name is +centered above the corresponding address. The lead author's name is to be listed +first (left-most), and the co-authors' names (if different address) are set to +follow. If there is only one co-author, list both author and co-author side by +side. + + +Please pay special attention to the instructions in Section \ref{others} +regarding figures, tables, acknowledgments, and references. + + +\section{Headings: first level} +\label{headings} + + +All headings should be lower case (except for first word and proper nouns), +flush left, and bold. + + +First-level headings should be in 12-point type. + + +\subsection{Headings: second level} + + +Second-level headings should be in 10-point type. + + +\subsubsection{Headings: third level} + + +Third-level headings should be in 10-point type. 
+ + +\paragraph{Paragraphs} + + +There is also a \verb+\paragraph+ command available, which sets the heading in +bold, flush left, and inline with the text, with the heading followed by 1\,em +of space. + + +\section{Citations, figures, tables, references} +\label{others} + + +These instructions apply to everyone. + + +\subsection{Citations within the text} + + +The \verb+natbib+ package will be loaded for you by default. Citations may be +author/year or numeric, as long as you maintain internal consistency. As to the +format of the references themselves, any style is acceptable as long as it is +used consistently. + + +The documentation for \verb+natbib+ may be found at +\begin{center} + \url{http://mirrors.ctan.org/macros/latex/contrib/natbib/natnotes.pdf} +\end{center} +Of note is the command \verb+\citet+, which produces citations appropriate for +use in inline text. For example, +\begin{verbatim} + \citet{hasselmo} investigated\dots +\end{verbatim} +produces +\begin{quote} + Hasselmo, et al.\ (1995) investigated\dots +\end{quote} + + +If you wish to load the \verb+natbib+ package with options, you may add the +following before loading the \verb+neurips_2022+ package: +\begin{verbatim} + \PassOptionsToPackage{options}{natbib} +\end{verbatim} + + +If \verb+natbib+ clashes with another package you load, you can add the optional +argument \verb+nonatbib+ when loading the style file: +\begin{verbatim} + \usepackage[nonatbib]{neurips_2022} +\end{verbatim} + + +As submission is double blind, refer to your own published work in the third +person. That is, use ``In the previous work of Jones et al.\ [4],'' not ``In our +previous work [4].'' If you cite your other papers that are not widely available +(e.g., a journal paper under review), use anonymous author names in the +citation, e.g., an author of the form ``A.\ Anonymous.'' + + +\subsection{Footnotes} + + +Footnotes should be used sparingly. 
If you do require a footnote, indicate +footnotes with a number\footnote{Sample of the first footnote.} in the +text. Place the footnotes at the bottom of the page on which they appear. +Precede the footnote with a horizontal rule of 2~inches (12~picas). + + +Note that footnotes are properly typeset \emph{after} punctuation +marks.\footnote{As in this example.} + + +\subsection{Figures} + + +\begin{figure} + \centering + \fbox{\rule[-.5cm]{0cm}{4cm} \rule[-.5cm]{4cm}{0cm}} + \caption{Sample figure caption.} +\end{figure} + + +All artwork must be neat, clean, and legible. Lines should be dark enough for +purposes of reproduction. The figure number and caption always appear after the +figure. Place one line space before the figure caption and one line space after +the figure. The figure caption should be lower case (except for first word and +proper nouns); figures are numbered consecutively. + + +You may use color figures. However, it is best for the figure captions and the +paper body to be legible if the paper is printed in either black/white or in +color. + + +\subsection{Tables} + + +All tables must be centered, neat, clean and legible. The table number and +title always appear before the table. See Table~\ref{sample-table}. + + +Place one line space before the table title, one line space after the +table title, and one line space after the table. The table title must +be lower case (except for first word and proper nouns); tables are +numbered consecutively. + + +Note that publication-quality tables \emph{do not contain vertical rules.} We +strongly suggest the use of the \verb+booktabs+ package, which allows for +typesetting high-quality, professional tables: +\begin{center} + \url{https://www.ctan.org/pkg/booktabs} +\end{center} +This package was used to typeset Table~\ref{sample-table}. 
+ + +\begin{table} + \caption{Sample table title} + \label{sample-table} + \centering + \begin{tabular}{lll} + \toprule + \multicolumn{2}{c}{Part} \\ + \cmidrule(r){1-2} + Name & Description & Size ($\mu$m) \\ + \midrule + Dendrite & Input terminal & $\sim$100 \\ + Axon & Output terminal & $\sim$10 \\ + Soma & Cell body & up to $10^6$ \\ + \bottomrule + \end{tabular} +\end{table} + + +\section{Final instructions} + + +Do not change any aspects of the formatting parameters in the style files. In +particular, do not modify the width or length of the rectangle the text should +fit into, and do not change font sizes (except perhaps in the +\textbf{References} section; see below). Please note that pages should be +numbered. + + +\section{Preparing PDF files} + + +Please prepare submission files with paper size ``US Letter,'' and not, for +example, ``A4.'' + + +Fonts were the main cause of problems in the past years. Your PDF file must only +contain Type 1 or Embedded TrueType fonts. Here are a few instructions to +achieve this. + + +\begin{itemize} + + +\item You should directly generate PDF files using \verb+pdflatex+. + + +\item You can check which fonts a PDF file uses. In Acrobat Reader, select the + menu Files$>$Document Properties$>$Fonts and select Show All Fonts. You can + also use the program \verb+pdffonts+ which comes with \verb+xpdf+ and is + available out-of-the-box on most Linux machines. + + +\item The IEEE has recommendations for generating PDF files whose fonts are also + acceptable for NeurIPS. Please see + \url{http://www.emfield.org/icuwb2010/downloads/IEEE-PDF-SpecV32.pdf} + + +\item \verb+xfig+ ``patterned'' shapes are implemented with bitmap fonts. Use + ``solid'' shapes instead. + + +\item The \verb+\bbold+ package almost always uses bitmap fonts. 
You should use + the equivalent AMS Fonts: +\begin{verbatim} + \usepackage{amsfonts} +\end{verbatim} +followed by, e.g., \verb+\mathbb{R}+, \verb+\mathbb{N}+, or \verb+\mathbb{C}+ +for $\mathbb{R}$, $\mathbb{N}$ or $\mathbb{C}$. You can also use the following +workaround for reals, natural and complex: +\begin{verbatim} + \newcommand{\RR}{I\!\!R} %real numbers + \newcommand{\Nat}{I\!\!N} %natural numbers + \newcommand{\CC}{I\!\!\!\!C} %complex numbers +\end{verbatim} +Note that \verb+amsfonts+ is automatically loaded by the \verb+amssymb+ package. + + +\end{itemize} + + +If your file contains type 3 fonts or non embedded TrueType fonts, we will ask +you to fix it. + + +\subsection{Margins in \LaTeX{}} + + +Most of the margin problems come from figures positioned by hand using +\verb+\special+ or other commands. We suggest using the command +\verb+\includegraphics+ from the \verb+graphicx+ package. Always specify the +figure width as a multiple of the line width as in the example below: +\begin{verbatim} + \usepackage[pdftex]{graphicx} ... + \includegraphics[width=0.8\linewidth]{myfile.pdf} +\end{verbatim} +See Section 4.4 in the graphics bundle documentation +(\url{http://mirrors.ctan.org/macros/latex/required/graphics/grfguide.pdf}) + + +A number of width problems arise when \LaTeX{} cannot properly hyphenate a +line. Please give LaTeX hyphenation hints using the \verb+\-+ command when +necessary. + + +\begin{ack} +Use unnumbered first level headings for the acknowledgments. All acknowledgments +go at the end of the paper before the list of references. Moreover, you are required to declare +funding (financial activities supporting the submitted work) and competing interests (related financial activities outside the submitted work). +More information about this disclosure can be found at: \url{https://neurips.cc/Conferences/2022/PaperInformation/FundingDisclosure}. + + +Do {\bf not} include this section in the anonymized submission, only in the final paper. 
You can use the \texttt{ack} environment provided in the style file to automatically hide this section in the anonymized submission. +\end{ack} + + +\section*{References} + + +References follow the acknowledgments. Use unnumbered first-level heading for +the references. Any choice of citation style is acceptable as long as you are +consistent. It is permissible to reduce the font size to \verb+small+ (9 point) +when listing the references. +Note that the Reference section does not count towards the page limit. + +\medskip + +\bibliography{paper/references} + +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +\section*{Checklist} + + +%%% BEGIN INSTRUCTIONS %%% +The checklist follows the references. Please +read the checklist guidelines carefully for information on how to answer these +questions. For each question, change the default \answerTODO{} to \answerYes{}, +\answerNo{}, or \answerNA{}. You are strongly encouraged to include a {\bf +justification to your answer}, either by referencing the appropriate section of +your paper or providing a brief inline description. For example: +\begin{itemize} + \item Did you include the license to the code and datasets? \answerYes{See Section~\ref{gen_inst}.} + \item Did you include the license to the code and datasets? \answerNo{The code and the data are proprietary.} + \item Did you include the license to the code and datasets? \answerNA{} +\end{itemize} +Please do not modify the questions and only use the provided macros for your +answers. Note that the Checklist section does not count towards the page +limit. In your paper, please delete this instructions block and only keep the +Checklist section heading above along with the questions/answers below. +%%% END INSTRUCTIONS %%% + + +\begin{enumerate} + + +\item For all authors... +\begin{enumerate} + \item Do the main claims made in the abstract and introduction accurately reflect the paper's contributions and scope? 
+ \answerTODO{} + \item Did you describe the limitations of your work? + \answerTODO{} + \item Did you discuss any potential negative societal impacts of your work? + \answerTODO{} + \item Have you read the ethics review guidelines and ensured that your paper conforms to them? + \answerTODO{} +\end{enumerate} + + +\item If you are including theoretical results... +\begin{enumerate} + \item Did you state the full set of assumptions of all theoretical results? + \answerTODO{} + \item Did you include complete proofs of all theoretical results? + \answerTODO{} +\end{enumerate} + + +\item If you ran experiments... +\begin{enumerate} + \item Did you include the code, data, and instructions needed to reproduce the main experimental results (either in the supplemental material or as a URL)? + \answerTODO{} + \item Did you specify all the training details (e.g., data splits, hyperparameters, how they were chosen)? + \answerTODO{} + \item Did you report error bars (e.g., with respect to the random seed after running experiments multiple times)? + \answerTODO{} + \item Did you include the total amount of compute and the type of resources used (e.g., type of GPUs, internal cluster, or cloud provider)? + \answerTODO{} +\end{enumerate} + + +\item If you are using existing assets (e.g., code, data, models) or curating/releasing new assets... +\begin{enumerate} + \item If your work uses existing assets, did you cite the creators? + \answerTODO{} + \item Did you mention the license of the assets? + \answerTODO{} + \item Did you include any new assets either in the supplemental material or as a URL? + \answerTODO{} + \item Did you discuss whether and how consent was obtained from people whose data you're using/curating? + \answerTODO{} + \item Did you discuss whether the data you are using/curating contains personally identifiable information or offensive content? + \answerTODO{} +\end{enumerate} + + +\item If you used crowdsourcing or conducted research with human subjects... 
+\begin{enumerate} + \item Did you include the full text of instructions given to participants and screenshots, if applicable? + \answerTODO{} + \item Did you describe any potential participant risks, with links to Institutional Review Board (IRB) approvals, if applicable? + \answerTODO{} + \item Did you include the estimated hourly wage paid to participants and the total amount spent on participant compensation? + \answerTODO{} +\end{enumerate} + + +\end{enumerate} + + +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% + + +\appendix + + +\section{Appendix} + + +Optionally include extra information (complete proofs, additional experiments and plots) in the appendix. +This section will often be part of the supplemental material. + + \end{document} \ No newline at end of file diff --git a/paper/references.bib b/paper/references.bib new file mode 100644 index 0000000000000000000000000000000000000000..dbd5fd69d0a1a0fb8fe28a43f98e3a5b492f6877 --- /dev/null +++ b/paper/references.bib @@ -0,0 +1,1666 @@ +@inproceedings{LakkarajuExplanations, + title = {{``How Do I Fool You?'' Manipulating User Trust via Misleading Black Box Explanations}}, + booktitle = {Proceedings of the AAAI/ACM Conference on AI, Ethics, and Society}, + author = {Lakkaraju, Himabindu and Bastani, Osbert}, + pages = {79--85} +} + +@inproceedings{RibeiroWhyClassifier, + title = {{``Why Should I Trust You?'' 
Explaining the Predictions of Any Classifier}}, + booktitle = {Proceedings of the 22nd ACM SIGKDD International Conference on Knowledge Discovery and Data Mining}, + author = {Ribeiro, Marco Tulio and Singh, Sameer and Guestrin, Carlos}, + pages = {1135--1144} +} + +@inproceedings{HannekeALearning, + title = {{A Bound on the Label Complexity of Agnostic Active Learning}}, + booktitle = {Proceedings of the 24th International Conference on Machine Learning}, + author = {Hanneke, Steve}, + pages = {353--360} +} + +@article{deOliveiraAData, + title = {{A Framework and Benchmarking Study for Counterfactual Generating Methods on Tabular Data}}, + author = {de Oliveira, Raphael Mazzine Barbosa and Martens, David}, + number = {16}, + pages = {7274}, + volume = {11} +} + +@unpublished{AngelopoulosAQuantification, + title = {{A Gentle Introduction to Conformal Prediction and Distribution-Free Uncertainty Quantification}}, + author = {Angelopoulos, Anastasios N and Bates, Stephen}, + arxivId = {2107.07511} +} + +@article{GrettonATest, + title = {{A Kernel Two-Sample Test}}, + author = {Gretton, Arthur and Borgwardt, Karsten M and Rasch, Malte J and Sch{\"{o}}lkopf, Bernhard and Smola, Alexander}, + number = {1}, + pages = {723--773}, + volume = {13} +} + +@article{McCullochAActivity, + title = {{A Logical Calculus of the Ideas Immanent in Nervous Activity}}, + author = {McCulloch, Warren S and Pitts, Walter}, + number = {1}, + pages = {99--115}, + volume = {52} +} + +@article{JolliffeALASSO, + title = {{A Modified Principal Component Technique Based on the LASSO}}, + author = {Jolliffe, Ian T and Trendafilov, Nickolay T and Uddin, Mudassir}, + number = {3}, + pages = {531--547}, + volume = {12} +} + +@book{FriedmanA1867-1960, + title = {{A Monetary History of the United States, 1867-1960}}, + author = {Friedman, Milton and Schwartz, Anna Jacobson}, + volume = {14}, + publisher = {Princeton University Press} +} + +@article{WittenAAnalysis, + title = {{A Penalized Matrix 
Decomposition, with Applications to Sparse Principal Components and Canonical Correlation Analysis}}, + author = {Witten, Daniela M and Tibshirani, Robert and Hastie, Trevor}, + number = {3}, + pages = {515--534}, + volume = {10} +} + +@article{SturmAHorse, + title = {{A Simple Method to Determine If a Music Information Retrieval System Is a ``Horse''}}, + author = {Sturm, Bob L}, + number = {6}, + pages = {1636--1644}, + volume = {16} +} + +@article{HsiehASelectivity, + title = {{A Social Interactions Model with Endogenous Friendship Formation and Selectivity}}, + author = {Hsieh, Chih-Sheng and Lee, Lung Fei}, + number = {2}, + pages = {301--319}, + volume = {31} +} + +@unpublished{KarimiAProspects, + title = {{A Survey of Algorithmic Recourse: Definitions, Formulations, Solutions, and Prospects}}, + author = {Karimi, Amir-Hossein and Barthe, Gilles and Sch{\"{o}}lkopf, Bernhard and Valera, Isabel}, + arxivId = {2010.04050} +} + +@unpublished{BrancoADistributions, + title = {{A Survey of Predictive Modelling under Imbalanced Distributions}}, + author = {Branco, Paula and Torgo, Luis and Ribeiro, Rita}, + arxivId = {1505.01658} +} + +@article{GamaAAdaptation, + title = {{A Survey on Concept Drift Adaptation}}, + author = {Gama, João and {\v{Z}}liobait{\.{e}}, Indrė and Bifet, Albert and Pechenizkiy, Mykola and Bouchachia, Abdelhamid}, + number = {4}, + pages = {1--37}, + volume = {46} +} + +@inproceedings{LundbergAPredictions, + title = {{A Unified Approach to Interpreting Model Predictions}}, + booktitle = {Proceedings of the 31st International Conference on Neural Information Processing Systems}, + author = {Lundberg, Scott M and Lee, Su-In}, + pages = {4768--4777} +} + +@inproceedings{UstunActionableClassification, + title = {{Actionable Recourse in Linear Classification}}, + booktitle = {Proceedings of the Conference on Fairness, Accountability, and Transparency}, + author = {Ustun, Berk and Spangher, Alexander and Liu, Yang}, + pages = {10--19} +} + 
+@unpublished{KingmaAdam:Optimization, + title = {{Adam: A Method for Stochastic Optimization}}, + author = {Kingma, Diederik P and Ba, Jimmy}, + arxivId = {1412.6980} +} + +@article{ChettyAdjustmentRecords, + title = {{Adjustment Costs, Firm Responses, and Micro vs. Macro Labor Supply Elasticities: Evidence from Danish Tax Records}}, + author = {Chetty, Raj and Friedman, John N and Olsen, Tore and Pistaferri, Luigi}, + number = {2}, + pages = {749--804}, + volume = {126} +} + +@unpublished{RaghunathanAdversarialGeneralization, + title = {{Adversarial Training Can Hurt Generalization}}, + author = {Raghunathan, Aditi and Xie, Sang Michael and Yang, Fanny and Duchi, John C and Liang, Percy}, + arxivId = {1906.06032} +} + +@unpublished{KarimiAlgorithmicApproach, + title = {{Algorithmic Recourse under Imperfect Causal Knowledge: A Probabilistic Approach}}, + author = {Karimi, Amir-Hossein and Von K{\"{u}}gelgen, Julius and Sch{\"{o}}lkopf, Bernhard and Valera, Isabel}, + arxivId = {2006.06831} +} + +@inproceedings{KarimiAlgorithmicInterventions, + title = {{Algorithmic Recourse: From Counterfactual Explanations to Interventions}}, + booktitle = {Proceedings of the 2021 ACM Conference on Fairness, Accountability, and Transparency}, + author = {Karimi, Amir-Hossein and Sch{\"{o}}lkopf, Bernhard and Valera, Isabel}, + pages = {353--362} +} + +@book{WassermanAllStatistics, + title = {{All of Nonparametric Statistics}}, + author = {Wasserman, Larry}, + publisher = {Springer Science {\&} Business Media} +} + +@book{WassermanAllInference, + title = {{All of Statistics: A Concise Course in Statistical Inference}}, + author = {Wasserman, Larry}, + publisher = {Springer Science {\&} Business Media} +} + +@article{HeckmanAlternativeOverview, + title = {{Alternative Methods for Evaluating the Impact of Interventions: An Overview}}, + author = {Heckman, James J and Robb Jr, Richard}, + number = {1-2}, + pages = {239--267}, + volume = {30} +} + +@article{GrahamAnHeterogeneity, + 
title = {{An Econometric Model of Network Formation with Degree Heterogeneity}}, + author = {Graham, Bryan S}, + number = {4}, + pages = {1033--1063}, + volume = {85} +} + +@article{ChapelleAnSampling, + title = {{An Empirical Evaluation of Thompson Sampling}}, + author = {Chapelle, Olivier and Li, Lihong}, + pages = {2249--2257}, + volume = {24} +} + +@article{MostellerAnUtility, + title = {{An Experimental Measurement of Utility}}, + author = {Mosteller, Frederick and Nogee, Philip}, + number = {5}, + pages = {371--404}, + volume = {59} +} + +@article{FixAnEstimation, + title = {{An Important Contribution to Nonparametric Discriminant Analysis and Density Estimation}}, + author = {Fix, E and Hodges, J}, + number = {57}, + pages = {233--238}, + volume = {3} +} + +@article{ChandolaAnomalySurvey, + title = {{Anomaly Detection: A Survey}}, + author = {Chandola, Varun and Banerjee, Arindam and Kumar, Vipin}, + number = {3}, + pages = {1--58}, + volume = {41} +} + +@article{SimsAreAnalysis, + title = {{Are Forecasting Models Usable for Policy Analysis?}}, + author = {Sims, Christopher A and {others}}, + number = {Win}, + pages = {2--16}, + volume = {10} +} + +@article{DanielssonArtificialRisk, + title = {{Artificial Intelligence and Systemic Risk}}, + author = {Danielsson, Jon and Macrae, Robert and Uthemann, Andreas}, + pages = {106290} +} + +@misc{OECDArtificialMakers, + title = {{Artificial Intelligence, Machine Learning and Big Data in Finance: Opportunities, Challenges and Implications for Policy Makers}}, + author = {{OECD}}, + url = {https://www.oecd.org/finance/financial-markets/Artificial-intelligence-machine-learning-big-data-in-finance.pdf} +} + +@misc{OECDArtificialMakersb, + title = {{Artificial Intelligence, Machine Learning and Big Data in Finance: Opportunities, Challenges and Implications for Policy Makers}}, + author = {{OECD}}, + publisher = {OECD}, + url = 
{https://www.oecd.org/finance/financial-markets/Artificial-intelligence-machine-learning-big-data-in-finance.pdf} +} + +@misc{ManokhinAwesomePrediction, + title = {{Awesome Conformal Prediction}}, + author = {Manokhin, Valery} +} + +@article{KirschBatchbald:Learning, + title = {{Batchbald: Efficient and Diverse Batch Acquisition for Deep Bayesian Active Learning}}, + author = {Kirsch, Andreas and Van Amersfoort, Joost and Gal, Yarin}, + pages = {7026--7037}, + volume = {32} +} + +@unpublished{HoffBayes-OptimalControl, + title = {{Bayes-Optimal Prediction with Frequentist Coverage Control}}, + author = {Hoff, Peter}, + arxivId = {2105.14045} +} + +@unpublished{HoulsbyBayesianLearning, + title = {{Bayesian Active Learning for Classification and Preference Learning}}, + author = {Houlsby, Neil and Husz{\'{a}}r, Ferenc and Ghahramani, Zoubin and Lengyel, Máté}, + arxivId = {1112.5745} +} + +@book{GelmanBayesianAnalysis, + title = {{Bayesian Data Analysis}}, + author = {Gelman, Andrew and Carlin, John B and Stern, Hal S and Dunson, David B and Vehtari, Aki and Rubin, Donald B}, + publisher = {CRC press} +} + +@incollection{GoanBayesianSurvey, + title = {{Bayesian Neural Networks: An Introduction and Survey}}, + booktitle = {Case Studies in Applied Bayesian Data Science}, + author = {Goan, Ethan and Fookes, Clinton}, + pages = {45--87}, + publisher = {Springer} +} + +@unpublished{StantonBayesianGuarantees, + title = {{Bayesian Optimization with Conformal Coverage Guarantees}}, + author = {Stanton, Samuel and Maddox, Wesley and Wilson, Andrew Gordon}, + arxivId = {2210.12496} +} + +@article{LeeBestDisturbances, + title = {{Best Spatial Two-Stage Least Squares Estimators for a Spatial Autoregressive Model with Autoregressive Disturbances}}, + author = {Lee, Lung-fei}, + number = {4}, + pages = {307--335}, + volume = {22} +} + +@unpublished{Navarro-MartinezBridgingDonations, + title = {{Bridging the Gap between the Lab and the Field: Dictator Games and Donations}}, + author 
= {Navarro-Martinez, Daniel and Wang, Xinghua} +} + +@unpublished{PawelczykCarla:Algorithms, + title = {{Carla: A Python Library to Benchmark Algorithmic Recourse and Counterfactual Explanation Algorithms}}, + author = {Pawelczyk, Martin and Bielawski, Sascha and van den Heuvel, Johannes and Richter, Tobias and Kasneci, Gjergji}, + arxivId = {2108.00783} +} + +@article{DehejiaCausalPrograms, + title = {{Causal Effects in Nonexperimental Studies: Reevaluating the Evaluation of Training Programs}}, + author = {Dehejia, Rajeev H and Wahba, Sadek}, + number = {448}, + pages = {1053--1062}, + volume = {94} +} + +@article{SimonsonChoiceEffects, + title = {{Choice Based on Reasons: The Case of Attraction and Compromise Effects}}, + author = {Simonson, Itamar}, + number = {2}, + pages = {158--174}, + volume = {16} +} + +@article{FalkCleanEffects, + title = {{Clean Evidence on Peer Effects}}, + author = {Falk, Armin and Ichino, Andrea}, + number = {1}, + pages = {39--57}, + volume = {24} +} + +@article{FehrCooperationExperiments, + title = {{Cooperation and Punishment in Public Goods Experiments}}, + author = {Fehr, Ernst and Gachter, Simon}, + number = {4}, + pages = {980--994}, + volume = {90} +} + +@article{SlackCounterfactualManipulated, + title = {{Counterfactual Explanations Can Be Manipulated}}, + author = {Slack, Dylan and Hilgard, Anna and Lakkaraju, Himabindu and Singh, Sameer}, + volume = {34} +} + +@unpublished{SpoonerCounterfactualModels, + title = {{Counterfactual Explanations for Arbitrary Regression Models}}, + author = {Spooner, Thomas and Dervovic, Danial and Long, Jason and Shepard, Jon and Chen, Jiahao and Magazzeni, Daniele}, + arxivId = {2106.15212} +} + +@unpublished{VermaCounterfactualReview, + title = {{Counterfactual Explanations for Machine Learning: A Review}}, + author = {Verma, Sahil and Dickerson, John and Hines, Keegan}, + arxivId = {2010.10596} +} + +@inproceedings{DaiCounterfactualXai, + title = {{Counterfactual Explanations for Prediction 
and Diagnosis in Xai}}, + author = {Dai, Xinyue and Keane, Mark T and Shalloo, Laurence and Ruelle, Elodie and Byrne, Ruth M J}, + pages = {215--226} +} + +@article{WachterCounterfactualGDPR, + title = {{Counterfactual Explanations without Opening the Black Box: Automated Decisions and the GDPR}}, + author = {Wachter, Sandra and Mittelstadt, Brent and Russell, Chris}, + pages = {841}, + volume = {31} +} + +@misc{AltmeyerCounterfactualExplanations.JlRecourse, + title = {{CounterfactualExplanations.Jl - a Julia Package for Counterfactual Explanations and Algorithmic Recourse}}, + author = {Altmeyer, Patrick}, + url = {https://github.com/pat-alt/CounterfactualExplanations.jl} +} + +@misc{AltmeyerCounterfactualExplanations.JlRecourseb, + title = {{CounterfactualExplanations.Jl - a Julia Package for Counterfactual Explanations and Algorithmic Recourse}}, + author = {Altmeyer, Patrick}, + url = {https://github.com/pat-alt/CounterfactualExplanations.jl} +} + +@book{MorganCounterfactualsInference, + title = {{Counterfactuals and Causal Inference}}, + author = {Morgan, Stephen L and Winship, Christopher}, + publisher = {Cambridge University Press} +} + +@unpublished{ZhengDagsLearning, + title = {{Dags with No Tears: Continuous Optimization for Structure Learning}}, + author = {Zheng, Xun and Aragam, Bryon and Ravikumar, Pradeep and Xing, Eric P}, + arxivId = {1803.01422} +} + +@article{BecharaDecidingStrategy, + title = {{Deciding Advantageously before Knowing the Advantageous Strategy}}, + author = {Bechara, Antoine and Damasio, Hanna and Tranel, Daniel and Damasio, Antonio R}, + number = {5304}, + pages = {1293--1295}, + volume = {275} +} + +@inproceedings{GalDeepData, + title = {{Deep Bayesian Active Learning with Image Data}}, + booktitle = {International Conference on Machine Learning}, + author = {Gal, Yarin and Islam, Riashat and Ghahramani, Zoubin}, + pages = {1183--1192}, + publisher = {PMLR} +} + +@book{GoodfellowDeepLearning, + title = {{Deep Learning}}, + author 
= {Goodfellow, Ian and Bengio, Yoshua and Courville, Aaron}, + publisher = {MIT Press} +} + +@unpublished{BorisovDeepSurvey, + title = {{Deep Neural Networks and Tabular Data: A Survey}}, + author = {Borisov, Vadim and Leemann, Tobias and Se{\ss}ler, Kathrin and Haug, Johannes and Pawelczyk, Martin and Kasneci, Gjergji}, + arxivId = {2110.01889} +} + +@article{AltmeyerDeepData, + title = {{Deep Vector Autoregression for Macroeconomic Data}}, + author = {Altmeyer, Patrick and Agusti, Marc and Vidal-Quadras Costa, Ignacio}, + url = {https://thevoice.bse.eu/wp-content/uploads/2021/07/ds21-project-agusti-et-al.pdf} +} + +@book{AltmeyerDeepvars:Autoregession, + title = {{Deepvars: Deep Vector Autoregression}}, + author = {Altmeyer, Patrick} +} + +@article{KehoeDefenceMachines, + title = {{Defence against Adversarial Attacks Using Classical and Quantum-Enhanced Boltzmann Machines}}, + author = {Kehoe, Aidan and Wittek, Peter and Xue, Yanbo and Pozas-Kerstjens, Alejandro} +} + +@misc{GroupDetailedOutbreak, + title = {{Detailed Epidemiological Data from the COVID-19 Outbreak}}, + author = {Group, Open COVID-19 Data Working} +} + +@inproceedings{DombrowskiDiffeomorphicFlows, + title = {{Diffeomorphic Explanations with Normalizing Flows}}, + booktitle = {ICML Workshop on Invertible Neural Networks, Normalizing Flows, and Explicit Likelihood Models}, + author = {Dombrowski, Ann-Kathrin and Gerken, Jan E and Kessel, Pan} +} + +@unpublished{JeanneretDiffusionExplanations, + title = {{Diffusion Models for Counterfactual Explanations}}, + author = {Jeanneret, Guillaume and Simon, Lo{\"\i}c and Jurie, Frédéric}, + arxivId = {2203.15636} +} + +@article{RomerDoesSchwartz, + title = {{Does Monetary Policy Matter? A New Test in the Spirit of Friedman and Schwartz}}, + author = {Romer, Christina D and Romer, David H}, + pages = {121--170}, + volume = {4} +} + +@article{CarrellDoesAchievement, + title = {{Does Your Cohort Matter? 
Measuring Peer Effects in College Achievement}}, + author = {Carrell, Scott E and Fullerton, Richard L and West, James E}, + number = {3}, + pages = {439--464}, + volume = {27} +} + +@inproceedings{GalDropoutLearning, + title = {{Dropout as a Bayesian Approximation: Representing Model Uncertainty in Deep Learning}}, + booktitle = {International Conference on Machine Learning}, + author = {Gal, Yarin and Ghahramani, Zoubin}, + pages = {1050--1059}, + publisher = {PMLR} +} + +@article{SrivastavaDropout:Overfitting, + title = {{Dropout: A Simple Way to Prevent Neural Networks from Overfitting}}, + author = {Srivastava, Nitish and Hinton, Geoffrey and Krizhevsky, Alex and Sutskever, Ilya and Salakhutdinov, Ruslan}, + number = {1}, + pages = {1929--1958}, + volume = {15} +} + +@article{FangDynamicEvidence, + title = {{Dynamic Inefficiencies in an Employment-Based Health Insurance System: Theory and Evidence}}, + author = {Fang, Hanming and Gavazza, Alessandro}, + number = {7}, + pages = {3047--3077}, + volume = {101} +} + +@article{GreeneEconometric71e, + title = {{Econometric Analysis, 71e}}, + author = {Greene, William H} +} + +@article{LucasEconometricHolland, + title = {{Econometric Policy Evaluation: A Critique `, in K. 
Brunner and A Meltzer, The Phillips Curve and Labor Markets, North Holland}}, + author = {Lucas, J R} +} + +@book{PerryEconomicAfter, + title = {{Economic Events, Ideas, and Policies: The 1960s and After}}, + author = {Perry, George L and Tobin, James}, + publisher = {Brookings Institution Press} +} + +@article{GretherEconomicPhenomenon, + title = {{Economic Theory of Choice and the Preference Reversal Phenomenon}}, + author = {Grether, David M and Plott, Charles R}, + number = {4}, + pages = {623--638}, + volume = {69} +} + +@article{VanBovenEgocentricEffect., + title = {{Egocentric Empathy Gaps between Owners and Buyers: Misperceptions of the Endowment Effect.}}, + author = {Van Boven, Leaf and Dunning, David and Loewenstein, George}, + number = {1}, + pages = {66}, + volume = {79} +} + +@inproceedings{AltmeyerEndogenousRecourse, + title = {{Endogenous Macrodynamics in Algorithmic Recourse}}, + booktitle = {First IEEE Conference on Secure and Trustworthy Machine Learning}, + author = {Altmeyer, Patrick and Angela, Giovan and Buszydlik, Aleksander and Dobiczek, Karol and van Deursen, Arie and Liem, Cynthia} +} + +@article{XuEpidemiologicalInformation, + title = {{Epidemiological Data from the COVID-19 Outbreak, Real-Time Case Information}}, + author = {Xu, Bo and Gutierrez, Bernardo and Mekaru, Sumiko and Sewalk, Kara and Goodwin, Lauren and Loskill, Alyssa and Cohn, Emily and Hswen, Yulin and Hill, Sarah C and Cobo, Maria M and Zarebski, Alexander and Li, Sabrina and Wu, Chieh-Hsi and Hulland, Erin and Morgan, Julia and Wang, Lin and O'Brien, Katelynn and Scarpino, Samuel V and Brownstein, John S and Pybus, Oliver G and Pigott, David M and Kraemer, Moritz U G}, + number = {106}, + volume = {7}, + doi = {10.1038/s41597-020-0448-0} +} 
+ +@article{JohnssonEstimationApproach, + title = {{Estimation of Peer Effects in Endogenous Social Networks: Control Function Approach}}, + author = {Johnsson, Ida and Moon, Hyungsik Roger}, + number = {2}, + pages = {328--345}, + volume = {103} +} + +@inproceedings{NelsonEvaluatingAlgorithms, + title = {{Evaluating Model Drift in Machine Learning Algorithms}}, + booktitle = {2015 IEEE Symposium on Computational Intelligence for Security and Defense Applications (CISDA)}, + author = {Nelson, Kevin and Corbin, George and Anania, Mark and Kovacs, Matthew and Tobias, Jeremy and Blowers, Misty}, + pages = {1--8}, + publisher = {IEEE} +} + +@article{KahnemanExperimentalTheorem, + title = {{Experimental Tests of the Endowment Effect and the Coase Theorem}}, + author = {Kahneman, Daniel and Knetsch, Jack L and Thaler, Richard H}, + number = {6}, + pages = {1325--1348}, + volume = {98} +} + +@article{ArrietaExplainableAI, + title = {{Explainable Artificial Intelligence (XAI): Concepts, Taxonomies, Opportunities and Challenges toward Responsible AI}}, + author = {Arrieta, Alejandro Barredo and Diaz-Rodriguez, Natalia and Del Ser, Javier and Bennetot, Adrien and Tabik, Siham and Barbado, Alberto and Garcia, Salvador and Gil-Lopez, Sergio and Molina, Daniel and Benjamins, Richard and {others}}, + pages = {82--115}, + volume = {58} +} + +@article{CascarinoExplainableLearning, + title = {{Explainable Artificial Intelligence: Interpreting Default Forecasting Models Based on Machine Learning}}, + author = {Cascarino, Giuseppe and Moscatelli, Mirko and Parlapiano, Fabio}, + number = {674} +} + +@unpublished{GoodfellowExplainingExamples, + title = {{Explaining and Harnessing Adversarial Examples}}, + author = {Goodfellow, Ian J and Shlens, Jonathon and Szegedy, Christian}, + arxivId = {1412.6572} +} + +@inproceedings{MittelstadtExplainingAI, + title = {{Explaining Explanations in AI}}, + booktitle = {Proceedings of the Conference on Fairness, Accountability, and Transparency}, + 
author = {Mittelstadt, Brent and Russell, Chris and Wachter, Sandra}, + pages = {279--288} +} + +@inproceedings{MothilalExplainingExplanations, + title = {{Explaining Machine Learning Classifiers through Diverse Counterfactual Explanations}}, + booktitle = {Proceedings of the 2020 Conference on Fairness, Accountability, and Transparency}, + author = {Mothilal, Ramaravind K and Sharma, Amit and Tan, Chenhao}, + pages = {607--617} +} + +@article{MillerExplanationSciences, + title = {{Explanation in Artificial Intelligence: Insights from the Social Sciences}}, + author = {Miller, Tim}, + pages = {1--38}, + volume = {267} +} + +@article{DhurandharExplanationsNegatives, + title = {{Explanations Based on the Missing: Towards Contrastive Explanations with Pertinent Negatives}}, + author = {Dhurandhar, Amit and Chen, Pin-Yu and Luss, Ronny and Tu, Chun-Chen and Ting, Paishun and Shanmugam, Karthikeyan and Das, Payel}, + volume = {31} +} + +@unpublished{KuiperExploringAuthorities, + title = {{Exploring Explainable AI in the Financial Sector: Perspectives of Banks and Supervisory Authorities}}, + author = {Kuiper, Ouren and van den Berg, Martin and van den Burgt, Joost and Leijnen, Stefan}, + arxivId = {2111.02244} +} + +@inproceedings{PoyiadziFACE:Explanations, + title = {{FACE: Feasible and Actionable Counterfactual Explanations}}, + booktitle = {Proceedings of the AAAI/ACM Conference on AI, Ethics, and Society}, + author = {Poyiadzi, Rafael and Sokol, Kacper and Santos-Rodriguez, Raul and De Bie, Tijl and Flach, Peter}, + pages = {344--350} +} + +@article{RabanserFailingShift, + title = {{Failing Loudly: An Empirical Study of Methods for Detecting Dataset Shift}}, + author = {Rabanser, Stephan and G{\"{u}}nnemann, Stephan and Lipton, Zachary}, + volume = {32} +} + +@article{JohanssonFailureTask, + title = {{Failure to Detect Mismatches between Intention and Outcome in a Simple Decision Task}}, + author = {Johansson, Petter and Hall, Lars and Sikstr{\"{o}}m, Sverker and 
Olsson, Andreas}, + number = {5745}, + pages = {116--119}, + volume = {310} +} + +@misc{BarocasFairnessLearning, + title = {{Fairness and Machine Learning}}, + author = {Barocas, Solon and Hardt, Moritz and Narayanan, Arvind}, + url = {https://fairmlbook.org/index.html} +} + +@inproceedings{JabbariFairnessLearning, + title = {{Fairness in Reinforcement Learning}}, + booktitle = {International Conference on Machine Learning}, + author = {Jabbari, Shahin and Joseph, Matthew and Kearns, Michael and Morgenstern, Jamie and Roth, Aaron}, + pages = {1617--1626}, + publisher = {PMLR} +} + +@unpublished{InnesFashionableFlux, + title = {{Fashionable Modelling with Flux}}, + author = {Innes, Michael and Saba, Elliot and Fischer, Keno and Gandhi, Dhairya and Rudilosso, Marco Concetto and Joy, Neethu Mariya and Karmali, Tejan and Pal, Avik and Shah, Viral}, + arxivId = {1811.01457} +} + +@inproceedings{SatopaaFindingBehavior, + title = {{Finding a ``Kneedle'' in a Haystack: Detecting Knee Points in System Behavior}}, + booktitle = {2011 31st International Conference on Distributed Computing Systems Workshops}, + author = {Satopaa, Ville and Albrecht, Jeannie and Irwin, David and Raghavan, Barath}, + pages = {166--171}, + publisher = {IEEE} +} + +@article{AuerFinite-TimeProblem, + title = {{Finite-Time Analysis of the Multiarmed Bandit Problem}}, + author = {Auer, Peter and Cesa-Bianchi, Nicolo and Fischer, Paul}, + number = {2}, + pages = {235--256}, + volume = {47} +} + +@article{InnesFlux:Julia, + title = {{Flux: Elegant Machine Learning with Julia}}, + author = {Innes, Mike}, + number = {25}, + pages = {602}, + volume = {3} +} + +@inproceedings{SlackFoolingMethods, + title = {{Fooling Lime and Shap: Adversarial Attacks on Post Hoc Explanation Methods}}, + booktitle = {Proceedings of the AAAI/ACM Conference on AI, Ethics, and Society}, + author = {Slack, Dylan and Hilgard, Sophie and Jia, Emily and Singh, Sameer and Lakkaraju, Himabindu}, + pages = {180--186} +} + 
+@article{JosephForecastingUp,
+  title = {{Forecasting UK Inflation Bottom Up}},
+  author = {Joseph, Andreas and Kalamara, Eleni and Kapetanios, George and Potjagailo, Galina}
+}
+
+@article{ZhangForecastingArt,
+  title = {{Forecasting with Artificial Neural Networks:: The State of the Art}},
+  author = {Zhang, Guoqiang and Patuwo, B Eddy and Hu, Michael Y},
+  number = {1},
+  pages = {35--62},
+  volume = {14}
+}
+
+@article{McCrackenFRED-MD:Research,
+  title = {{FRED-MD: A Monthly Database for Macroeconomic Research}},
+  author = {McCracken, Michael W and Ng, Serena},
+  number = {4},
+  pages = {574--589},
+  volume = {34}
+}
+
+@article{CarrellFromFormation,
+  title = {{From Natural Variation to Optimal Policy? The Importance of Endogenous Peer Group Formation}},
+  author = {Carrell, Scott E and Sacerdote, Bruce I and West, James E},
+  number = {3},
+  pages = {855--882},
+  volume = {81}
+}
+
+@inproceedings{RasmussenGaussianLearning,
+  title = {{Gaussian Processes in Machine Learning}},
+  booktitle = {Summer School on Machine Learning},
+  author = {Rasmussen, Carl Edward},
+  pages = {63--71},
+  publisher = {Springer}
+}
+
+@inproceedings{BuolamwiniGenderClassification,
+  title = {{Gender Shades: Intersectional Accuracy Disparities in Commercial Gender Classification}},
+  booktitle = {Conference on Fairness, Accountability and Transparency},
+  author = {Buolamwini, Joy and Gebru, Timnit},
+  pages = {77--91},
+  publisher = {PMLR}
+}
+
+@article{CarrizosaGeneratingOptimization,
+  title = {{Generating Collective Counterfactual Explanations in Score-Based Classification via Mathematical Optimization}},
+  author = {Carrizosa, Emilio and Ram{\'{i}}rez-Ayerbe, Jasone and Romero, Dolores}
+}
+
+@inproceedings{SchutGeneratingUncertainties,
+  title = {{Generating Interpretable Counterfactual Explanations By Implicit Minimisation of Epistemic and Aleatoric Uncertainties}},
+  booktitle = {International Conference on Artificial Intelligence and Statistics},
+  author = {Schut, Lisa and 
Key, Oscar and Mc Grath, Rory and Costabello, Luca and Sacaleanu, Bogdan and Gal, Yarin and {others}}, + pages = {1756--1764}, + publisher = {PMLR} +} + +@misc{HoffmanGermanData, + title = {{German Credit Data}}, + author = {Hoffman, Hans}, + url = {https://archive.ics.uci.edu/ml/datasets/statlog+(german+credit+data)} +} + +@misc{HoffmanGermanDatab, + title = {{German Credit Data}}, + author = {Hoffman, Hans}, + url = {https://archive.ics.uci.edu/ml/datasets/statlog+(german+credit+data)} +} + +@unpublished{AntoranGettingEstimates, + title = {{Getting a Clue: A Method for Explaining Uncertainty Estimates}}, + author = {Antor{\'{a}}n, Javier and Bhatt, Umang and Adel, Tameem and Weller, Adrian and Hern{\'{a}}ndez-Lobato, José Miguel}, + arxivId = {2006.06848} +} + +@misc{KaggleGiveYears., + title = {{Give Me Some Credit, Improve on the State of the Art in Credit Scoring by Predicting the Probability That Somebody Will Experience Financial Distress in the next Two Years.}}, + author = {{Kaggle}}, + publisher = {Kaggle}, + url = {https://www.kaggle.com/c/GiveMeSomeCredit} +} + +@misc{CompetitionGiveYears., + title = {{Give Me Some Credit, Improve on the State of the Art in Credit Scoring by Predicting the Probability That Somebody Will Experience Financial Distress in the next Two Years.}}, + author = {Competition, Kaggle}, + url = {https://www.kaggle.com/c/GiveMeSomeCredit} +} + +@article{MarkleGoalsDependence, + title = {{Goals as Reference Points in Marathon Running: A Novel Test of Reference Dependence}}, + author = {Markle, Alex and Wu, George and White, Rebecca and Sackett, Aaron}, + number = {1}, + pages = {19--50}, + volume = {56} +} + +@unpublished{LachapelleGradient-BasedLearning, + title = {{Gradient-Based Neural Dag Learning}}, + author = {Lachapelle, Sébastien and Brouillard, Philippe and Deleu, Tristan and Lacoste-Julien, Simon}, + arxivId = {1906.02226} +} + +@unpublished{JospinHands-onUsers, + title = {{Hands-on Bayesian Neural Networks–a Tutorial for 
Deep Learning Users}}, + author = {Jospin, Laurent Valentin and Buntine, Wray and Boussaid, Farid and Laga, Hamid and Bennamoun, Mohammed}, + arxivId = {2007.06823} +} + +@article{UngemachHowDelay, + title = {{How Incidental Values from the Environment Affect Decisions about Money, Risk, and Delay}}, + author = {Ungemach, Christoph and Stewart, Neil and Reimers, Stian}, + number = {2}, + pages = {253--260}, + volume = {22} +} + +@article{ManskiIdentificationProblem, + title = {{Identification of Endogenous Social Effects: The Reflection Problem}}, + author = {Manski, Charles F}, + number = {3}, + pages = {531--542}, + volume = {60} +} + +@article{BramoulleIdentificationNetworks, + title = {{Identification of Peer Effects through Social Networks}}, + author = {Bramoull{\'{e}}, Yann and Djebbari, Habiba and Fortin, Bernard}, + number = {1}, + pages = {41--55}, + volume = {150} +} + +@article{GilbertImmuneForecasting., + title = {{Immune Neglect: A Source of Durability Bias in Affective Forecasting.}}, + author = {Gilbert, Daniel T and Pinel, Elizabeth C and Wilson, Timothy D and Blumberg, Stephen J and Wheatley, Thalia P}, + number = {3}, + pages = {617}, + volume = {75} +} + +@article{HamzacebiImprovingForecasting, + title = {{Improving Artificial Neural Networks' Performance in Seasonal Time Series Forecasting}}, + author = {Hamza{\c{c}}ebi, Coşkun}, + number = {23}, + pages = {4550--4559}, + volume = {178} +} + +@unpublished{ImmerImprovingLinearization, + title = {{Improving Predictions of Bayesian Neural Networks via Local Linearization}}, + author = {Immer, Alexander and Korzepa, Maciej and Bauer, Matthias}, + arxivId = {2008.08400} +} + +@article{HershfieldIncreasingSelf, + title = {{Increasing Saving Behavior through Age-Progressed Renderings of the Future Self}}, + author = {Hershfield, Hal E and Goldstein, Daniel G and Sharpe, William F and Fox, Jesse and Yeykelis, Leo and Carstensen, Laura L and Bailenson, Jeremy N}, + number = {SPL}, + pages = {S23–S37}, + 
volume = {48}
+}
+
+@article{AngelucciIndirectConsumption,
+  title = {{Indirect Effects of an Aid Program: How Do Cash Transfers Affect Ineligibles' Consumption?}},
+  author = {Angelucci, Manuela and De Giorgi, Giacomo},
+  number = {1},
+  pages = {486--508},
+  volume = {99}
+}
+
+@article{AbadieInstrumentalEarnings,
+  title = {{Instrumental Variables Estimates of the Effect of Subsidized Training on the Quantiles of Trainee Earnings}},
+  author = {Abadie, Alberto and Angrist, Joshua and Imbens, Guido},
+  number = {1},
+  pages = {91--117},
+  volume = {70}
+}
+
+@book{MolnarInterpretableLearning,
+  title = {{Interpretable Machine Learning}},
+  author = {Molnar, Christoph},
+  publisher = {Lulu.com}
+}
+
+@unpublished{Ish-HorowiczInterpretingImportance,
+  title = {{Interpreting Deep Neural Networks through Variable Importance}},
+  author = {Ish-Horowicz, Jonathan and Udwin, Dana and Flaxman, Seth and Filippi, Sarah and Crawford, Lorin},
+  arxivId = {1901.09839}
+}
+
+@inproceedings{KaurInterpretingLearning,
+  title = {{Interpreting Interpretability: Understanding Data Scientists' Use of Interpretability Tools for Machine Learning}},
+  booktitle = {Proceedings of the 2020 CHI Conference on Human Factors in Computing Systems},
+  author = {Kaur, Harmanpreet and Nori, Harsha and Jenkins, Samuel and Caruana, Rich and Wallach, Hanna and Wortman Vaughan, Jennifer},
+  pages = {1--14}
+}
+
+@unpublished{SzegedyIntriguingNetworks,
+  title = {{Intriguing Properties of Neural Networks}},
+  author = {Szegedy, Christian and Zaremba, Wojciech and Sutskever, Ilya and Bruna, Joan and Erhan, Dumitru and Goodfellow, Ian and Fergus, Rob},
+  arxivId = {1312.6199}
+}
+
+@book{ManningIntroductionRetrieval,
+  title = {{Introduction to Information Retrieval}},
+  author = {Manning, Christopher D and Sch{\"{u}}tze, Hinrich and Raghavan, Prabhakar},
+  publisher = {Cambridge University Press}
+}
+
+@book{SchutzeIntroductionRetrieval,
+  title = {{Introduction to Information Retrieval}},
+  author = 
{Sch{\"{u}}tze, Hinrich and Manning, Christopher D and Raghavan, Prabhakar}, + volume = {39}, + publisher = {Cambridge University Press Cambridge} +} + +@unpublished{LaugelInverseLearning, + title = {{Inverse Classification for Comparison-Based Interpretability in Machine Learning}}, + author = {Laugel, Thibault and Lesot, Marie-Jeanne and Marsala, Christophe and Renard, Xavier and Detyniecki, Marcin}, + arxivId = {1712.08443} +} + +@article{DaxbergerLaplaceLearning, + title = {{Laplace Redux-Effortless Bayesian Deep Learning}}, + author = {Daxberger, Erik and Kristiadi, Agustinus and Immer, Alexander and Eschenhagen, Runa and Bauer, Matthias and Hennig, Philipp}, + volume = {34} +} + +@article{WidmerLearningContexts, + title = {{Learning in the Presence of Concept Drift and Hidden Contexts}}, + author = {Widmer, Gerhard and Kubat, Miroslav}, + number = {1}, + pages = {69--101}, + volume = {23} +} + +@inproceedings{StutzLearningClassifiers, + title = {{Learning Optimal Conformal Classifiers}}, + author = {Stutz, David and Dvijotham, Krishnamurthy Dj and Cemgil, Ali Taylan and Doucet, Arnaud}, + url = {https://openreview.net/forum?id=t8O-4LKFVx}, + language = {en} +} + +@article{SadinleLeastLevels, + title = {{Least Ambiguous Set-Valued Classifiers with Bounded Error Levels}}, + author = {Sadinle, Mauricio and Lei, Jing and Wasserman, Larry}, + number = {525}, + pages = {223--234}, + volume = {114}, + publisher = {Taylor {\&} Francis} +} + +@article{SunsteinLibertarianOxymoron, + title = {{Libertarian Paternalism Is Not an Oxymoron}}, + author = {Sunstein, Cass R and Thaler, Richard H}, + pages = {1159--1202} +} + +@article{AngristLifetimeRecords, + title = {{Lifetime Earnings and the Vietnam Era Draft Lottery: Evidence from Social Security Administrative Records}}, + author = {Angrist, Joshua D}, + pages = {313--336} +} + +@article{HochreiterLongMemory, + title = {{Long Short-Term Memory}}, + author = {Hochreiter, Sepp and Schmidhuber, Jürgen}, + number = {8}, + 
pages = {1735--1780}, + volume = {9} +} + +@article{MasiniMachineForecasting, + title = {{Machine Learning Advances for Time Series Forecasting}}, + author = {Masini, Ricardo P and Medeiros, Marcelo C and Mendes, Eduardo F} +} + +@inproceedings{AckermanMachineSlices, + title = {{Machine Learning Model Drift Detection Via Weak Data Slices}}, + booktitle = {2021 IEEE/ACM Third International Workshop on Deep Learning for Testing and Testing for Deep Learning (DeepTest)}, + author = {Ackerman, Samuel and Dube, Parijat and Farchi, Eitan and Raz, Orna and Zalmanovici, Marcel}, + pages = {1--8}, + publisher = {IEEE} +} + +@article{BorchMachineTrading, + title = {{Machine Learning, Knowledge Risk, and Principal-Agent Problems in Automated Trading}}, + author = {Borch, Christian}, + pages = {101852} +} + +@book{MurphyMachinePerspective, + title = {{Machine Learning: A Probabilistic Perspective}}, + author = {Murphy, Kevin P}, + publisher = {MIT press} +} + +@article{JacksonMeetingNetworks, + title = {{Meeting Strangers and Friends of Friends: How Random Are Social Networks?}}, + author = {Jackson, Matthew O and Rogers, Brian W}, + number = {3}, + pages = {890--915}, + volume = {97} +} + +@book{PindyckMicroeconomics, + title = {{Microeconomics}}, + author = {Pindyck, Robert S and Rubinfeld, Daniel L}, + publisher = {Pearson Education} +} + +@misc{CardMinimumPennsylvania, + title = {{Minimum Wages and Employment: A Case Study of the Fast Food Industry in New Jersey and Pennsylvania}}, + author = {Card, David and Krueger, Alan B}, + institution = {National Bureau of Economic Research} +} + +@article{BlaomMLJ:Learning, + title = {{MLJ: A Julia Package for Composable Machine Learning}}, + shorttitle = {MLJ}, + author = {Blaom, Anthony D and Kiraly, Franz and Lienart, Thibaut and Simillides, Yiannis and Arenas, Diego and Vollmer, Sebastian J}, + number = {55}, + pages = {2704}, + volume = {5}, + url = {https://joss.theoj.org/papers/10.21105/joss.02704}, + doi = 
{10.21105/joss.02704}, + issn = {2475-9066} +} + +@article{VerstyukModelingNetworks, + title = {{Modeling Multivariate Time Series in Economics: From Auto-Regressions to Recurrent Neural Networks}}, + author = {Verstyuk, Sergiy} +} + +@article{HartlandMulti-ArmedMeta-Bandits, + title = {{Multi-Armed Bandit, Dynamic Environments and Meta-Bandits}}, + author = {Hartland, Cédric and Gelly, Sylvain and Baskiotis, Nicolas and Teytaud, Olivier and Sebag, Michele} +} + +@article{HseeMusicValue., + title = {{Music, Pandas, and Muggers: On the Affective Psychology of Value.}}, + author = {Hsee, Christopher K and Rottenstreich, Yuval}, + number = {1}, + pages = {23}, + volume = {133} +} + +@unpublished{GriffithNameData, + title = {{Name Your Friends, but Only Five? The Importance of Censoring in Peer Effects Estimates Using Social Network Data}}, + author = {Griffith, Alan} +} + +@article{ListNeoclassicalMarketplace, + title = {{Neoclassical Theory versus Prospect Theory: Evidence from the Marketplace}}, + author = {List, John A}, + number = {2}, + pages = {615--625}, + volume = {72} +} + +@article{BarabasiNetworkScience, + title = {{Network Science}}, + author = {Barab{\'{a}}si, Albert-László} +} + +@unpublished{BussmannNeuralData, + title = {{Neural Additive Vector Autoregression Models for Causal Discovery in Time Series Data}}, + author = {Bussmann, Bart and Nys, Jannes and Latr{\'{e}}, Steven}, + arxivId = {2010.09429} +} + +@inproceedings{DorffnerNeuralProcessing, + title = {{Neural Networks for Time Series Processing}}, + booktitle = {Neural Network World}, + author = {Dorffner, Georg}, + publisher = {Citeseer} +} + +@book{LutkepohlNewAnalysis, + title = {{New Introduction to Multiple Time Series Analysis}}, + author = {L{\"{u}}tkepohl, Helmut}, + publisher = {Springer Science {\&} Business Media} +} + +@book{BrockNonlinearEvidence, + title = {{Nonlinear Dynamics, Chaos, and Instability: Statistical Theory and Economic Evidence}}, + author = {Brock, William Allen and 
Brock, William A and Hsieh, David Arthur and LeBaron, Blake Dean and Brock, William E}, + publisher = {MIT press} +} + +@book{NocedalNumericalOptimization, + title = {{Numerical Optimization}}, + author = {Nocedal, Jorge and Wright, Stephen}, + publisher = {Springer Science {\&} Business Media} +} + +@unpublished{FanOnNetworks, + title = {{On Interpretability of Artificial Neural Networks}}, + author = {Fan, Fenglei and Xiong, Jinjun and Wang, Ge}, + arxivId = {2001.02522} +} + +@article{ArconesOnStatistics, + title = {{On the Bootstrap of U and V Statistics}}, + author = {Arcones, Miguel A and Gine, Evarist}, + pages = {655--674} +} + +@article{GalizziOnStudy, + title = {{On the External Validity of Social Preference Games: A Systematic Lab-Field Study}}, + author = {Galizzi, Matteo M and Navarro-Martinez, Daniel}, + number = {3}, + pages = {976--1002}, + volume = {65} +} + +@unpublished{GarivierOnProblems, + title = {{On Upper-Confidence Bound Policies for Non-Stationary Bandit Problems}}, + author = {Garivier, Aurélien and Moulines, Eric}, + arxivId = {0805.3415} +} + +@article{ZhuOptimalRegression, + title = {{Optimal Subsampling Approaches for Large Sample Linear Regression}}, + author = {Zhu, Rong and Ma, Ping and Mahoney, Michael W and Yu, Bin}, + pages = {arXiv–1509} +} + +@article{WangOptimalRegression, + title = {{Optimal Subsampling for Large Sample Logistic Regression}}, + author = {Wang, HaiYing and Zhu, Rong and Ma, Ping}, + number = {522}, + pages = {829--844}, + volume = {113} +} + +@article{AltmeyerOptionEvaluation, + title = {{Option Pricing in the Heston Stochastic Volatility Model: An Empirical Evaluation}}, + author = {Altmeyer, Patrick and Grapendal, Jacob Daniel and Pravosud, Makar and Quintana, Gand Derry} +} + +@book{BishopPatternLearning, + title = {{Pattern Recognition and Machine Learning}}, + author = {Bishop, Christopher M}, + publisher = {springer} +} + +@article{BramoullePeerSurvey, + title = {{Peer Effects in Networks: A Survey}}, + 
author = {Bramoull{\'{e}}, Yann and Djebbari, Habiba and Fortin, Bernard}, + pages = {603--629}, + volume = {12} +} + +@article{SacerdotePeerRoommates, + title = {{Peer Effects with Random Assignment: Results for Dartmouth Roommates}}, + author = {Sacerdote, Bruce}, + number = {2}, + pages = {681--704}, + volume = {116} +} + +@article{BarberPredictiveJackknife+, + title = {{Predictive inference with the jackknife+}}, + author = {Barber, Rina Foygel and Cand{\`{e}}s, Emmanuel J and Ramdas, Aaditya and Tibshirani, Ryan J}, + number = {1}, + pages = {486--507}, + volume = {49}, + publisher = {Institute of Mathematical Statistics}, + url = {https://projecteuclid.org/journals/annals-of-statistics/volume-49/issue-1/Predictive-inference-with-the-jackknife/10.1214/20-AOS1965.full}, + doi = {10.1214/20-AOS1965}, + issn = {0090-5364, 2168-8966}, + keywords = {62F40, 62G08, 62G09, conformal inference, cross-validation, distribution-free, jackknife, leave-one-out, stability} +} + +@book{MurphyProbabilisticIntroduction, + title = {{Probabilistic Machine Learning: An Introduction}}, + author = {Murphy, Kevin P}, + publisher = {MIT Press} +} + +@article{PawelczykProbabilisticallyRecourse, + title = {{Probabilistically Robust Recourse: Navigating the Trade-offs between Costs and Robustness in Algorithmic Recourse}}, + shorttitle = {Probabilistically Robust Recourse}, + author = {Pawelczyk, Martin and Datta, Teresa and van-den-Heuvel, Johannes and Kasneci, Gjergji and Lakkaraju, Himabindu} +} + +@article{KahnemanProspectRisk, + title = {{Prospect Theory: An Analysis of Decision under Risk}}, + author = {Kahneman, Daniel and Tversky, Amos}, + pages = {263--291} +} + +@article{CarlisleRacistControversy, + title = {{Racist Data Destruction? 
- a Boston Housing Dataset Controversy}}, + author = {Carlisle, M}, + url = {https://medium.com/@docintangible/racist-data-destruction-113e3eff54a8} +} + +@inproceedings{HoRandomForests, + title = {{Random Decision Forests}}, + booktitle = {Proceedings of 3rd International Conference on Document Analysis and Recognition}, + author = {Ho, Tin Kam}, + pages = {278--282}, + volume = {1}, + publisher = {IEEE} +} + +@article{ShafirReason-BasedChoice, + title = {{Reason-Based Choice}}, + author = {Shafir, Eldar and Simonson, Itamar and Tversky, Amos}, + number = {1-2}, + pages = {11--36}, + volume = {49} +} + +@article{KahnemanReferenceFeelings, + title = {{Reference Points, Anchors, Norms, and Mixed Feelings}}, + author = {Kahneman, Daniel}, + number = {2}, + pages = {296--312}, + volume = {51} +} + +@article{AllenReference-DependentRunners, + title = {{Reference-Dependent Preferences: Evidence from Marathon Runners}}, + author = {Allen, Eric J and Dechow, Patricia M and Pope, Devin G and Wu, George}, + number = {6}, + pages = {1657--1672}, + volume = {63} +} + +@article{denHengstReinforcementReview, + title = {{Reinforcement Learning for Personalization: A Systematic Literature Review}}, + author = {den Hengst, Floris and Grua, Eoin Martino and el Hassouni, Ali and Hoogendoorn, Mark}, + number = {Preprint}, + pages = {1--41} +} + +@book{SuttonReinforcementIntroduction, + title = {{Reinforcement Learning: An Introduction}}, + author = {Sutton, Richard S and Barto, Andrew G}, + publisher = {MIT press} +} + +@book{BerlinetReproducingStatistics, + title = {{Reproducing Kernel Hilbert Spaces in Probability and Statistics}}, + author = {Berlinet, Alain and Thomas-Agnan, Christine}, + publisher = {Springer Science {\&} Business Media} +} + +@article{HamonRobustnessIntelligence, + title = {{Robustness and Explainability of Artificial Intelligence}}, + author = {Hamon, Ronan and Junklewitz, Henrik and Sanchez, Ignacio} +} + +@article{PopeRoundLab, + title = {{Round Numbers as 
Goals: Evidence from Baseball, SAT Takers, and the Lab}}, + author = {Pope, Devin and Simonsohn, Uri}, + number = {1}, + pages = {71--79}, + volume = {22} +} + +@article{ThalerSaveSaving, + title = {{Save More Tomorrow: Using Behavioral Economics to Increase Employee Saving}}, + author = {Thaler, Richard H and Benartzi, Shlomo}, + number = {S1}, + pages = {S164–S187}, + volume = {112} +} + +@article{PedregosaScikit-Learn:Python, + title = {{Scikit-Learn: Machine Learning in Python}}, + author = {Pedregosa, Fabian and Varoquaux, Gaël and Gramfort, Alexandre and Michel, Vincent and Thirion, Bertrand and Grisel, Olivier and Blondel, Mathieu and Prettenhofer, Peter and Weiss, Ron and Dubourg, Vincent and {others}}, + pages = {2825--2830}, + volume = {12} +} + +@article{KihoroSeasonalModels, + title = {{Seasonal Time Series Forecasting: A Comparative Study of ARIMA and ANN Models}}, + author = {Kihoro, J and Otieno, R O and Wafula, C} +} + +@unpublished{LakshminarayananSimpleEnsembles, + title = {{Simple and Scalable Predictive Uncertainty Estimation Using Deep Ensembles}}, + author = {Lakshminarayanan, Balaji and Pritzel, Alexander and Blundell, Charles}, + arxivId = {1612.01474} +} + +@article{Goldsmith-PinkhamSocialEffects, + title = {{Social Networks and the Identification of Peer Effects}}, + author = {Goldsmith-Pinkham, Paul and Imbens, Guido W}, + number = {3}, + pages = {253--264}, + volume = {31} +} + +@article{ThalerSomeInconsistency, + title = {{Some Empirical Evidence on Dynamic Inconsistency}}, + author = {Thaler, Richard}, + number = {3}, + pages = {201--207}, + volume = {8} +} + +@article{PaceSparseAutoregressions, + title = {{Sparse Spatial Autoregressions}}, + author = {Pace, R Kelley and Barry, Ronald}, + number = {3}, + pages = {291--297}, + volume = {33} +} + +@article{BesbesStochasticRewards, + title = {{Stochastic Multi-Armed-Bandit Problem with Non-Stationary Rewards}}, + author = {Besbes, Omar and Gur, Yonatan and Zeevi, Assaf}, + pages = 
{199--207}, + volume = {27} +} + +@article{RudinStopInstead, + title = {{Stop Explaining Black Box Machine Learning Models for High Stakes Decisions and Use Interpretable Models Instead}}, + author = {Rudin, Cynthia}, + number = {5}, + pages = {206--215}, + volume = {1} +} + +@inproceedings{MillerStrategicDisguise, + title = {{Strategic Classification Is Causal Modeling in Disguise}}, + booktitle = {Proceedings of the 37th International Conference on Machine Learning}, + author = {Miller, John and Milli, Smitha and Hardt, Moritz}, + pages = {6917--6926}, + publisher = {PMLR}, + url = {https://proceedings.mlr.press/v119/miller20b.html}, + issn = {2640-3498} +} + +@book{KilianStructuralAnalysis, + title = {{Structural Vector Autoregressive Analysis}}, + author = {Kilian, Lutz and L{\"{u}}tkepohl, Helmut}, + publisher = {Cambridge University Press} +} + +@article{CortesSupport-VectorNetworks, + title = {{Support-Vector Networks}}, + author = {Cortes, Corinna and Vapnik, Vladimir}, + number = {3}, + pages = {273--297}, + volume = {20} +} + +@unpublished{RajTamingApproach, + title = {{Taming Non-Stationary Bandits: A Bayesian Approach}}, + author = {Raj, Vishnu and Kalyani, Sheetal}, + arxivId = {1707.09727} +} + +@book{PearlTheEffect, + title = {{The Book of Why: The New Science of Cause and Effect}}, + author = {Pearl, Judea and Mackenzie, Dana}, + publisher = {Basic books} +} + +@unpublished{WilsonTheLearning, + title = {{The Case for Bayesian Deep Learning}}, + author = {Wilson, Andrew Gordon}, + arxivId = {2001.10995} +} + +@article{YehTheClients, + title = {{The Comparisons of Data Mining Techniques for the Predictive Accuracy of Probability of Default of Credit Card Clients}}, + author = {Yeh, I-Cheng and Lien, Che-hui}, + number = {2}, + pages = {2473--2480}, + volume = {36} +} + +@article{AbadieTheCountry, + title = {{The Economic Costs of Conflict: A Case Study of the Basque Country}}, + author = {Abadie, Alberto and Gardeazabal, Javier}, + number = {1}, + 
pages = {113--132},
+  volume = {93}
+}
+
+@article{HseeTheAlternatives,
+  title = {{The Evaluability Hypothesis: An Explanation for Preference Reversals between Joint and Separate Evaluations of Alternatives}},
+  author = {Hsee, Christopher K},
+  number = {3},
+  pages = {247--257},
+  volume = {67}
+}
+
+@misc{BernankeTheTransnission,
+  title = {{The Federal Funds Rate and the Channels of Monetary Transmission}},
+  author = {Bernanke, Ben S},
+  publisher = {National Bureau of Economic Research Cambridge, Mass., USA}
+}
+
+@article{LernerTheSadness,
+  title = {{The Financial Costs of Sadness}},
+  author = {Lerner, Jennifer S and Li, Ye and Weber, Elke U},
+  number = {1},
+  pages = {72--79},
+  volume = {24}
+}
+
+@article{TverskyTheChoice,
+  title = {{The Framing of Decisions and the Psychology of Choice}},
+  author = {Tversky, Amos and Kahneman, Daniel},
+  number = {4481},
+  pages = {453--458},
+  volume = {211}
+}
+
+@techreport{ChouldechovaTheLearning,
+  title = {{The Frontiers of Fairness in Machine Learning}},
+  author = {Chouldechova, Alexandra and Roth, Aaron},
+  url = {http://arxiv.org/abs/1810.08810},
+  institution = {arXiv},
+  doi = {10.48550/arXiv.1810.08810},
+  keywords = {Computer Science - Computer Science and Game Theory, Computer Science - Data Structures and Algorithms, Computer Science - Machine Learning, Statistics - Machine Learning}
+}
+
+@article{BholatTheBanking,
+  title = {{The Impact of Covid on Machine Learning and Data Science in UK Banking}},
+  author = {Bholat, D and Gharbawi, M and Thew, O}
+}
+
+@article{GoodfriendTheDisinflation,
+  title = {{The Incredible Volcker Disinflation}},
+  author = {Goodfriend, Marvin and King, Robert G},
+  number = {5},
+  pages = {981--1015},
+  volume = {52}
+}
+
+@unpublished{BastounisTheNetworks,
+  title = {{The Mathematics of Adversarial Attacks in AI--Why Deep Learning Is Unstable despite the Existence of Stable Neural Networks}},
+  author = {Bastounis, Alexander and Hansen, Anders C and Vla{\v{c}}i{\'{c}}, 
Verner}, + arxivId = {2109.06098} +} + +@unpublished{ParrTheLearning, + title = {{The Matrix Calculus You Need for Deep Learning}}, + author = {Parr, Terence and Howard, Jeremy}, + arxivId = {1802.01528} +} + +@article{LeCunTheDigits, + title = {{The MNIST Database of Handwritten Digits}}, + author = {LeCun, Yann} +} + +@article{MischelTheGratification., + title = {{The Nature of Adolescent Competencies Predicted by Preschool Delay of Gratification.}}, + author = {Mischel, Walter and Shoda, Yuichi and Peake, Philip K}, + number = {4}, + pages = {687}, + volume = {54} +} + +@article{DellTheMita, + title = {{The Persistent Effects of Peru's Mining Mita}}, + author = {Dell, Melissa}, + number = {6}, + pages = {1863--1903}, + volume = {78} +} + +@article{MadrianTheBehavior, + title = {{The Power of Suggestion: Inertia in 401 (k) Participation and Savings Behavior}}, + author = {Madrian, Brigitte C and Shea, Dennis F}, + number = {4}, + pages = {1149--1187}, + volume = {116} +} + +@article{PearlTheLearning, + title = {{The Seven Tools of Causal Inference, with Reflections on Machine Learning}}, + author = {Pearl, Judea}, + number = {3}, + pages = {54--60}, + volume = {62} +} + +@article{EpsteinTheTime., + title = {{The Stability of Behavior: I. 
On Predicting Most of the People Much of the Time.}},
+  author = {Epstein, Seymour},
+  number = {7},
+  pages = {1097},
+  volume = {37}
+}
+
+@article{GneezyTheOutcome,
+  title = {{The Uncertainty Effect: When a Risky Prospect Is Valued Less than Its Worst Possible Outcome}},
+  author = {Gneezy, Uri and List, John A and Wu, George},
+  number = {4},
+  pages = {1283--1309},
+  volume = {121}
+}
+
+@article{HansenTheTrading,
+  title = {{The Virtue of Simplicity: On Machine Learning Models in Algorithmic Trading}},
+  author = {Hansen, Kristian Bondo},
+  number = {1},
+  pages = {2053951720926558},
+  volume = {7}
+}
+
+@inproceedings{GuptaThompsonBandits,
+  title = {{Thompson Sampling for Dynamic Multi-Armed Bandits}},
+  booktitle = {2011 10th International Conference on Machine Learning and Applications and Workshops},
+  author = {Gupta, Neha and Granmo, Ole-Christoffer and Agrawala, Ashok},
+  pages = {484--489},
+  volume = {1},
+  publisher = {IEEE}
+}
+
+@book{HamiltonTimeAnalysis,
+  title = {{Time Series Analysis}},
+  author = {Hamilton, James Douglas},
+  publisher = {Princeton University Press}
+}
+
+@article{ZhangTimeModel,
+  title = {{Time Series Forecasting Using a Hybrid ARIMA and Neural Network Model}},
+  author = {Zhang, G Peter},
+  pages = {159--175},
+  volume = {50}
+}
+
+@article{KydlandTimeFluctuations,
+  title = {{Time to Build and Aggregate Fluctuations}},
+  author = {Kydland, Finn E and Prescott, Edward C},
+  pages = {1345--1370}
+}
+
+@article{ArielyTomValue,
+  title = {{Tom Sawyer and the Construction of Value}},
+  author = {Ariely, Dan and Loewenstein, George and Prelec, Drazen},
+  number = {1},
+  pages = {1--10},
+  volume = {60}
+}
+
+@inproceedings{CarliniTowardsNetworks,
+  title = {{Towards Evaluating the Robustness of Neural Networks}},
+  booktitle = {2017 IEEE Symposium on Security and Privacy (SP)},
+  author = {Carlini, Nicholas and Wagner, David},
+  pages = {39--57},
+  publisher = {IEEE}
+}
+
+@unpublished{JoshiTowardsSystems,
+  title = {{Towards 
Realistic Individual Recourse and Actionable Explanations in Black-Box Decision Making Systems}}, + author = {Joshi, Shalmali and Koyejo, Oluwasanmi and Vijitbenjaronk, Warut and Kim, Been and Ghosh, Joydeep}, + arxivId = {1907.09615} +} + +@unpublished{UpadhyayTowardsRecourse, + title = {{Towards Robust and Reliable Algorithmic Recourse}}, + author = {Upadhyay, Sohini and Joshi, Shalmali and Lakkaraju, Himabindu}, + arxivId = {2102.13620} +} + +@book{VarshneyTrustworthyLearning, + title = {{Trustworthy Machine Learning}}, + author = {Varshney, Kush R}, + publisher = {Independently Published} +} + +@misc{AngelopoulosUncertaintyPrediction, + title = {{Uncertainty Sets for Image Classifiers Using Conformal Prediction}}, + author = {Angelopoulos, Anastasios and Bates, Stephen and Malik, Jitendra and Jordan, Michael I}, + number = {arXiv:2009.14193}, + publisher = {arXiv}, + url = {http://arxiv.org/abs/2009.14193}, + arxivId = {2009.14193}, + keywords = {Computer Science - Computer Vision and Pattern Recognition, Mathematics - Statistics Theory, Statistics - Machine Learning} +} + +@article{NagelUnravelingStudy, + title = {{Unraveling in Guessing Games: An Experimental Study}}, + author = {Nagel, Rosemarie}, + number = {5}, + pages = {1313--1326}, + volume = {85} +} + +@article{PfaffVARVars, + title = {{VAR, SVAR and SVEC Models: Implementation within R Package Vars}}, + author = {Pfaff, Bernhard and {others}}, + number = {4}, + pages = {1--32}, + volume = {27} +} + +@article{CrawfordVariableStudy, + title = {{Variable Prioritization in Nonlinear Black Box Methods: A Genetic Association Case Study}}, + author = {Crawford, Lorin and Flaxman, Seth R and Runcie, Daniel E and West, Mike}, + number = {2}, + pages = {958}, + volume = {13} +} + +@misc{LawrenceVariationalModels, + title = {{Variational Inference in Probabilistic Models}}, + author = {Lawrence, Neil David}, + institution = {University of Cambridge} +} + +@article{MigutVisualizing2D, + title = {{Visualizing 
Multi-Dimensional Decision Boundaries in 2D}}, + author = {Migut, M A and Worring, Marcel and Veenman, Cor J}, + number = {1}, + pages = {273--295}, + volume = {29} +} + +@book{ONeilWeaponsDemocracy, + title = {{Weapons of Math Destruction: How Big Data Increases Inequality and Threatens Democracy}}, + author = {O'Neil, Cathy}, + publisher = {Crown} +} + +@inproceedings{BlundellWeightNetwork, + title = {{Weight Uncertainty in Neural Network}}, + booktitle = {International Conference on Machine Learning}, + author = {Blundell, Charles and Cornebise, Julien and Kavukcuoglu, Koray and Wierstra, Daan}, + pages = {1613--1622}, + publisher = {PMLR} +} + +@unpublished{KendallWhatVision, + title = {{What Uncertainties Do We Need in Bayesian Deep Learning for Computer Vision?}}, + author = {Kendall, Alex and Gal, Yarin}, + arxivId = {1703.04977} +} + +@article{SlovicWhoAxiom, + title = {{Who Accepts Savage's Axiom?}}, + author = {Slovic, Paul and Tversky, Amos}, + number = {6}, + pages = {368--373}, + volume = {19} +} + +@unpublished{GrinsztajnWhyData, + title = {{Why Do Tree-Based Models Still Outperform Deep Learning on Tabular Data?}}, + author = {Grinsztajn, Léo and Oyallon, Edouard and Varoquaux, Gaël}, + arxivId = {2207.08815} +} + +@inproceedings{GrathwohlYourOne, + title = {{Your classifier is secretly an energy based model and you should treat it like one}}, + author = {Grathwohl, Will and Wang, Kuan-Chieh and Jacobsen, Joern-Henrik and Duvenaud, David and Norouzi, Mohammad and Swersky, Kevin}, + url = {https://openreview.net/forum?id=Hkxzx0NtDB}, + language = {en} +} + +@article{ArielyCoherentPreferences, + title = {{``Coherent Arbitrariness'': Stable Demand Curves without Stable Preferences}}, + author = {Ariely, Dan and Loewenstein, George and Prelec, Drazen}, + number = {1}, + pages = {73--106}, + volume = {118} +} \ No newline at end of file