Skip to content
Snippets Groups Projects
bib.bib 130 KiB
Newer Older
Pat Alt's avatar
Pat Alt committed
2001 2002 2003 2004 2005 2006 2007 2008 2009 2010 2011 2012 2013 2014 2015 2016 2017 2018 2019 2020 2021 2022 2023 2024 2025 2026 2027 2028 2029 2030 2031 2032 2033 2034 2035 2036 2037 2038 2039 2040 2041 2042 2043 2044 2045 2046 2047 2048 2049 2050 2051 2052 2053 2054 2055 2056 2057 2058 2059 2060 2061 2062 2063 2064 2065 2066 2067 2068 2069 2070 2071 2072 2073 2074 2075 2076 2077 2078 2079 2080 2081 2082 2083 2084 2085 2086 2087 2088 2089 2090 2091 2092 2093 2094 2095 2096 2097 2098 2099 2100 2101 2102 2103 2104 2105 2106 2107 2108 2109 2110 2111 2112 2113 2114 2115 2116 2117 2118 2119 2120 2121 2122 2123 2124 2125 2126 2127 2128 2129 2130 2131 2132 2133 2134 2135 2136 2137 2138 2139 2140 2141 2142 2143 2144 2145 2146 2147 2148 2149 2150 2151 2152 2153 2154 2155 2156 2157 2158 2159 2160 2161 2162 2163 2164 2165 2166 2167 2168 2169 2170 2171 2172 2173 2174 2175 2176 2177 2178 2179 2180 2181 2182 2183 2184 2185 2186 2187 2188 2189 2190 2191 2192 2193 2194 2195 2196 2197 2198 2199 2200 2201 2202 2203 2204 2205 2206 2207 2208 2209 2210 2211 2212 2213 2214 2215 2216 2217 2218 2219 2220 2221 2222 2223 2224 2225 2226 2227 2228 2229 2230 2231 2232 2233 2234 2235 2236 2237 2238 2239 2240 2241 2242 2243 2244 2245 2246 2247 2248 2249 2250 2251 2252 2253 2254 2255 2256 2257 2258 2259 2260 2261 2262 2263 2264 2265 2266 2267 2268 2269 2270 2271 2272 2273 2274 2275 2276 2277 2278 2279 2280 2281 2282 2283 2284 2285 2286 2287 2288 2289 2290 2291 2292 2293 2294 2295 2296 2297 2298 2299 2300 2301 2302 2303 2304 2305 2306 2307 2308 2309 2310 2311 2312 2313 2314 2315 2316 2317 2318 2319 2320 2321 2322 2323 2324 2325 2326 2327 2328 2329 2330 2331 2332 2333 2334 2335 2336 2337 2338 2339 2340 2341 2342 2343 2344 2345 2346 2347 2348 2349 2350 2351 2352 2353 2354 2355 2356 2357 2358 2359 2360 2361 2362 2363 2364 2365 2366 2367 2368 2369 2370 2371 2372 2373 2374 2375 2376 2377 2378 2379 2380 2381 2382 2383 2384 2385 2386 2387 2388 2389 2390 2391 2392 2393 2394 2395 2396 2397 2398 2399 2400 
2401 2402 2403 2404 2405 2406 2407 2408 2409 2410 2411 2412 2413 2414 2415 2416 2417 2418 2419 2420 2421 2422 2423 2424 2425 2426 2427 2428 2429 2430 2431 2432 2433 2434 2435 2436 2437 2438 2439 2440 2441 2442 2443 2444 2445 2446 2447 2448 2449 2450 2451 2452 2453 2454 2455 2456 2457 2458 2459 2460 2461 2462 2463 2464 2465 2466 2467 2468 2469 2470 2471 2472 2473 2474 2475 2476 2477 2478 2479 2480 2481 2482 2483 2484 2485 2486 2487 2488 2489 2490 2491 2492 2493 2494 2495 2496 2497 2498 2499 2500 2501 2502 2503 2504 2505 2506 2507 2508 2509 2510 2511 2512 2513 2514 2515 2516 2517 2518 2519 2520 2521 2522 2523 2524 2525 2526 2527 2528 2529 2530 2531 2532 2533 2534 2535 2536 2537 2538 2539 2540 2541 2542 2543 2544 2545 2546 2547 2548 2549 2550 2551 2552 2553 2554 2555 2556 2557 2558 2559 2560 2561 2562 2563 2564 2565 2566 2567 2568 2569 2570 2571 2572 2573 2574 2575 2576 2577 2578 2579 2580 2581 2582 2583 2584 2585 2586 2587 2588 2589 2590 2591 2592 2593 2594 2595 2596 2597 2598 2599 2600 2601 2602 2603 2604 2605 2606 2607 2608 2609 2610 2611 2612 2613 2614 2615 2616 2617 2618 2619 2620 2621 2622 2623 2624 2625 2626 2627 2628 2629 2630 2631 2632 2633 2634 2635 2636 2637 2638 2639 2640 2641 2642 2643 2644 2645 2646 2647 2648 2649 2650 2651 2652 2653 2654 2655 2656 2657 2658 2659 2660 2661 2662 2663 2664 2665 2666 2667 2668 2669 2670 2671 2672 2673 2674 2675 2676 2677 2678 2679 2680 2681 2682 2683 2684 2685 2686 2687 2688 2689 2690 2691 2692 2693 2694 2695 2696 2697 2698 2699 2700 2701 2702 2703 2704 2705 2706 2707 2708 2709 2710 2711 2712 2713 2714 2715 2716 2717 2718 2719 2720 2721 2722 2723 2724 2725 2726 2727 2728 2729 2730 2731 2732 2733 2734 2735 2736 2737 2738 2739 2740 2741 2742 2743 2744 2745 2746 2747 2748 2749 2750 2751 2752 2753 2754 2755 2756 2757 2758 2759 2760 2761 2762 2763 2764 2765 2766 2767 2768 2769 2770 2771 2772 2773 2774 2775 2776 2777 2778 2779 2780 2781 2782 2783 2784 2785 2786 2787 2788 2789 2790 2791 2792 2793 2794 2795 2796 2797 2798 2799 2800 
2801 2802 2803 2804 2805 2806 2807 2808 2809 2810 2811 2812 2813 2814 2815 2816 2817 2818 2819 2820 2821 2822 2823 2824 2825 2826 2827 2828 2829 2830 2831 2832 2833 2834 2835 2836 2837
  eprinttype    = {arxiv},
  year          = {2021},
}

@Book{pearl2018book,
  author        = {Pearl, Judea and Mackenzie, Dana},
  title         = {The Book of Why: The New Science of Cause and Effect},
  publisher     = {{Basic Books}},
  date-added    = {2022-12-13 12:58:01 +0100},
  date-modified = {2022-12-13 12:58:01 +0100},
  year          = {2018},
}

@article{pearl2019seven,
  author = {Pearl, Judea},
  title = {The Seven Tools of Causal Inference, with Reflections on Machine Learning},
  journal = {Communications of the ACM},
  volume = {62},
  number = {3},
  pages = {54--60},
  year = {2019},
  date-added = {2022-12-13 12:58:01 +0100},
  date-modified = {2022-12-13 12:58:01 +0100},
}

@Article{pedregosa2011scikitlearn,
  author        = {Pedregosa, Fabian and Varoquaux, Ga{\"e}l and Gramfort, Alexandre and Michel, Vincent and Thirion, Bertrand and Grisel, Olivier and Blondel, Mathieu and Prettenhofer, Peter and Weiss, Ron and Dubourg, Vincent and others},
  title         = {Scikit-Learn: {{Machine}} Learning in {{Python}}},
  pages         = {2825--2830},
  volume        = {12},
  date-added    = {2022-12-13 12:58:01 +0100},
  date-modified = {2022-12-13 12:58:01 +0100},
  journal       = {Journal of Machine Learning Research},
  year          = {2011},
}

@book{perry2010economic,
  author = {Perry, George L and Tobin, James},
  title = {Economic {{Events}}, {{Ideas}}, and {{Policies}}: The 1960s and After},
  publisher = {{Brookings Institution Press}},
  year = {2010},
  date-added = {2022-12-13 12:58:01 +0100},
  date-modified = {2022-12-13 12:58:01 +0100},
}

@Article{pfaff2008var,
  author        = {Pfaff, Bernhard and others},
  title         = {{{VAR}}, {{SVAR}} and {{SVEC}} Models: {{Implementation}} within {{R}} Package {vars}},
  number        = {4},
  pages         = {1--32},
  volume        = {27},
  date-added    = {2022-12-13 12:58:01 +0100},
  date-modified = {2022-12-13 12:58:01 +0100},
  journal       = {Journal of Statistical Software},
  year          = {2008},
}

@book{pindyck2014microeconomics,
  author = {Pindyck, Robert S and Rubinfeld, Daniel L},
  title = {Microeconomics},
  publisher = {{Pearson Education}},
  year = {2014},
  date-added = {2022-12-13 12:58:01 +0100},
  date-modified = {2022-12-13 12:58:01 +0100},
}

@Article{pope2011numbers,
  author        = {Pope, Devin and Simonsohn, Uri},
  title         = {Round Numbers as Goals: {{Evidence}} from Baseball, {{SAT}} Takers, and the Lab},
  number        = {1},
  pages         = {71--79},
  volume        = {22},
  date-added    = {2022-12-13 12:58:01 +0100},
  date-modified = {2022-12-13 12:58:01 +0100},
  journal       = {Psychological Science},
  year          = {2011},
}

@inproceedings{poyiadzi2020face,
  author = {Poyiadzi, Rafael and Sokol, Kacper and Santos-Rodriguez, Raul and De Bie, Tijl and Flach, Peter},
  title = {{{FACE}}: {{Feasible}} and Actionable Counterfactual Explanations},
  booktitle = {Proceedings of the {{AAAI}}/{{ACM Conference}} on {{AI}}, {{Ethics}}, and {{Society}}},
  pages = {344--350},
  year = {2020},
  date-added = {2022-12-13 12:58:01 +0100},
  date-modified = {2022-12-13 12:58:01 +0100},
}

@article{qu2015estimating,
  author = {Qu, Xi and Lee, Lung-fei},
  title = {Estimating a Spatial Autoregressive Model with an Endogenous Spatial Weight Matrix},
  journal = {Journal of Econometrics},
  volume = {184},
  number = {2},
  pages = {209--232},
  year = {2015},
  date-added = {2022-12-13 12:58:01 +0100},
  date-modified = {2022-12-13 12:58:01 +0100},
}

@article{rabanser2019failing,
  author = {Rabanser, Stephan and G{\"u}nnemann, Stephan and Lipton, Zachary},
  title = {Failing Loudly: {{An}} Empirical Study of Methods for Detecting Dataset Shift},
  journal = {Advances in Neural Information Processing Systems},
  volume = {32},
  year = {2019},
  date-added = {2022-12-13 12:58:01 +0100},
  date-modified = {2022-12-13 12:58:01 +0100},
}

@Unpublished{raghunathan2019adversarial,
  author        = {Raghunathan, Aditi and Xie, Sang Michael and Yang, Fanny and Duchi, John C and Liang, Percy},
  title         = {Adversarial Training Can Hurt Generalization},
  note          = {arXiv preprint arXiv:1906.06032},
  date-added    = {2022-12-13 12:58:01 +0100},
  date-modified = {2022-12-13 12:58:01 +0100},
  eprint        = {1906.06032},
  eprinttype    = {arXiv},
  year          = {2019},
}

@Unpublished{raj2017taming,
  author        = {Raj, Vishnu and Kalyani, Sheetal},
  title         = {Taming Non-Stationary Bandits: {{A Bayesian}} Approach},
  note          = {arXiv preprint arXiv:1707.09727},
  date-added    = {2022-12-13 12:58:01 +0100},
  date-modified = {2022-12-13 12:58:01 +0100},
  eprint        = {1707.09727},
  eprinttype    = {arXiv},
  year          = {2017},
}

@inproceedings{rasmussen2003gaussian,
  author = {Rasmussen, Carl Edward},
  title = {Gaussian Processes in Machine Learning},
  booktitle = {Summer School on Machine Learning},
  pages = {63--71},
  publisher = {{Springer}},
  year = {2003},
  date-added = {2022-12-13 12:58:01 +0100},
  date-modified = {2022-12-13 12:58:01 +0100},
}

@InProceedings{ribeiro2016why,
  author        = {Ribeiro, Marco Tulio and Singh, Sameer and Guestrin, Carlos},
  booktitle     = {Proceedings of the 22nd {{ACM SIGKDD}} International Conference on Knowledge Discovery and Data Mining},
  title         = {``{{Why}} Should {{I}} Trust You?'' {{Explaining}} the Predictions of Any Classifier},
  pages         = {1135--1144},
  date-added    = {2022-12-13 12:58:01 +0100},
  date-modified = {2022-12-13 12:58:01 +0100},
  year          = {2016},
}

@Article{romer1989does,
  author        = {Romer, Christina D and Romer, David H},
  title         = {Does Monetary Policy Matter? {{A}} New Test in the Spirit of {{Friedman}} and {{Schwartz}}},
  pages         = {121--170},
  volume        = {4},
  date-added    = {2022-12-13 12:58:01 +0100},
  date-modified = {2022-12-13 12:58:01 +0100},
  journal       = {NBER Macroeconomics Annual},
  year          = {1989},
}

@article{rudin2019stop,
  author = {Rudin, Cynthia},
  title = {Stop Explaining Black Box Machine Learning Models for High Stakes Decisions and Use Interpretable Models Instead},
  journal = {Nature Machine Intelligence},
  volume = {1},
  number = {5},
  pages = {206--215},
  year = {2019},
  date-added = {2022-12-13 12:58:01 +0100},
  date-modified = {2022-12-13 12:58:01 +0100},
}

@Article{sacerdote2001peer,
  author        = {Sacerdote, Bruce},
  title         = {Peer Effects with Random Assignment: {{Results}} for {{Dartmouth}} Roommates},
  number        = {2},
  pages         = {681--704},
  volume        = {116},
  date-added    = {2022-12-13 12:58:01 +0100},
  date-modified = {2022-12-13 12:58:01 +0100},
  journal       = {The Quarterly Journal of Economics},
  year          = {2001},
}

@article{sadinle2019least,
  author = {Sadinle, Mauricio and Lei, Jing and Wasserman, Larry},
  title = {Least Ambiguous Set-Valued Classifiers with Bounded Error Levels},
  journal = {Journal of the American Statistical Association},
  volume = {114},
  number = {525},
  pages = {223--234},
  publisher = {{Taylor \& Francis}},
  year = {2019},
  file = {:/Users/FA31DU/Zotero/storage/YXQ8N76A/Sadinle et al. - 2019 - Least ambiguous set-valued classifiers with bounde.pdf:;:/Users/FA31DU/Zotero/storage/ZHB56F3V/01621459.2017.html:},
  date-added = {2022-12-13 12:58:01 +0100},
  date-modified = {2022-12-13 12:58:01 +0100},
}

@InProceedings{satopaa2011finding,
  author        = {Satopaa, Ville and Albrecht, Jeannie and Irwin, David and Raghavan, Barath},
  booktitle     = {2011 31st International Conference on Distributed Computing Systems Workshops},
  title         = {Finding a ``{{Kneedle}}'' in a Haystack: {{Detecting}} Knee Points in System Behavior},
  pages         = {166--171},
  publisher     = {{IEEE}},
  date-added    = {2022-12-13 12:58:01 +0100},
  date-modified = {2022-12-13 12:58:01 +0100},
  year          = {2011},
}

@inproceedings{schut2021generating,
  author = {Schut, Lisa and Key, Oscar and Mc Grath, Rory and Costabello, Luca and Sacaleanu, Bogdan and Gal, Yarin and others},
  title = {Generating {{Interpretable Counterfactual Explanations By Implicit Minimisation}} of {{Epistemic}} and {{Aleatoric Uncertainties}}},
  booktitle = {International {{Conference}} on {{Artificial Intelligence}} and {{Statistics}}},
  pages = {1756--1764},
  publisher = {{PMLR}},
  year = {2021},
  date-added = {2022-12-13 12:58:01 +0100},
  date-modified = {2022-12-13 12:58:01 +0100},
}

@Book{schutze2008introduction,
  author        = {Sch{\"u}tze, Hinrich and Manning, Christopher D and Raghavan, Prabhakar},
  title         = {Introduction to Information Retrieval},
  publisher     = {{Cambridge University Press}},
  address       = {Cambridge},
  volume        = {39},
  date-added    = {2022-12-13 12:58:01 +0100},
  date-modified = {2022-12-13 12:58:01 +0100},
  year          = {2008},
}

@article{shafir1993reasonbased,
  author = {Shafir, Eldar and Simonson, Itamar and Tversky, Amos},
  title = {Reason-Based Choice},
  journal = {Cognition},
  volume = {49},
  number = {1-2},
  pages = {11--36},
  year = {1993},
  date-added = {2022-12-13 12:58:01 +0100},
  date-modified = {2022-12-13 12:58:01 +0100},
}

@Article{simonson1989choice,
  author        = {Simonson, Itamar},
  title         = {Choice Based on Reasons: {{The}} Case of Attraction and Compromise Effects},
  number        = {2},
  pages         = {158--174},
  volume        = {16},
  date-added    = {2022-12-13 12:58:01 +0100},
  date-modified = {2022-12-13 12:58:01 +0100},
  journal       = {Journal of Consumer Research},
  year          = {1989},
}

@article{sims1986are,
  author = {Sims, Christopher A and others},
  title = {Are Forecasting Models Usable for Policy Analysis?},
  journal = {Quarterly Review},
  volume = {10},
  issue = {Win},
  pages = {2--16},
  year = {1986},
  date-added = {2022-12-13 12:58:01 +0100},
  date-modified = {2022-12-13 12:58:01 +0100},
}

@InProceedings{slack2020fooling,
  author        = {Slack, Dylan and Hilgard, Sophie and Jia, Emily and Singh, Sameer and Lakkaraju, Himabindu},
  booktitle     = {Proceedings of the {{AAAI}}/{{ACM Conference}} on {{AI}}, {{Ethics}}, and {{Society}}},
  title         = {Fooling {{LIME}} and {{SHAP}}: {{Adversarial}} Attacks on Post Hoc Explanation Methods},
  pages         = {180--186},
  date-added    = {2022-12-13 12:58:01 +0100},
  date-modified = {2022-12-13 12:58:01 +0100},
  year          = {2020},
}

@article{slack2021counterfactual,
  author = {Slack, Dylan and Hilgard, Anna and Lakkaraju, Himabindu and Singh, Sameer},
  title = {Counterfactual Explanations Can Be Manipulated},
  journal = {Advances in Neural Information Processing Systems},
  volume = {34},
  year = {2021},
  date-added = {2022-12-13 12:58:01 +0100},
  date-modified = {2022-12-13 12:58:01 +0100},
}

@Article{slovic1974who,
  author        = {Slovic, Paul and Tversky, Amos},
  title         = {Who Accepts {{Savage}}'s Axiom?},
  number        = {6},
  pages         = {368--373},
  volume        = {19},
  date-added    = {2022-12-13 12:58:01 +0100},
  date-modified = {2022-12-13 12:58:01 +0100},
  journal       = {Behavioral Science},
  year          = {1974},
}

@Unpublished{spooner2021counterfactual,
  author        = {Spooner, Thomas and Dervovic, Danial and Long, Jason and Shepard, Jon and Chen, Jiahao and Magazzeni, Daniele},
  title         = {Counterfactual {{Explanations}} for {{Arbitrary Regression Models}}},
  note          = {arXiv preprint arXiv:2106.15212},
  date-added    = {2022-12-13 12:58:01 +0100},
  date-modified = {2022-12-13 12:58:01 +0100},
  eprint        = {2106.15212},
  eprinttype    = {arXiv},
  year          = {2021},
}

@Article{srivastava2014dropout,
  author        = {Srivastava, Nitish and Hinton, Geoffrey and Krizhevsky, Alex and Sutskever, Ilya and Salakhutdinov, Ruslan},
  title         = {Dropout: A Simple Way to Prevent Neural Networks from Overfitting},
  number        = {1},
  pages         = {1929--1958},
  volume        = {15},
  date-added    = {2022-12-13 12:58:01 +0100},
  date-modified = {2022-12-13 12:58:01 +0100},
  journal       = {Journal of Machine Learning Research},
  year          = {2014},
}

@Unpublished{stanton2022bayesian,
  author        = {Stanton, Samuel and Maddox, Wesley and Wilson, Andrew Gordon},
  title         = {Bayesian {{Optimization}} with {{Conformal Coverage Guarantees}}},
  note          = {arXiv preprint arXiv:2210.12496},
  date-added    = {2022-12-13 12:58:01 +0100},
  date-modified = {2022-12-13 12:58:01 +0100},
  eprint        = {2210.12496},
  eprinttype    = {arXiv},
  file          = {:/Users/FA31DU/Zotero/storage/XFGZAB9J/Stanton et al. - 2022 - Bayesian Optimization with Conformal Coverage Guar.pdf:;:/Users/FA31DU/Zotero/storage/RPWYDPVW/2210.html:},
  year          = {2022},
}

@article{sturm2014simple,
  author = {Sturm, Bob L},
  title = {A Simple Method to Determine If a Music Information Retrieval System Is a ``Horse''},
  journal = {IEEE Transactions on Multimedia},
  volume = {16},
  number = {6},
  pages = {1636--1644},
  year = {2014},
  date-added = {2022-12-13 12:58:01 +0100},
  date-modified = {2022-12-13 12:58:01 +0100},
}

@article{sunstein2003libertarian,
  author = {Sunstein, Cass R and Thaler, Richard H},
  title = {Libertarian Paternalism Is Not an Oxymoron},
  journal = {The University of Chicago Law Review},
  pages = {1159--1202},
  year = {2003},
  date-added = {2022-12-13 12:58:01 +0100},
  date-modified = {2022-12-13 12:58:01 +0100},
}

@book{sutton2018reinforcement,
  author = {Sutton, Richard S and Barto, Andrew G},
  title = {Reinforcement Learning: {{An}} Introduction},
  publisher = {{MIT press}},
  year = {2018},
  date-added = {2022-12-13 12:58:01 +0100},
  date-modified = {2022-12-13 12:58:01 +0100},
}

@Unpublished{szegedy2013intriguing,
  author        = {Szegedy, Christian and Zaremba, Wojciech and Sutskever, Ilya and Bruna, Joan and Erhan, Dumitru and Goodfellow, Ian and Fergus, Rob},
  title         = {Intriguing Properties of Neural Networks},
  note          = {arXiv preprint arXiv:1312.6199},
  date-added    = {2022-12-13 12:58:01 +0100},
  date-modified = {2022-12-13 12:58:01 +0100},
  eprint        = {1312.6199},
  eprinttype    = {arXiv},
  year          = {2013},
}

@Article{thaler1981empirical,
  author        = {Thaler, Richard},
  title         = {Some Empirical Evidence on Dynamic Inconsistency},
  number        = {3},
  pages         = {201--207},
  volume        = {8},
  date-added    = {2022-12-13 12:58:01 +0100},
  date-modified = {2022-12-13 12:58:01 +0100},
  journal       = {Economics Letters},
  year          = {1981},
}

@Article{thaler2004more,
  author        = {Thaler, Richard H and Benartzi, Shlomo},
  title         = {Save More Tomorrow{\texttrademark}: {{Using}} Behavioral Economics to Increase Employee Saving},
  number        = {S1},
  pages         = {S164--S187},
  volume        = {112},
  date-added    = {2022-12-13 12:58:01 +0100},
  date-modified = {2022-12-13 12:58:01 +0100},
  journal       = {Journal of Political Economy},
  year          = {2004},
}

@Article{tversky1981framing,
  author        = {Tversky, Amos and Kahneman, Daniel},
  title         = {The Framing of Decisions and the Psychology of Choice},
  number        = {4481},
  pages         = {453--458},
  volume        = {211},
  date-added    = {2022-12-13 12:58:01 +0100},
  date-modified = {2022-12-13 12:58:01 +0100},
  journal       = {Science},
  shortjournal  = {Science},
  year          = {1981},
}

@article{ungemach2011how,
  author = {Ungemach, Christoph and Stewart, Neil and Reimers, Stian},
  title = {How Incidental Values from the Environment Affect Decisions about Money, Risk, and Delay},
  journal = {Psychological Science},
  volume = {22},
  number = {2},
  pages = {253--260},
  year = {2011},
  date-added = {2022-12-13 12:58:01 +0100},
  date-modified = {2022-12-13 12:58:01 +0100},
}

@Unpublished{upadhyay2021robust,
  author        = {Upadhyay, Sohini and Joshi, Shalmali and Lakkaraju, Himabindu},
  title         = {Towards {{Robust}} and {{Reliable Algorithmic Recourse}}},
  note          = {arXiv preprint arXiv:2102.13620},
  date-added    = {2022-12-13 12:58:01 +0100},
  date-modified = {2022-12-13 12:58:01 +0100},
  eprint        = {2102.13620},
  eprinttype    = {arXiv},
  year          = {2021},
}

@inproceedings{ustun2019actionable,
  author = {Ustun, Berk and Spangher, Alexander and Liu, Yang},
  title = {Actionable Recourse in Linear Classification},
  booktitle = {Proceedings of the {{Conference}} on {{Fairness}}, {{Accountability}}, and {{Transparency}}},
  pages = {10--19},
  year = {2019},
  date-added = {2022-12-13 12:58:01 +0100},
  date-modified = {2022-12-13 12:58:01 +0100},
}

@Article{vanboven2000egocentric,
  author        = {Van Boven, Leaf and Dunning, David and Loewenstein, George},
  title         = {Egocentric Empathy Gaps between Owners and Buyers: Misperceptions of the Endowment Effect},
  number        = {1},
  pages         = {66},
  volume        = {79},
  date-added    = {2022-12-13 12:58:01 +0100},
  date-modified = {2022-12-13 12:58:01 +0100},
  journal       = {Journal of Personality and Social Psychology},
  year          = {2000},
}

@book{varshney2022trustworthy,
  author = {Varshney, Kush R.},
  title = {Trustworthy {{Machine Learning}}},
  publisher = {{Independently Published}},
  address = {{Chappaqua, NY, USA}},
  year = {2022},
  date-added = {2022-12-13 12:58:01 +0100},
  date-modified = {2022-12-13 12:58:01 +0100},
}

@Unpublished{verma2020counterfactual,
  author        = {Verma, Sahil and Dickerson, John and Hines, Keegan},
  title         = {Counterfactual Explanations for Machine Learning: {{A}} Review},
  note          = {arXiv preprint arXiv:2010.10596},
  date-added    = {2022-12-13 12:58:01 +0100},
  date-modified = {2022-12-13 12:58:01 +0100},
  eprint        = {2010.10596},
  eprinttype    = {arXiv},
  year          = {2020},
}

@article{verstyuk2020modeling,
  author = {Verstyuk, Sergiy},
  title = {Modeling Multivariate Time Series in Economics: {{From}} Auto-Regressions to Recurrent Neural Networks},
  journal = {Available at SSRN 3589337},
  year = {2020},
  date-added = {2022-12-13 12:58:01 +0100},
  date-modified = {2022-12-13 12:58:01 +0100},
}

@article{wachter2017counterfactual,
  author = {Wachter, Sandra and Mittelstadt, Brent and Russell, Chris},
  title = {Counterfactual Explanations without Opening the Black Box: {{Automated}} Decisions and the {{GDPR}}},
  journal = {Harv. JL \& Tech.},
  volume = {31},
  pages = {841},
  year = {2017},
  date-added = {2022-12-13 12:58:01 +0100},
  date-modified = {2022-12-13 12:58:01 +0100},
}

@article{wang2018optimal,
  author = {Wang, HaiYing and Zhu, Rong and Ma, Ping},
  title = {Optimal Subsampling for Large Sample Logistic Regression},
  journal = {Journal of the American Statistical Association},
  volume = {113},
  number = {522},
  pages = {829--844},
  year = {2018},
  date-added = {2022-12-13 12:58:01 +0100},
  date-modified = {2022-12-13 12:58:01 +0100},
}

@book{wasserman2006all,
  author = {Wasserman, Larry},
  title = {All of Nonparametric Statistics},
  publisher = {{Springer Science \& Business Media}},
  year = {2006},
  date-added = {2022-12-13 12:58:01 +0100},
  date-modified = {2022-12-13 12:58:01 +0100},
}

@book{wasserman2013all,
  author = {Wasserman, Larry},
  title = {All of Statistics: A Concise Course in Statistical Inference},
  publisher = {{Springer Science \& Business Media}},
  year = {2013},
  date-added = {2022-12-13 12:58:01 +0100},
  date-modified = {2022-12-13 12:58:01 +0100},
}

@Article{widmer1996learning,
  author        = {Widmer, Gerhard and Kubat, Miroslav},
  title         = {Learning in the Presence of Concept Drift and Hidden Contexts},
  number        = {1},
  pages         = {69--101},
  volume        = {23},
  date-added    = {2022-12-13 12:58:01 +0100},
  date-modified = {2022-12-13 12:58:01 +0100},
  journal       = {Machine Learning},
  year          = {1996},
}

@Unpublished{wilson2020case,
  author        = {Wilson, Andrew Gordon},
  title         = {The Case for {{Bayesian}} Deep Learning},
  note          = {arXiv preprint arXiv:2001.10995},
  date-added    = {2022-12-13 12:58:01 +0100},
  date-modified = {2022-12-13 12:58:01 +0100},
  eprint        = {2001.10995},
  eprinttype    = {arXiv},
  year          = {2020},
}

@Article{witten2009penalized,
  author        = {Witten, Daniela M and Tibshirani, Robert and Hastie, Trevor},
  title         = {A Penalized Matrix Decomposition, with Applications to Sparse Principal Components and Canonical Correlation Analysis},
  number        = {3},
  pages         = {515--534},
  volume        = {10},
  date-added    = {2022-12-13 12:58:01 +0100},
  date-modified = {2022-12-13 12:58:01 +0100},
  journal       = {Biostatistics},
  year          = {2009},
}

@Article{xu2020epidemiological,
  author        = {Xu, Bo and Gutierrez, Bernardo and Mekaru, Sumiko and Sewalk, Kara and Goodwin, Lauren and Loskill, Alyssa and Cohn, Emily and Hswen, Yulin and Hill, Sarah C. and Cobo, Maria M and Zarebski, Alexander and Li, Sabrina and Wu, Chieh-Hsi and Hulland, Erin and Morgan, Julia and Wang, Lin and O'Brien, Katelynn and Scarpino, Samuel V. and Brownstein, John S. and Pybus, Oliver G. and Pigott, David M. and Kraemer, Moritz U. G.},
  title         = {Epidemiological Data from the {{COVID-19}} Outbreak, Real-Time Case Information},
  doi           = {10.1038/s41597-020-0448-0},
  number        = {106},
  volume        = {7},
  bdsk-url-1    = {https://doi.org/10.1038/s41597-020-0448-0},
  date-added    = {2022-12-13 12:58:01 +0100},
  date-modified = {2022-12-13 12:58:01 +0100},
  journal       = {Scientific Data},
  year          = {2020},
}

@Article{yeh2009comparisons,
  author        = {Yeh, I-Cheng and Lien, Che-hui},
  title         = {The Comparisons of Data Mining Techniques for the Predictive Accuracy of Probability of Default of Credit Card Clients},
  number        = {2},
  pages         = {2473--2480},
  volume        = {36},
  date-added    = {2022-12-13 12:58:01 +0100},
  date-modified = {2022-12-13 12:58:01 +0100},
  journal       = {Expert Systems with Applications},
  year          = {2009},
}

@Article{zhang1998forecasting,
  author        = {Zhang, Guoqiang and Patuwo, B Eddy and Hu, Michael Y},
  title         = {Forecasting with Artificial Neural Networks:: {{The}} State of the Art},
  number        = {1},
  pages         = {35--62},
  volume        = {14},
  date-added    = {2022-12-13 12:58:01 +0100},
  date-modified = {2022-12-13 12:58:01 +0100},
  journal       = {International Journal of Forecasting},
  year          = {1998},
}

@article{zhang2003time,
  author = {Zhang, G Peter},
  title = {Time Series Forecasting Using a Hybrid {{ARIMA}} and Neural Network Model},
  journal = {Neurocomputing},
  volume = {50},
  pages = {159--175},
  year = {2003},
  date-added = {2022-12-13 12:58:01 +0100},
  date-modified = {2022-12-13 12:58:01 +0100},
}

@Unpublished{zheng2018dags,
  author        = {Zheng, Xun and Aragam, Bryon and Ravikumar, Pradeep and Xing, Eric P},
  title         = {{{DAGs}} with {{NO TEARS}}: {{Continuous}} Optimization for Structure Learning},
  note          = {arXiv preprint arXiv:1803.01422},
  date-added    = {2022-12-13 12:58:01 +0100},
  date-modified = {2022-12-13 12:58:01 +0100},
  eprint        = {1803.01422},
  eprinttype    = {arXiv},
  year          = {2018},
}

@Article{zhu2015optimal,
  author        = {Zhu, Rong and Ma, Ping and Mahoney, Michael W and Yu, Bin},
  title         = {Optimal Subsampling Approaches for Large Sample Linear Regression},
  note          = {arXiv e-print, 2015; eprint identifier to be confirmed},
  date-added    = {2022-12-13 12:58:01 +0100},
  date-modified = {2022-12-13 12:58:01 +0100},
  journal       = {arXiv preprint},
  year          = {2015},
}

@article{barber2021predictive,
  author = {Barber, Rina Foygel and Candès, Emmanuel J. and Ramdas, Aaditya and Tibshirani, Ryan J.},
  title = {Predictive inference with the jackknife+},
  journal = {The Annals of Statistics},
  volume = {49},
  number = {1},
  pages = {486--507},
  month = feb,
  year = {2021},
  publisher = {Institute of Mathematical Statistics},
  doi = {10.1214/20-AOS1965},
  issn = {0090-5364, 2168-8966},
  urldate = {2022-12-13},
  keywords = {62F40, 62G08, 62G09, conformal inference, cross-validation, distribution-free, jackknife, leave-one-out, stability},
  abstract = {This paper introduces the jackknife+, which is a novel method for constructing predictive confidence intervals. Whereas the jackknife outputs an interval centered at the predicted response of a test point, with the width of the interval determined by the quantiles of leave-one-out residuals, the jackknife+ also uses the leave-one-out predictions at the test point to account for the variability in the fitted regression function. Assuming exchangeable training samples, we prove that this crucial modification permits rigorous coverage guarantees regardless of the distribution of the data points, for any algorithm that treats the training points symmetrically. Such guarantees are not possible for the original jackknife and we demonstrate examples where the coverage rate may actually vanish. Our theoretical and empirical analysis reveals that the jackknife and the jackknife+ intervals achieve nearly exact coverage and have similar lengths whenever the fitting algorithm obeys some form of stability. Further, we extend the jackknife+ to \$K\$-fold cross validation and similarly establish rigorous coverage properties. Our methods are related to cross-conformal prediction proposed by Vovk (Ann. Math. Artif. Intell. 74 (2015) 9–28) and we discuss connections.},
  file = {:Barber2021 - Predictive Inference with the Jackknife+.pdf:PDF},
}

@TechReport{chouldechova2018frontiers,
  author      = {Chouldechova, Alexandra and Roth, Aaron},
  date        = {2018-10},
  institution = {arXiv},
  title       = {The {Frontiers} of {Fairness} in {Machine} {Learning}},
  doi         = {10.48550/arXiv.1810.08810},
  eprint      = {1810.08810},
  eprinttype  = {arXiv},
  url         = {http://arxiv.org/abs/1810.08810},
  abstract    = {The last few years have seen an explosion of academic and popular interest in algorithmic fairness. Despite this interest and the volume and velocity of work that has been produced recently, the fundamental science of fairness in machine learning is still in a nascent state. In March 2018, we convened a group of experts as part of a CCC visioning workshop to assess the state of the field, and distill the most promising research directions going forward. This report summarizes the findings of that workshop. Along the way, it surveys recent theoretical work in the field and points towards promising directions for research.},
  file        = {:chouldechova2018frontiers - The Frontiers of Fairness in Machine Learning.pdf:PDF},
  keywords    = {Computer Science - Machine Learning, Computer Science - Data Structures and Algorithms, Computer Science - Computer Science and Game Theory, Statistics - Machine Learning},
}

@TechReport{pawelczyk2022probabilistically,
  author      = {Pawelczyk, Martin and Datta, Teresa and van den Heuvel, Johannes and Kasneci, Gjergji and Lakkaraju, Himabindu},
  date        = {2022},
  institution = {arXiv},
  title       = {Probabilistically {Robust} {Recourse}: {Navigating} the {Trade}-offs between {Costs} and {Robustness} in {Algorithmic} {Recourse}},
  eprint      = {2203.06768},
  eprinttype  = {arXiv},
  url         = {http://arxiv.org/abs/2203.06768},
  file        = {:pawelczyk2022probabilistically - Probabilistically Robust Recourse_ Navigating the Trade Offs between Costs and Robustness in Algorithmic Recourse.pdf:PDF},
  shorttitle  = {Probabilistically {Robust} {Recourse}},
}

@InProceedings{stutz2022learning,
  author    = {Stutz, David and Dvijotham, Krishnamurthy Dj and Cemgil, Ali Taylan and Doucet, Arnaud},
  title     = {Learning {Optimal} {Conformal} {Classifiers}},
  booktitle = {International {Conference} on {Learning} {Representations} ({ICLR})},
  language  = {en},
  url       = {https://openreview.net/forum?id=t8O-4LKFVx},
  urldate   = {2023-02-13},
  abstract  = {Modern deep learning based classifiers show very high accuracy on test data but this does not provide sufficient guarantees for safe deployment, especially in high-stake AI applications such as medical diagnosis. Usually, predictions are obtained without a reliable uncertainty estimate or a formal guarantee. Conformal prediction (CP) addresses these issues by using the classifier's predictions, e.g., its probability estimates, to predict confidence sets containing the true class with a user-specified probability. However, using CP as a separate processing step after training prevents the underlying model from adapting to the prediction of confidence sets. Thus, this paper explores strategies to differentiate through CP during training with the goal of training model with the conformal wrapper end-to-end. In our approach, conformal training (ConfTr), we specifically "simulate" conformalization on mini-batches during training. Compared to standard training, ConfTr reduces the average confidence set size (inefficiency) of state-of-the-art CP methods applied after training. Moreover, it allows to "shape" the confidence sets predicted at test time, which is difficult for standard CP. On experiments with several datasets, we show ConfTr can influence how inefficiency is distributed across classes, or guide the composition of confidence sets in terms of the included classes, while retaining the guarantees offered by CP.},
  file      = {:stutz2022learning - Learning Optimal Conformal Classifiers.pdf:PDF},
  month     = may,
  year      = {2022},
}

@InProceedings{grathwohl2020your,
  author    = {Grathwohl, Will and Wang, Kuan-Chieh and Jacobsen, Joern-Henrik and Duvenaud, David and Norouzi, Mohammad and Swersky, Kevin},
  title     = {Your classifier is secretly an energy based model and you should treat it like one},
  booktitle = {International {Conference} on {Learning} {Representations} ({ICLR})},
  language  = {en},
  url       = {https://openreview.net/forum?id=Hkxzx0NtDB},
  urldate   = {2023-02-13},
  abstract  = {We propose to reinterpret a standard discriminative classifier of p(y{\textbar}x) as an energy based model for the joint distribution p(x, y). In this setting, the standard class probabilities can be easily computed as well as unnormalized values of p(x) and p(x{\textbar}y). Within this framework, standard discriminative architectures may be used and the model can also be trained on unlabeled data. We demonstrate that energy based training of the joint distribution improves calibration, robustness, and out-of-distribution detection while also enabling our models to generate samples rivaling the quality of recent GAN approaches. We improve upon recently proposed techniques for scaling up the training of energy based models and present an approach which adds little overhead compared to standard classification training. Our approach is the first to achieve performance rivaling the state-of-the-art in both generative and discriminative learning within one hybrid model.},
  file      = {:grathwohl2020your - Your Classifier Is Secretly an Energy Based Model and You Should Treat It like One.pdf:PDF},
  month     = mar,
  year      = {2020},
}

@Book{murphy2023probabilistic,
  author     = {Murphy, Kevin P.},
  title      = {Probabilistic machine learning: {Advanced} topics},
  shorttitle = {Probabilistic machine learning},
  publisher  = {MIT Press},
  date       = {2023},
}

@TechReport{artelt2021evaluating,
  author      = {Artelt, André and Vaquet, Valerie and Velioglu, Riza and Hinder, Fabian and Brinkrolf, Johannes and Schilling, Malte and Hammer, Barbara},
  date        = {2021-07},
  institution = {arXiv},
  title       = {Evaluating {Robustness} of {Counterfactual} {Explanations}},
  eprint      = {2103.02354},
  eprinttype  = {arXiv},
  url         = {http://arxiv.org/abs/2103.02354},
  urldate     = {2023-03-24},
  abstract    = {Transparency is a fundamental requirement for decision making systems when these should be deployed in the real world. It is usually achieved by providing explanations of the system's behavior. A prominent and intuitive type of explanations are counterfactual explanations. Counterfactual explanations explain a behavior to the user by proposing actions -- as changes to the input -- that would cause a different (specified) behavior of the system. However, such explanation methods can be unstable with respect to small changes to the input -- i.e. even a small change in the input can lead to huge or arbitrary changes in the output and of the explanation. This could be problematic for counterfactual explanations, as two similar individuals might get very different explanations. Even worse, if the recommended actions differ considerably in their complexity, one would consider such unstable (counterfactual) explanations as individually unfair. In this work, we formally and empirically study the robustness of counterfactual explanations in general, as well as under different models and different kinds of perturbations. Furthermore, we propose that plausible counterfactual explanations can be used instead of closest counterfactual explanations to improve the robustness and consequently the individual fairness of counterfactual explanations.},
  annotation  = {Comment: Rewrite paper to make things more clear; Remove one theorem \& corollary due to buggy proof},
  file        = {:artelt2021evaluating - Evaluating Robustness of Counterfactual Explanations.pdf:PDF},
  keywords    = {Computer Science - Machine Learning, Computer Science - Artificial Intelligence},
}

@Article{guidotti2022counterfactual,
  author       = {Guidotti, Riccardo},
  date         = {2022-04},
  journaltitle = {Data Mining and Knowledge Discovery},
  title        = {Counterfactual explanations and how to find them: literature review and benchmarking},
  doi          = {10.1007/s10618-022-00831-6},
  issn         = {1573-756X},
  language     = {en},
  abstract     = {Interpretable machine learning aims at unveiling the reasons behind predictions returned by uninterpretable classifiers. One of the most valuable types of explanation consists of counterfactuals. A counterfactual explanation reveals what should have been different in an instance to observe a diverse outcome. For instance, a bank customer asks for a loan that is rejected. The counterfactual explanation consists of what should have been different for the customer in order to have the loan accepted. Recently, there has been an explosion of proposals for counterfactual explainers. The aim of this work is to survey the most recent explainers returning counterfactual explanations. We categorize explainers based on the approach adopted to return the counterfactuals, and we label them according to characteristics of the method and properties of the counterfactuals returned. In addition, we visually compare the explanations, and we report quantitative benchmarking assessing minimality, actionability, stability, diversity, discriminative power, and running time. The results make evident that the current state of the art does not provide a counterfactual explainer able to guarantee all these properties simultaneously.},
  file         = {Full Text PDF:https\://link.springer.com/content/pdf/10.1007%2Fs10618-022-00831-6.pdf:application/pdf},
  keywords     = {Explainable AI, Counterfactual explanations, Contrastive explanations, Interpretable machine learning},
  shorttitle   = {Counterfactual explanations and how to find them},
}

@TechReport{mahajan2020preserving,
  author      = {Mahajan, Divyat and Tan, Chenhao and Sharma, Amit},
  date        = {2020-06},
  institution = {arXiv},
  title       = {Preserving {Causal} {Constraints} in {Counterfactual} {Explanations} for {Machine} {Learning} {Classifiers}},
  doi         = {10.48550/arXiv.1912.03277},
  eprint      = {1912.03277},
  eprinttype  = {arXiv},
  url         = {http://arxiv.org/abs/1912.03277},
  urldate     = {2023-03-24},
  abstract    = {To construct interpretable explanations that are consistent with the original ML model, counterfactual examples---showing how the model's output changes with small perturbations to the input---have been proposed. This paper extends the work in counterfactual explanations by addressing the challenge of feasibility of such examples. For explanations of ML models in critical domains such as healthcare and finance, counterfactual examples are useful for an end-user only to the extent that perturbation of feature inputs is feasible in the real world. We formulate the problem of feasibility as preserving causal relationships among input features and present a method that uses (partial) structural causal models to generate actionable counterfactuals. When feasibility constraints cannot be easily expressed, we consider an alternative mechanism where people can label generated CF examples on feasibility: whether it is feasible to intervene and realize the candidate CF example from the original input. To learn from this labelled feasibility data, we propose a modified variational auto encoder loss for generating CF examples that optimizes for feasibility as people interact with its output. Our experiments on Bayesian networks and the widely used ''Adult-Income'' dataset show that our proposed methods can generate counterfactual explanations that better satisfy feasibility constraints than existing methods.. Code repository can be accessed here: {\textbackslash}textit\{https://github.com/divyat09/cf-feasibility\}},
  annotation  = {Comment: 2019 NeurIPS Workshop on Do the right thing: Machine learning and Causal Inference for improved decision making},
  file        = {:mahajan2020preserving - Preserving Causal Constraints in Counterfactual Explanations for Machine Learning Classifiers.pdf:PDF},
  keywords    = {Computer Science - Machine Learning, Computer Science - Artificial Intelligence, Statistics - Machine Learning},
}

@TechReport{antoran2023sampling,
  author      = {Antorán, Javier and Padhy, Shreyas and Barbano, Riccardo and Nalisnick, Eric and Janz, David and Hernández-Lobato, José Miguel},
  date        = {2023-03},
  institution = {arXiv},
  title       = {Sampling-based inference for large linear models, with application to linearised {Laplace}},
  eprint      = {2210.04994},
  eprinttype  = {arXiv},
  url         = {http://arxiv.org/abs/2210.04994},
  urldate     = {2023-03-25},
  abstract    = {Large-scale linear models are ubiquitous throughout machine learning, with contemporary application as surrogate models for neural network uncertainty quantification; that is, the linearised Laplace method. Alas, the computational cost associated with Bayesian linear models constrains this method's application to small networks, small output spaces and small datasets. We address this limitation by introducing a scalable sample-based Bayesian inference method for conjugate Gaussian multi-output linear models, together with a matching method for hyperparameter (regularisation) selection. Furthermore, we use a classic feature normalisation method (the g-prior) to resolve a previously highlighted pathology of the linearised Laplace method. Together, these contributions allow us to perform linearised neural network inference with ResNet-18 on CIFAR100 (11M parameters, 100 outputs x 50k datapoints), with ResNet-50 on Imagenet (50M parameters, 1000 outputs x 1.2M datapoints) and with a U-Net on a high-resolution tomographic reconstruction task (2M parameters, 251k output{\textasciitilde}dimensions).},
  annotation  = {Comment: Published at ICLR 2023. This latest Arxiv version is extended with a demonstration of the proposed methods on the Imagenet dataset},
  file        = {arXiv Fulltext PDF:https\://arxiv.org/pdf/2210.04994.pdf:application/pdf},
  keywords    = {Statistics - Machine Learning, Computer Science - Artificial Intelligence, Computer Science - Machine Learning},
}

@Online{altmeyer2022conformal,
  author   = {Altmeyer, Patrick},
  date     = {2022-10},
  title    = {{Conformal} {Prediction} in {Julia}},
  language = {en},
  url      = {https://www.paltmeyer.com/blog/posts/conformal-prediction/},
  urldate  = {2023-03-27},
  abstract = {A (very) gentle introduction to Conformal Prediction in Julia using my new package ConformalPrediction.jl.},
}

@InProceedings{welling2011bayesian,
  author     = {Welling, Max and Teh, Yee Whye},
  date       = {2011-06},
  title      = {Bayesian {Learning} via {Stochastic} {Gradient} {Langevin} {Dynamics}},
  booktitle  = {Proceedings of the 28th {International} {Conference} on {Machine} {Learning} ({ICML})},
  pages      = {681--688},
  url        = {https://www.semanticscholar.org/paper/Bayesian-Learning-via-Stochastic-Gradient-Langevin-Welling-Teh/aeed631d6a84100b5e9a021ec1914095c66de415},
  urldate    = {2023-05-15},
  abstract   = {In this paper we propose a new framework for learning from large scale datasets based on iterative learning from small mini-batches. By adding the right amount of noise to a standard stochastic gradient optimization algorithm we show that the iterates will converge to samples from the true posterior distribution as we anneal the stepsize. This seamless transition between optimization and Bayesian posterior sampling provides an inbuilt protection against overfitting. We also propose a practical method for Monte Carlo estimates of posterior statistics which monitors a "sampling threshold" and collects samples after it has been surpassed. We apply the method to three models: a mixture of Gaussians, logistic regression and ICA with natural gradients.},
  annotation = {[TLDR] This paper proposes a new framework for learning from large scale datasets based on iterative learning from small mini-batches by adding the right amount of noise to a standard stochastic gradient optimization algorithm and shows that the iterates will converge to samples from the true posterior distribution as the authors anneal the stepsize.},
  file       = {:welling_bayesian_2011 - Bayesian Learning Via Stochastic Gradient Langevin Dynamics.html:URL;:welling2011bayesian - Bayesian Learning Via Stochastic Gradient Langevin Dynamics.pdf:PDF},
}

@Article{gill2010circular,
  author       = {Gill, Jeff and Hangartner, Dominik},
  title        = {Circular {Data} in {Political} {Science} and {How} to {Handle} {It}},
  journaltitle = {Political Analysis},
  publisher    = {Cambridge University Press},
  volume       = {18},
  number       = {3},
  pages        = {316--336},
  date         = {2010},
  doi          = {10.1093/pan/mpq009},
  issn         = {1047-1987, 1476-4989},
  language     = {en},
  url          = {https://www.cambridge.org/core/journals/political-analysis/article/circular-data-in-political-science-and-how-to-handle-it/6DF2D9DA60C455E6A48FFB0FF011F747},
  urldate      = {2023-05-15},
  abstract     = {There has been no attention to circular (purely cyclical) data in political science research. We show that such data exist and are mishandled by models that do not take into account the inherently recycling nature of some phenomenon. Clock and calendar effects are the obvious cases, but directional data are observed as well. We describe a standard maximum likelihood regression modeling framework based on the von Mises distribution, then develop a general Bayesian regression procedure for the first time, providing an easy-to-use Metropolis-Hastings sampler for this approach. Applications include a chronographic analysis of U.S. domestic terrorism and directional party preferences in a two-dimensional ideological space for German Bundestag elections. The results demonstrate the importance of circular models to handle periodic and directional data in political science.},
  file         = {Full Text PDF:https\://www.cambridge.org/core/services/aop-cambridge-core/content/view/6DF2D9DA60C455E6A48FFB0FF011F747/S1047198700012493a.pdf/div-class-title-circular-data-in-political-science-and-how-to-handle-it-div.pdf:application/pdf},
}

@InProceedings{liu2023goggle,
  author     = {Liu, Tennison and Qian, Zhaozhi and Berrevoets, Jeroen and van der Schaar, Mihaela},
  date       = {2023-02},
  title      = {{GOGGLE}: {Generative} {Modelling} for {Tabular} {Data} by {Learning} {Relational} {Structure}},
  booktitle  = {International {Conference} on {Learning} {Representations} ({ICLR})},
  language   = {en},
  url        = {https://openreview.net/forum?id=fPVRcJqspu},
  urldate    = {2023-05-15},
  abstract   = {Deep generative models learn highly complex and non-linear representations to generate realistic synthetic data. While they have achieved notable success in computer vision and natural language processing, similar advances have been less demonstrable in the tabular domain. This is partially because generative modelling of tabular data entails a particular set of challenges, including heterogeneous relationships, limited number of samples, and difficulties in incorporating prior knowledge. Additionally, unlike their counterparts in image and sequence domain, deep generative models for tabular data almost exclusively employ fully-connected layers, which encode weak inductive biases about relationships between inputs. Real-world data generating processes can often be represented using relational structures, which encode sparse, heterogeneous relationships between variables. In this work, we learn and exploit relational structure underlying tabular data to better model variable dependence, and as a natural means to introduce regularization on relationships and include prior knowledge. Specifically, we introduce GOGGLE, an end-to-end message passing scheme that jointly learns the relational structure and corresponding functional relationships as the basis of generating synthetic samples. Using real-world datasets, we provide empirical evidence that the proposed method is effective in generating realistic synthetic data and exploiting domain knowledge for downstream tasks.},
  file       = {Full Text PDF:https\://openreview.net/pdf?id=fPVRcJqspu:application/pdf},
  shorttitle = {{GOGGLE}},
}

@TechReport{du2020implicit,
  author      = {Du, Yilun and Mordatch, Igor},
  date        = {2020-06},
  institution = {arXiv},
  title       = {Implicit {Generation} and {Generalization} in {Energy}-{Based} {Models}},
  doi         = {10.48550/arXiv.1903.08689},
  eprint      = {1903.08689},
  eprinttype  = {arXiv},
  url         = {http://arxiv.org/abs/1903.08689},
  urldate     = {2023-05-16},
  abstract    = {Energy based models (EBMs) are appealing due to their generality and simplicity in likelihood modeling, but have been traditionally difficult to train. We present techniques to scale MCMC based EBM training on continuous neural networks, and we show its success on the high-dimensional data domains of ImageNet32x32, ImageNet128x128, CIFAR-10, and robotic hand trajectories, achieving better samples than other likelihood models and nearing the performance of contemporary GAN approaches, while covering all modes of the data. We highlight some unique capabilities of implicit generation such as compositionality and corrupt image reconstruction and inpainting. Finally, we show that EBMs are useful models across a wide variety of tasks, achieving state-of-the-art out-of-distribution classification, adversarially robust classification, state-of-the-art continual online class learning, and coherent long term predicted trajectory rollouts.},
  file        = {arXiv Fulltext PDF:https\://arxiv.org/pdf/1903.08689.pdf:application/pdf},
  keywords    = {Computer Science - Machine Learning, Computer Science - Computer Vision and Pattern Recognition, Statistics - Machine Learning},
}

@Comment{jabref-meta: databaseType:biblatex;}