156  |  Visualizing Fairness in Machine Learning with Yongsu Ahn and Alex Cabrera

43:04
 
Share
 

Manage episode 255289367 series 32120
By Enrico Bertini and Moritz Stefaner. Discovered by Player FM and our community — copyright is owned by the publisher, not Player FM, and audio is streamed directly from their servers. Hit the Subscribe button to track updates in Player FM, or paste the feed URL into other podcast apps.
// Podlove Web Player (v5) bootstrap, auto-generated by the Podlove WordPress plugin.
// On DOMContentLoaded it:
//   1. looks up the player container element by its generated id,
//   2. pre-seeds podlovePlayerCache with two JSON payloads (the episode/publisher
//      data for shortcode 4482, and the "ds" theme/config payload) so the player
//      can render without extra network round-trips,
//   3. calls podlovePlayer(...) with the same two endpoint URLs, then removes the
//      "podlove-web-player-loading" class once initialization resolves.
// NOTE(review): this block was captured from a scraped page; the raw line breaks
// below fall inside JavaScript string literals (e.g. after `"title":"What is `),
// which would be a syntax error if executed verbatim — presumably an artifact of
// text extraction, not of the original script. Verify against the live page
// before reusing this code.
document.addEventListener("DOMContentLoaded", function() { var player = document.getElementById("player-60f87bae6276f"); podlovePlayerCache.add([{"url":"https:\/\/datastori.es\/wp-json\/podlove-web-player\/shortcode\/publisher\/4482","data":{"version":5,"show":{"title":"Data Stories","subtitle":"A podcast on data and how it affects our lives \u2014 with Enrico Bertini and Moritz Stefaner","summary":"Enrico Bertini and Moritz Stefaner discuss the latest developments in data analytics, visualization and related topics.","poster":"https:\/\/datastori.es\/wp-content\/cache\/podlove\/ee\/2a473f91a0edbb253663696244c6f4\/data-stories_500x.png","link":"https:\/\/datastori.es"},"title":"156\u00a0\u00a0|\u00a0\u00a0Visualizing Fairness in Machine Learning with Yongsu Ahn and Alex Cabrera","subtitle":"","summary":"","publicationDate":"2020-03-05T04:30:03+01:00","duration":"00:43:04.265","poster":"https:\/\/datastori.es\/wp-content\/cache\/podlove\/ee\/2a473f91a0edbb253663696244c6f4\/data-stories_500x.png","link":"https:\/\/datastori.es\/156-fairness-in-machine-learning-with-yongsu-ahn-and-alex-cabrera\/","chapters":[{"start":"00:00:33.748","title":"Welcome to Data Stories!","href":"","image":""},{"start":"00:01:07.254","title":"Our podcast is listener-supported, please consider making a donation","href":"https:\/\/www.patreon.com\/datastories","image":""},{"start":"00:01:41.586","title":"Our topic today: Bias and fairness in machine learning","href":"","image":""},{"start":"00:02:48.944","title":"Our guests: Alex Cabrera","href":"https:\/\/cabreraalex.com\/","image":""},{"start":"00:03:14.053","title":"and Yongsu Ahn","href":"https:\/\/www.linkedin.com\/in\/ayong8\/","image":""},{"start":"00:03:54.006","title":"How to define 'fairness' and 'bias' in machine learning?","href":"","image":""},{"start":"00:08:49.364","title":"Examples of discriminitation in machine learning","href":"","image":""},{"start":"00:13:22.372","title":"What is 
FairSight?","href":"https:\/\/arxiv.org\/pdf\/1908.00176.pdf","image":""},{"start":"00:17:00.235","title":"What is FairVis?","href":"https:\/\/arxiv.org\/pdf\/1904.05419.pdf","image":""},{"start":"00:38:32.068","title":"Do you have advice on how to get started with the topic?","href":"","image":""},{"start":"00:52:10.938","title":"Get in touch with us and support us on Patreon","href":"https:\/\/www.patreon.com\/datastories","image":""}],"audio":[{"url":"https:\/\/datastori.es\/podlove\/file\/6575\/s\/webplayer\/c\/website\/156-fairness-in-machine-learning-with-yongsu-ahn-and-alex-cabrera.m4a","size":"32316509","title":"MPEG-4 AAC Audio (m4a)","mimeType":"audio\/mp4"},{"url":"https:\/\/datastori.es\/podlove\/file\/6576\/s\/webplayer\/c\/website\/156-fairness-in-machine-learning-with-yongsu-ahn-and-alex-cabrera.mp3","size":"41436780","title":"MP3 Audio (mp3)","mimeType":"audio\/mpeg"}],"files":[{"url":"https:\/\/datastori.es\/podlove\/file\/6576\/s\/webplayer\/156-fairness-in-machine-learning-with-yongsu-ahn-and-alex-cabrera.mp3","size":"41436780","title":"MP3 Audio","mimeType":"audio\/mpeg"},{"url":"https:\/\/datastori.es\/podlove\/file\/6575\/s\/webplayer\/156-fairness-in-machine-learning-with-yongsu-ahn-and-alex-cabrera.m4a","size":"32316509","title":"MPEG-4 AAC Audio","mimeType":"audio\/mp4"}],"contributors":[]}}, 
{"url":"https:\/\/datastori.es\/wp-json\/podlove-web-player\/shortcode\/config\/ds\/theme\/ds","data":{"activeTab":"chapters","subscribe-button":{"feed":"https:\/\/datastori.es\/feed\/podcast\/","clients":[{"id":"overcast","service":null},{"id":"stitcher","service":null},{"id":"spotify","service":null},{"id":"pocket-casts","service":null},{"id":"google-podcasts","service":null},{"id":"apple-podcasts","service":null},{"id":"rss","service":null}]},"share":{"channels":["facebook","twitter","whats-app","linkedin","pinterest","xing","mail","link"],"outlet":"https:\/\/datastori.es\/wp-content\/plugins\/podlove-web-player\/web-player\/share.html","sharePlaytime":true},"related-episodes":{"source":"disabled","value":null},"version":5,"theme":{"tokens":{"brand":"#5728b1","brandDark":"#47309b","brandDarkest":"#221064","brandLightest":"#FFF","shadeDark":"#47309b","shadeBase":"#5728b1","contrast":"#221064","alt":"#fff"},"fonts":{"ci":{"name":"","family":[" AvenirNext"," Avenir Next","Segoe UI","-apple-system","BlinkMacSystemFont","Roboto","Helvetica","Arial","sans-serif","Apple Color Emoji","Segoe UI Emoji\", \"Segoe UI Symbol"],"src":[],"weight":"600"},"regular":{"name":"regular","family":["AvenirNext","Avenir Next","Segoe UI","-apple-system","BlinkMacSystemFont","Roboto","Helvetica","Arial","sans-serif","Apple Color Emoji","Segoe UI Emoji\", \"Segoe UI Symbol"],"src":[],"weight":300},"bold":{"name":"bold","family":["AvenirNext","Avenir Next","-apple-system","BlinkMacSystemFont","Segoe UI","Roboto","Helvetica","Arial","sans-serif","Apple Color Emoji","Segoe UI Emoji\", \"Segoe UI Symbol"],"src":[],"weight":700}}},"base":"https:\/\/datastori.es\/wp-content\/plugins\/podlove-web-player\/web-player\/"}}]); podlovePlayer(player, "https://datastori.es/wp-json/podlove-web-player/shortcode/publisher/4482", "https://datastori.es/wp-json/podlove-web-player/shortcode/config/ds/theme/ds").then(function() { player && player.classList.remove("podlove-web-player-loading"); }); });

In this episode we have PhD students Yongsu Ahn and Alex Cabrera to talk about two separate data visualization systems they developed to help people analyze machine learning models in terms of potential biases they may have. The systems are called FairSight and FairVis and have slightly different goals. FairSight focuses on models that generate rankings (e.g., in school admissions) and FairVis more on comparison of fairness metrics. With them we explore the world of “machine bias” trying to understand what it is and how visualization can play a role in its detection and mitigation.

[Our podcast is fully listener-supported. That’s why you don’t have to listen to ads! Please consider becoming a supporter on Patreon or sending us a one-time donation through PayPal. And thank you!]

Enjoy the show!

Links:


Related episodes

Chapters

1. Welcome to Data Stories! (00:00:33)

2. Our podcast is listener-supported, please consider making a donation (00:01:07)

3. Our topic today: Bias and fairness in machine learning (00:01:41)

4. Our guests: Alex Cabrera (00:02:48)

5. and Yongsu Ahn (00:03:14)

6. How to define 'fairness' and 'bias' in machine learning? (00:03:54)

7. Examples of discrimination in machine learning (00:08:49)

8. What is FairSight? (00:13:22)

9. What is FairVis? (00:17:00)

10. Do you have advice on how to get started with the topic? (00:38:32)

11. Get in touch with us and support us on Patreon (00:52:10)

169 episodes