The following selected bibliography lists works that challenge the traditional view of data, Geographic Information Systems (GIS) and mapping. Listed are works that critically analyze data studies, mapping and GIS from decolonial, antiracist, feminist, queer, LGBTQIA2S+ and disability perspectives.
Please also see our Canadian Queer Data Guide for data on Canada's 2SLGBTQIA+ populations.
Data | GIS & Mapping | AI & Algorithms
Data

<!-- BibBase embed: three alternative ways to render the Zotero collection
     (client-side script, server-side PHP, iframe). Use exactly ONE on a live
     page. The script src previously carried a duplicated "&jsonp=1" parameter;
     it is passed once now. -->
<script src="https://bibbase.org/show?bib=https%3A%2F%2Fapi.zotero.org%2Fgroups%2F2911209%2Fcollections%2FXLPHCSQ6%2Fitems%3Fkey%3DIvJhUTh5RKsgHH9zlFgwPfpF%26format%3Dbibtex%26limit%3D100&jsonp=1"></script>
<?php
// Server-side fallback: fetch the rendered bibliography and inline its HTML.
// file_get_contents() returns false on failure (network error, blocked
// allow_url_fopen, HTTP error), so guard before printing instead of silently
// emitting nothing.
$bibbaseUrl = "https://bibbase.org/show?bib=https%3A%2F%2Fapi.zotero.org%2Fgroups%2F2911209%2Fcollections%2FXLPHCSQ6%2Fitems%3Fkey%3DIvJhUTh5RKsgHH9zlFgwPfpF%26format%3Dbibtex%26limit%3D100&jsonp=1";
$contents = file_get_contents($bibbaseUrl);
if ($contents !== false) {
    echo $contents;
} else {
    echo "<!-- BibBase fetch failed; bibliography unavailable -->";
}
?>
<iframe src="https://bibbase.org/show?bib=https%3A%2F%2Fapi.zotero.org%2Fgroups%2F2911209%2Fcollections%2FXLPHCSQ6%2Fitems%3Fkey%3DIvJhUTh5RKsgHH9zlFgwPfpF%26format%3Dbibtex%26limit%3D100&jsonp=1"></iframe>
For more details see the documentation.
To the site owner:
Action required! Mendeley is changing its API. In order to keep using Mendeley with BibBase past April 14th, you need to:
- renew the authorization for BibBase on Mendeley, and
- update the BibBase URL in your page the same way you did when you initially set up this page.
link bibtex
@book{keilty_queer_2024,
  title        = {Queer {Data} {Studies}},
  editor       = {Keilty, Patrick},
  collaborator = {Herzig, Rebecca and Subramaniam, Banu},
  series       = {Feminist {Technosciences} {Series}},
  publisher    = {University of Washington Press},
  address      = {Seattle},
  year         = {2024},
  isbn         = {978-0-295-75198-6},
  language     = {eng},
}
@misc{collier_celebrating_2024,
  title    = {Celebrating {Indigenous}-{Led} {Data} {Initiatives}},
  author   = {Collier, Brittany},
  journal  = {HillNotes},
  month    = jun,
  year     = {2024},
  url      = {https://hillnotes.ca/2024/06/21/celebrating-indigenous-led-data-initiatives/},
  urldate  = {2024-06-27},
  language = {en-US},
  abstract = {On 21 June, National Indigenous Peoples Day, communities across Canada hold events to celebrate Indigenous cultures and contributions. To mark the occasion, this HillNote provides information about research and data collected about Indigenous Peoples, Indigenous data sovereignty, the federal government’s role in data collection and Indigenous-led data initiatives.},
}
link bibtex abstract
Fixed: the publisher value was broken across a line break mid-name ("W.W. / Norton");
joined onto one line and the entry reformatted one field per line.
@book{wiggins_how_2023,
  title        = {How data happened: a history from the age of reason to the age of algorithms},
  shorttitle   = {How data happened},
  author       = {Wiggins, Chris H.},
  collaborator = {Jones, Matthew L.},
  edition      = {First edition.},
  publisher    = {W.W. Norton \& Company},
  address      = {New York, NY},
  year         = {2023},
  isbn         = {978-1-324-00673-2},
  language     = {eng},
  keywords     = {Big data, Computer science, Documents d'information, Données volumineuses, Histoire, History, Informational works, Informatique, Mathematics, Mathématiques, Statistics},
  abstract     = {From facial recognition--capable of checking people into flights or identifying undocumented residents--to automated decision systems that inform who gets loans and who receives bail, each of us moves through a world determined by data-empowered algorithms. But these technologies didn't just appear: they are part of a history that goes back centuries, from the census enshrined in the US Constitution to the birth of eugenics in Victorian Britain to the development of Google search. Expanding on the popular course they created at Columbia University, Chris Wiggins and Matthew L. Jones illuminate the ways in which data has long been used as a tool and a weapon in arguing for what is true, as well as a means of rearranging or defending power. They explore how data was created and curated, as well as how new mathematical and computational techniques developed to contend with that data serve to shape people, ideas, society, military operations, and economies. Although technology and mathematics are at its heart, the story of data ultimately concerns an unstable game among states, corporations, and people. How were new technical and scientific capabilities developed; who supported, advanced, or funded these capabilities or transitions; and how did they change who could do what, from what, and to whom? Wiggins and Jones focus on these questions as they trace data's historical arc, and look to the future. By understanding the trajectory of data--where it has been and where it might yet go--Wiggins and Jones argue that we can understand how to bend it to ends that we collectively choose, with intentionality and purpose -- provided by publisher.},
}
@book{wong_we_2023,
  title      = {We, the {Data}},
  shorttitle = {We, the {Data}},
  author     = {Wong, Wendy H.},
  publisher  = {MIT Press},
  address    = {Cambridge, Mass},
  year       = {2023},
  url        = {https://www.penguinrandomhouse.com/books/730905/we-the-data-by-wendy-h-wong/},
  urldate    = {2023-04-11},
  language   = {en-US},
  abstract   = {A rallying call for extending human rights beyond our physical selves—and why we need to reboot rights in our data-intensive world. Our data-intensive world is here to stay, but does that come at...},
}
@misc{bjork_chatgpt_2023,
  title    = {{ChatGPT} threatens language diversity. {More} needs to be done to protect our differences in the age of {AI}},
  author   = {Bjork, Collin},
  journal  = {The Conversation},
  month    = feb,
  year     = {2023},
  url      = {http://theconversation.com/chatgpt-threatens-language-diversity-more-needs-to-be-done-to-protect-our-differences-in-the-age-of-ai-198878},
  urldate  = {2023-03-14},
  language = {en},
  abstract = {When you ask ChatGPT to generate content, the default output is in the voice, style and language of white English-speaking men, who have long dominated many writing-intensive sectors.},
}
Fixed: title was exported in ALL CAPS with every word individually braced, which
freezes the screaming-caps rendering under every style; re-entered in Title Case
so bibliography styles can apply their own casing. Corporate author kept
double-braced so it is treated as a single indivisible name.
@misc{center_for_antiracist_research_toward_2022,
  title     = {Toward Evidence-Based Antiracist Policymaking: Problems and Proposals for Better Racial Data Collection and Reporting},
  author    = {{Center for Antiracist Research}},
  publisher = {Center for Antiracist Research},
  month     = may,
  year      = {2022},
  url       = {https://www.bu.edu/antiracism-center/files/2022/06/Toward-Evidence-Based-Antiracist-Policymaking.pdf},
  urldate   = {2022-08-08},
}
link bibtex
@book{quinless_decolonizing_2022,
  title      = {Decolonizing {Data}: {Unsettling} {Conversations} about {Social} {Research} {Methods}},
  shorttitle = {Decolonizing {Data}},
  author     = {Quinless, Jacqueline M.},
  publisher  = {University of Toronto Press},
  address    = {Toronto Buffalo London},
  month      = feb,
  year       = {2022},
  isbn       = {978-1-4875-0440-3},
  language   = {English},
}
link bibtex abstract
NOTE(review): entry left byte-identical; title/shorttitle are sentence-cased (styles may downcase further) and the abstract is the publisher's blurb retained verbatim, including its embedded quotation marks.
@book{guyan_queer_2022, address = {London}, series = {Bloomsbury studies in digital cultures}, title = {Queer data: using gender, sex and sexuality data for action}, isbn = {978-1-350-23073-6}, shorttitle = {Queer data}, abstract = {"Data has never mattered more. Our lives are increasingly shaped by it and how it is defined, collected and used. But who counts in the collection, analysis and application of data? This important book is the first to look at queer data - defined as data relating to gender, sex, sexual orientation and trans identity/history. The author shows us how current data practices reflect an incomplete account of LGBTQ lives and helps us understand how data biases are used to delegitimise the everyday experiences of queer people. Guyan demonstrates why it is important to understand, collect and analyse queer data, the benefits and challenges involved in doing so, and how we might better use queer data in our work. Arming us with the tools for action, this book shows how greater knowledge about queer identities is instrumental in informing decisions about resource allocation, changes to legislation, access to services, representation and visibility."-- Provided by publisher.}, language = {eng}, publisher = {Bloomsbury Academic}, author = {Guyan, Kevin}, year = {2022}, keywords = {Data mining, Decision making, Research, Sexual minorities, Statistical methods, Statistics}, }
Fixed: removed "note = {00001}" — a Zotero/BibBase citation-count export artifact
that would print literally as "00001" in rendered bibliographies.
@book{chun_discriminating_2021,
  title      = {Discriminating {Data}: {Correlation}, {Neighborhoods}, and the {New} {Politics} of {Recognition}},
  shorttitle = {Discriminating {Data}},
  author     = {Chun, Wendy Hui Kyong},
  publisher  = {MIT Press},
  address    = {Cambridge, MA, USA},
  month      = nov,
  year       = {2021},
  isbn       = {978-0-262-04622-0},
  url        = {https://librarysearch.library.utoronto.ca/permalink/01UTORONTO_INST/blpd0s/alma991107112195106196},
  language   = {en},
  abstract   = {How big data and machine learning encode discrimination and create agitated clusters of comforting rage.},
}
link bibtex
Fixed: catalog-export title was ALL CAPS and dropped the leading "The"; publisher
was "W W NORTON". Re-entered as the book's actual title and the publisher's
standard form. Address {S.l.} (sine loco) retained as exported — TODO confirm
(W. W. Norton is New York-based).
@book{christian_alignment_2021,
  title      = {The Alignment Problem: Machine Learning and Human Values},
  shorttitle = {The Alignment Problem},
  author     = {Christian, Brian},
  publisher  = {W. W. Norton \& Company},
  address    = {S.l.},
  year       = {2021},
  isbn       = {978-0-393-86833-3},
  language   = {English},
  note       = {OCLC: 1233266753},
}
@misc{fry_what_2021,
  title    = {What {Data} {Can}’t {Do}},
  author   = {Fry, Hannah},
  journal  = {The New Yorker},
  month    = mar,
  year     = {2021},
  url      = {https://www.newyorker.com/magazine/2021/03/29/what-data-cant-do},
  urldate  = {2021-04-13},
  language = {en-us},
  abstract = {When it comes to people—and policy—numbers are both powerful and perilous.},
}
link bibtex abstract
NOTE(review): abstract has missing spaces after sentence periods (e.g. "statistics.Today", "astray—statistics can point to ways we can live better and work smarter.As") — a web-export artifact; cosmetic only, left verbatim.
@book{harford_data_2021, address = {New York}, edition = {First American edition.}, title = {The {Data} {Detective}: {Ten} {Easy} {Rules} to {Make} {Sense} of {Statistics}}, isbn = {978-0-593-08459-5}, shorttitle = {The {Data} {Detective}}, abstract = {From “one of the great (greatest?) contemporary popular writers on economics” (Tyler Cowen) comes a smart, lively, and encouraging rethinking of how to use statistics.Today we think statistics are the enemy, numbers used to mislead and confuse us. That’s a mistake, Tim Harford says in The Data Detective. We shouldn’t be suspicious of statistics—we need to understand what they mean and how they can improve our lives: they are, at heart, human behavior seen through the prism of numbers and are often “the only way of grasping much of what is going on around us.” If we can toss aside our fears and learn to approach them clearly—understanding how our own preconceptions lead us astray—statistics can point to ways we can live better and work smarter.As “perhaps the best popular economics writer in the world” (New Statesman), Tim Harford is an expert at taking complicated ideas and untangling them for millions of readers. In The Data Detective, he uses new research in science and psychology to set out ten strategies for using statistics to erase our biases and replace them with new ideas that use virtues like patience, curiosity, and good sense to better understand ourselves and the world. As a result, The Data Detective is a big-idea book about statistics and human behavior that is fresh, unexpected, and insightful.}, language = {English}, publisher = {Riverhead Books}, author = {Harford, Tim}, month = feb, year = {2021}, keywords = {General, MATHEMATICS, Methodology, Probability \& Statistics, Social sciences, Statistical methods, Statistics}, }
link bibtex abstract
NOTE(review): address is {S.l.} (sine loco) although the publisher is The MIT Press (Cambridge, MA) — confirm and fill in. The abstract also refers to the book as "A Data Manifesto" while the title field reads "Democratizing Our Data: A Manifesto"; this is the catalog copy as exported, left verbatim.
@book{lane_democratizing_2021, address = {S.l.}, title = {Democratizing {Our} {Data}: {A} {Manifesto}}, isbn = {978-0-262-54274-6}, shorttitle = {Democratizing {Our} {Data}}, abstract = {Why America's data system is broken, and how to fix it.Why, with data increasingly important, available, valuable and cheap, are the data produced by the American government getting worse and costing more? State and local governments rely on population data from the US Census Bureau; prospective college students and their parents can check data from the National Center for Education Statistics; small businesses can draw on data about employment and wages from the Bureau of Labor Statistics. But often the information they get is out of date or irrelevant, based on surveys--a form of information gathering notorious for low response rates. In A Data Manifesto, Julia Lane argues that bad data is bad for democracy. Her book is a wake-up call to America to fix its broken public data system.}, language = {English}, publisher = {The MIT Press}, author = {Lane, Julia}, month = oct, year = {2021}, }
link bibtex
@book{crawford_atlas_2021,
  title      = {The {Atlas} of {AI}: {Power}, {Politics}, and the {Planetary} {Costs} of {Artificial} {Intelligence}},
  shorttitle = {The {Atlas} of {AI}},
  author     = {Crawford, Kate},
  publisher  = {Yale University Press},
  address    = {New Haven},
  year       = {2021},
  isbn       = {978-0-300-25239-2},
  language   = {eng},
}
link bibtex
Fixed: stray " ;" left in address by the catalog export ({London ;} -> {London}).
NOTE(review): this title is an edited collection — confirm whether editor= rather
than author= is the appropriate field here.
@book{walter_indigenous_2021,
  title     = {Indigenous data sovereignty and policy},
  author    = {Walter, Maggie},
  series    = {Routledge {Studies} in {Indigenous} {Peoples} and {Policy}},
  publisher = {Routledge},
  address   = {London},
  year      = {2021},
  isbn      = {978-0-429-27395-7},
  language  = {eng},
  keywords  = {Computer network resources, Electronic books, Indigenous peoples},
}
NOTE(review): both url and doi carry the same DOI (the url is just the resolver form); styles that print both will show it twice — the bare doi field alone is preferable. The abstract also contains a stray footnote marker "insight.1" fused into the text by the export. Left verbatim pending a decision.
@article{bode_why_2020, title = {Why {You} {Can}’t {Model} {Away} {Bias}}, volume = {81}, issn = {0026-7929}, url = {https://doi.org/10.1215/00267929-7933102}, doi = {10.1215/00267929-7933102}, abstract = {Quantitative literary studies is often understood as homogeneous in its methods; many literary scholars also perceive the field as incapable of contributing literary-critical or historical insight.1 This article contests both perceptions—but not by arguing that quantitative literary research is inevitably sound, justified, or beneficial. Rather, I elaborate and extend an ongoing disagreement with considerable bearing on the current state and future of the field. That disagreement is between what I will call its scholarly and its statistical approaches: between those who maintain that literary insight depends on critically analyzing and historicizing data sets prior to statistical analysis of patterns and trends, and those who claim that statistical analysis, in the absence of or with minimal investigation and contextualization of data sets, is sufficient for critical and historical understanding.}, number = {1}, urldate = {2023-08-08}, journal = {Modern Language Quarterly}, author = {Bode, Katherine}, month = mar, year = {2020}, pages = {95--124}, }
NOTE(review): the abstract contains a duplicated/garbled clause ("Building data infrastructure without a racial equity lens and Societal “progress” is often marked by the construction of new infrastructure understanding of historical context") — an export garble; verify against the original report before correcting, so left verbatim here.
@techreport{nelson_toolkit_2020, title = {A {Toolkit} for {Centering} {Racial} {Equity} {Throughout} {Data} {Integration}}, url = {https://aisp.upenn.edu/resource-article/a-toolkit-for-centering-racial-equity-throughout-data-integration/}, abstract = {Societal “progress” is often marked by the construction of new infrastructure that fuels change and innovation. Just as railroads and interstate highways were the defining infrastructure projects of the 1800 and 1900s, the development of data infrastructure is a critical innovation of our century. Railroads and highways were drivers of development and prosperity for some investors and sites. Yet other individuals and communities were harmed, displaced, bypassed, ignored, and forgotten by those efforts. As railroads and highways both developed and decimated communities, so too can data infrastructure. At this moment in our history, we can co-create data infrastructure to promote racial equity and the public good, or we can invest in data infrastructure that disregards the historical, social, and political context—reinforcing racial inequity that continues to harm communities. Building data infrastructure without a racial equity lens and Societal “progress” is often marked by the construction of new infrastructure understanding of historical context will exacerbate existing inequalities along the lines of race, gender, class, and ability. Instead, we commit to contextualize our work in the historical and structural oppression that shapes it, and organize stakeholders across geography, sector, and experience to center racial equity throughout data integration.}, language = {en-US}, urldate = {2023-03-23}, institution = {Actionable Intelligence for Social Policy, University of Pennsylvania}, author = {Nelson, Amy Hawn and Jenkins, Della and Zanti, Sharon and Katz, Matthew and Berkowitz, Emily and Burnett, TC and Culhane, Dennis}, month = may, year = {2020}, }
@misc{pruden_meet_2020,
  title        = {Meet the {Methods} {Series}: “{What} and who is {Two}-{Spirit}?” in {Health} {Research} - {CIHR}},
  shorttitle   = {Meet the {Methods} {Series}},
  author       = {Pruden, Harlan and Salway, Travis},
  collaborator = {{Canadian Institutes of Health Research, Government of Canada}},
  month        = oct,
  year         = {2020},
  url          = {https://cihr-irsc.gc.ca/e/52214.html},
  urldate      = {2022-08-22},
  language     = {eng},
  note         = {Last Modified: 2020-10-29},
}
doi link bibtex abstract 1 download
NOTE(review): this entry's abstract value spans a line break mid-sentence; that is legal BibTeX (whitespace inside braces collapses to a single space), so the entry is left byte-identical.
@book{dignazio_data_2020, address = {Cambridge}, series = {Strong ideas}, title = {Data {Feminism}}, isbn = {978-0-262-04400-4}, abstract = {A new way of thinking about data science and data ethics that is informed by the ideas of intersectional feminism. Today, data science is a form of power. It has been used to expose injustice, improve health outcomes, and topple governments. But it has also been used to discriminate, police, and surveil. This potential for good, on the one hand, and harm, on the other, makes it essential to ask: Data science by whom? Data science for whom? Data science with whose interests in mind? The narratives around big data and data science are overwhelmingly white, male, and techno-heroic. In Data Feminism , Catherine D'Ignazio and Lauren Klein present a new way of thinking about data science and data ethics—one that is informed by intersectional feminist thought. Illustrating data feminism in action, D'Ignazio and Klein show how challenges to the male/female binary can help challenge other hierarchical (and empirically wrong) classification systems. They explain how, for example, an understanding of emotion can expand our ideas about effective data visualization, and how the concept of invisible labor can expose the significant human efforts required by our automated systems. And they show why the data never, ever “speak for themselves.” Data Feminism offers strategies for data scientists seeking to learn how feminism can help them work toward justice, and for feminists who want to focus their efforts on the growing field of data science. But Data Feminism is about much more than gender. 
It is about power, about who has it and who doesn't, and about how those differentials of power can be challenged and changed.}, language = {English}, publisher = {MIT Press}, author = {D'Ignazio, Catherine and Klein, Lauren F.}, year = {2020}, doi = {10.7551/mitpress/11805.001.0001}, keywords = {Big data, Computing and Processing, Data Science, Electronic books, Feminism, Feminism and science, General Topics for Engineers, Information Science, Methodology, Power (Social sciences), Quantitative research, Social aspects}, }
link bibtex abstract
NOTE(review): abstract has missing spaces after sentence periods (e.g. "InequalityWhat do people do", "judgments.And yet") — web-export artifacts, cosmetic only; the value also spans a line break, which BibTeX collapses to a space. Left byte-identical.
@book{stone_counting_2020, address = {New York, NY}, edition = {First edition.}, title = {Counting: {How} {We} {Use} {Numbers} to {Decide} {What} {Matters}}, isbn = {978-1-63149-592-2}, shorttitle = {Counting}, abstract = {“Deborah Stone’s mind-altering insight is that the numbers we use to capture the human experience are themselves a form of creative story-telling. They shouldn’t end the conversation, but spark a deeper and richer one. Counting deserves five stars for showing why five stars can never tell the whole story.” —Jacob S. Hacker, co-author of Let Them Eat Tweets: How the Right Rules in an Age of Extreme InequalityWhat do people do when they count? What do numbers really mean? We all know that people can lie with statistics, but in this groundbreaking work, eminent political scientist Deborah Stone uncovers a much deeper problem. With help from Dr. Seuss and Cookie Monster, she explains why numbers can’t be objective: in order to count, one must first decide what counts. Every number is the ending to a story built on cultural assumptions, social conventions, and personal judgments.And yet, in this age of big data and metric mania, numbers shape almost every facet of our lives: whether we get hired, fired, or promoted; whether we get into college or out of prison; how our opinions are gathered and portrayed to politicians; or how government designs health and safety regulations. In warm and playful prose, Counting explores what happens when we measure nebulous notions like merit, race, poverty, pain, or productivity.When so much rides on numbers, they can become instruments of social welfare, justice, and democracy—or not. The citizens of Flint, Michigan, for instance, used numbers to prove how their household water got contaminated and to force their government to take remedial action. In stark contrast, the Founding Fathers finessed an intractable conflict by counting each slave as three-fifths of a person in the national census. 
They set a terrible precedent for today’s politicians who claim to solve moral and political dilemmas with arithmetic.Suffused with moral reflection and ending with a powerful epilogue on COVID-19’s dizzying statistics, Counting will forever change our relationship with numbers.}, language = {English}, publisher = {Liveright}, author = {Stone, Deborah}, month = oct, year = {2020}, keywords = {Counting, Evaluation, Measurement, Social aspects, Statistics}, }
link bibtex abstract
NOTE(review): "note = {Pages: 240 Pages}" is a Zotero export artifact and will print verbatim in rendered bibliographies — consider removing it. Entry otherwise left byte-identical; the publisher's abstract (with its fused "data?Richard"-style missing spaces) is retained verbatim.
@book{so_redlining_2020, title = {Redlining {Culture}: {A} {Data} {History} of {Racial} {Inequality} and {Postwar} {Fiction}}, isbn = {978-0-231-55231-8}, shorttitle = {Redlining {Culture}}, abstract = {The canon of postwar American fiction has changed over the past few decades to include far more writers of color. It would appear that we are making progress—recovering marginalized voices and including those who were for far too long ignored. However, is this celebratory narrative borne out in the data?Richard Jean So draws on big data, literary history, and close readings to offer an unprecedented analysis of racial inequality in American publishing that reveals the persistence of an extreme bias toward white authors. In fact, a defining feature of the publishing industry is its vast whiteness, which has denied nonwhite authors, especially black writers, the coveted resources of publishing, reviews, prizes, and sales, with profound effects on the language, form, and content of the postwar novel. Rather than seeing the postwar period as the era of multiculturalism, So argues that we should understand it as the invention of a new form of racial inequality—one that continues to shape the arts and literature today.Interweaving data analysis of large-scale patterns with a consideration of Toni Morrison’s career as an editor at Random House and readings of individual works by Octavia Butler, Henry Dumas, Amy Tan, and others, So develops a form of criticism that brings together qualitative and quantitative approaches to the study of literature. A vital and provocative work for American literary studies, critical race studies, and the digital humanities, Redlining Culture shows the importance of data and computational methods for understanding and challenging racial inequality.}, publisher = {Columbia University Press}, author = {So, Richard Jean}, month = dec, year = {2020}, note = {Pages: 240 Pages}, }
link bibtex
@article{lucchesi_spatial_2020,
  title      = {Spatial {Data} and ({De})colonization: {Incorporating} {Indigenous} {Data} {Sovereignty} {Principles} into {Cartographic} {Research}},
  shorttitle = {Spatial {Data} and ({De})colonization},
  author     = {Lucchesi, Annita Hetoevėhotohke’e},
  journal    = {Cartographica},
  volume     = {55},
  number     = {3},
  pages      = {163--169},
  year       = {2020},
  issn       = {0317-7173},
  language   = {eng},
  note       = {Publisher: University of Toronto Press},
}
link bibtex
@book{oguamanam_indigenous_2019,
  title      = {Indigenous {Data} {Sovereignty}: {Retooling} {Indigenous} {Resurgence} for {Development}},
  shorttitle = {Indigenous {Data} {Sovereignty}},
  author     = {Oguamanam, Chidi},
  publisher  = {Centre for International Governance Innovation},
  year       = {2019},
  language   = {eng},
}
Fixed: stray " ;" left in address by the catalog export ({Cambridge, England ;} ->
{Cambridge, England}); entry reformatted one field per line, all values otherwise verbatim.
@book{benjamin_race_2019,
  title      = {Race after technology: abolitionist tools for the new {Jim} code},
  shorttitle = {Race after technology},
  author     = {Benjamin, Ruha},
  publisher  = {Polity Press},
  address    = {Cambridge, England},
  year       = {2019},
  isbn       = {978-1-5095-2643-7},
  url        = {https://www.wiley.com/en-us/Race+After+Technology%3A+Abolitionist+Tools+for+the+New+Jim+Code-p-9781509526437},
  language   = {eng},
  keywords   = {21st century, African Americans, Digital divide, Electronic books, Information technology, Race relations, Social aspects, Social conditions, United States, Whites},
  abstract   = {"From everyday apps to complex algorithms, Ruha Benjamin cuts through tech-industry hype to understand how emerging technologies can reinforce white supremacy and deepen social inequity. Far from a sinister story of racist programmers scheming on the dark web, Benjamin argues that automation has the potential to hide, speed, and even deepen discrimination, while appearing neutral and even benevolent when compared to racism of a previous era. Presenting the concept of the New Jim Code, she shows how a range of discriminatory designs encode inequity: by explicitly amplifying racial hierarchies, by ignoring but thereby replicating social divisions, or by aiming to fix racial bias but ultimately doing quite the opposite. Moreover, she makes a compelling case for race itself as a kind of tool a technology designed to stratify and sanctify social injustice that is part of the architecture of everyday life. This illuminating guide into the world of biased bots, altruistic algorithms, and their many entanglements provides conceptual tools to decode tech promises with sociologically informed skepticism. In doing so, it challenges us to question not only the technologies we are sold, but also the ones we manufacture ourselves"-- Provided by publisher.},
}
@article{ahmed_asif_technologies_2019,
  title    = {Technologies of {Power} – {From} {Area} {Studies} to {Data} {Sciences}},
  author   = {Ahmed Asif, Manan},
  journal  = {spheres: Journal for Digital Cultures},
  volume   = {5},
  month    = nov,
  year     = {2019},
  url      = {https://spheres-journal.org/contribution/technologies-of-power-from-area-studies-to-data-sciences/},
  urldate  = {2020-08-02},
  language = {en-US},
}
link bibtex abstract
NOTE(review): the abstract value spans a line break mid-sentence; legal BibTeX (whitespace inside braces collapses), so the entry is left byte-identical. The value is the publisher's description, retained verbatim including its quotation marks.
@book{eubanks_automating_2019, address = {New York}, edition = {First Picador edition.}, title = {Automating inequality: how high-tech tools profile, police, and punish the poor}, isbn = {978-1-250-21578-9}, shorttitle = {Automating inequality}, abstract = {"The State of Indiana denies one million applications for healthcare, foodstamps and cash benefits in three years--because a new computer system interprets any mistake as "failure to cooperate." In Los Angeles, an algorithm calculates the comparative vulnerability of tens of thousands of homeless people in order to prioritize them for an inadequate pool of housing resources. In Pittsburgh, a child welfare agency uses a statistical model to try to predict which children might be future victims of abuse or neglect. Since the dawn of the digital age, decision-making in finance, employment, politics, health and human services has undergone revolutionary change. Today, automated systems--rather than humans--control which neighborhoods get policed, which families attain needed resources, and who is investigated for fraud. While we all live under this new regime of data, the most invasive and punitive systems are aimed at the poor. In Automating Inequality, Virginia Eubanks systematically investigates the impacts of data mining, policy algorithms, and predictive risk models on poor and working-class people in America. The book is full of heart-wrenching and eye-opening stories, from a woman in Indiana whose benefits are literally cut off as she lies dying to a family in Pennsylvania in daily fear of losing their daughter because they fit a certain statistical profile. The U.S. has always used its most cutting-edge science and technology to contain, investigate, discipline and punish the destitute. 
Like the county poorhouse and scientific charity before them, digital tracking and automated decision-making hide poverty from the middle-class public and give the nation the ethical distance it needs to make inhumane choices: which families get food and which starve, who has housing and who remains homeless, and which families are broken up by the state. In the process, they weaken democracy and betray our most cherished national values. This deeply researched and passionate book could not be more timely."--Publisher's description.}, language = {eng}, publisher = {Picador}, author = {Eubanks, Virginia}, year = {2019}, keywords = {Computers, Data processing, Internet, Law and legislation, Poor, Poverty, Public welfare, Services for, Social aspects, United States}, }
link bibtex abstract
NOTE(review): reviewed; no structural defects found — sentence-cased title, escaped ampersand in publisher, publisher's description retained verbatim as the abstract. Left byte-identical.
@book{criado-perez_invisible_2019, address = {London}, title = {Invisible women: exposing data bias in a world designed for men}, isbn = {978-1-78474-172-3}, shorttitle = {Invisible women}, abstract = {"Welcome to the Gender Data Gap. Our world is largely built for and by men, in a system that can ignore half the population. This book will tell you how and why this matters In her new book, Invisible Women, award-winning campaigner and writer Caroline Criado Perez shows us how, in a world largely built for and by men, we are systematically ignoring half the population. She exposes the gender data gap - a gap in our knowledge that is at the root of perpetual, systemic discrimination against women, and that has created a pervasive but invisible bias with a profound effect on women's lives. Caroline brings together for the first time an impressive range of case studies, stories and new research from across the world that illustrate the hidden ways in which women are excluded from the very building blocks of the world we live in, and the impact this has on their health and wellbeing. From government policy and medical research, to technology, workplaces, urban planning and the media - Invisible Women exposes the biased data that excludes women. In making the case for change, this powerful and provocative book will make you see the world anew."-- Publisher's description.}, language = {eng}, publisher = {Chatto \& Windus}, author = {Criado-Perez, Caroline}, year = {2019}, keywords = {Male domination (Social structure), Methodology, Research, Sex discrimination against women, Sex role, Social sciences}, }
Fixed: authors were stored as bare initials ("Lewis, T." etc.); expanded to the
full names credited on the Our Data Bodies report so styles can abbreviate as
needed — TODO confirm against the report's title page.
@book{lewis_digital_2018,
  title     = {Digital {Defense} {Playbook}: {Community} {Power} {Tools} for {Reclaiming} {Data}},
  author    = {Lewis, Tamika and Gangadharan, Seeta Pe{\~n}a and Saba, Mariella and Petty, Tawana},
  publisher = {Our Data Bodies},
  address   = {Detroit},
  year      = {2018},
  url       = {https://www.odbproject.org/wp-content/uploads/2019/03/ODB_DDP_HighRes_Single.pdf},
}
@misc{onuoha_missing_2018,
  title    = {On {Missing} {Data} {Sets}},
  author   = {Onuoha, Mimi and Sampath, Sparshith and Braithwaite, Myles and Faife, Corin},
  month    = jan,
  year     = {2018},
  url      = {https://github.com/MimiOnuoha/missing-datasets},
  urldate  = {2022-04-14},
  abstract = {An overview and exploration of the concept of missing datasets.},
  note     = {original-date: 2016-02-03T16:30:28Z},
}
link bibtex abstract
@book{noble_algorithms_2018,
  address    = {New York},
  title      = {Algorithms of oppression: how search engines reinforce racism},
  isbn       = {978-1-4798-3724-3},
  shorttitle = {Algorithms of oppression},
  abstract   = {"In Algorithms of Oppression, Safiya Umoja Noble challenges the idea that search engines like Google offer an equal playing field for all forms of ideas, identities, and activities. Data discrimination is a real social problem. Noble argues that the combination of private interests in promoting certain sites, along with the monopoly status of a relatively small number of Internet search engines, leads to a biased set of search algorithms that privilege whiteness and discriminate against people of color, especially women of color. Through an analysis of textual and media searches as well as extensive research on paid online advertising, Noble exposes a culture of racism and sexism in the way discoverability is created online. As search engines and their related companies grow in importance-operating as a source for email, a major vehicle for primary and secondary school learning, and beyond-understanding and reversing these disquieting trends and discriminatory practices is of utmost importance"--Back cover.},
  language   = {eng},
  publisher  = {New York University Press},
  author     = {Noble, Safiya Umoja},
  year       = {2018},
  keywords   = {Discrimination, Google, Search engines, Sociological aspects},
}
doi link bibtex abstract
@book{taylor_indigenous_2016,
  address    = {Canberra},
  series     = {Research monograph / {Centre} for {Aboriginal} {Economic} {Policy} {Research}, {College} of {Arts} and {Social} {Sciences}, {The} {Australian} {National} {University}, {Canberra}},
  title      = {Indigenous {Data} {Sovereignty}: {Toward} an agenda},
  volume     = {38},
  isbn       = {978-1-76046-030-3},
  shorttitle = {Indigenous {Data} {Sovereignty}},
  abstract   = {"As the global ‘data revolution’ accelerates, how can the data rights and interests of indigenous peoples be secured? Premised on the United Nations Declaration on the Rights of Indigenous Peoples, this book argues that indigenous peoples have inherent and inalienable rights relating to the collection, ownership and application of data about them, and about their lifeways and territories. As the first book to focus on indigenous data sovereignty, it asks: what does data sovereignty mean for indigenous peoples, and how is it being used in their pursuit of self-determination? The varied group of mostly indigenous contributors theorise and conceptualise this fast-emerging field and present case studies that illustrate the challenges and opportunities involved. These range from indigenous communities grappling with issues of identity, governance and development, to national governments and NGOs seeking to formulate a response to indigenous demands for data ownership. While the book is focused on the CANZUS states of Canada, Australia, Aotearoa/New Zealand and the United States, much of the content and discussion will be of interest and practical value to a broader global audience. ‘A debate-shaping book … it speaks to a fast-emerging field; it has a lot of important things to say; and the timing is right.’ — Stephen Cornell, Professor of Sociology and Faculty Chair of the Native Nations Institute, University of Arizona. ‘The effort … in this book to theorise and conceptualise data sovereignty and its links to the realisation of the rights of indigenous peoples is pioneering and laudable.’ — Victoria Tauli-Corpuz, UN Special Rapporteur on the Rights of Indigenous Peoples, Baguio City, Philippines "},
  language   = {eng},
  publisher  = {ANU Press},
  editor     = {Kukutai, Tahu and Taylor, John},
  year       = {2016},
  doi        = {10.22459/CAEPR38.11.2016},
}
link bibtex abstract
@book{oneil_weapons_2016, address = {Great Britain}, title = {Weapons of math destruction: how big data increases inequality and threatens democracy}, isbn = {978-0-241-29681-3}, shorttitle = {Weapons of math destruction}, abstract = {A former Wall Street quant sounds an alarm on the mathematical models that pervade modern life and threaten to rip apart our social fabric We live in the age of the algorithm. Increasingly, the decisions that affect our lives where we go to school, whether we get a loan, how much we pay for insurance are being made not by humans, but by mathematical models. In theory, this should lead to greater fairness: everyone is judged according to the same rules, and bias is eliminated. And yet, as Cathy O'Neil reveals in this urgent and necessary book, the opposite is true. The models being used today are opaque, unregulated, and incontestable, even when they're wrong. Most troubling, they reinforce discrimination. Tracing the arc of a person's life, O'Neil exposes the black box models that shape our future, both as individuals and as a society. These "weapons of math destruction" score teachers and students, sort CVs, grant or deny loans, evaluate workers, target voters, and monitor our health. O'Neil calls on modellers to take more responsibility for their algorithms and on policy makers to regulate their use. But in the end, it's up to us to become more savvy about the models that govern our lives. This important book empowers us to ask the tough questions, uncover the truth, and demand change.}, language = {eng}, publisher = {Allen Lane}, author = {O'Neil, Cathy}, year = {2016}, keywords = {Algorithms, Big data, Human behavior, Mathematical models, Social aspects}, }
@misc{dignazio_what_2015, title = {What would feminist data visualization look like?}, shorttitle = {What would feminist data visualization look like?}, url = {https://civic.mit.edu/feminist-data-visualization.html}, language = {en-US}, urldate = {2023-04-10}, author = {D'Ignazio, Catherine}, month = dec, year = {2015}, }
link bibtex
@book{bryan_weaponizing_2015,
  address    = {New York ; London},
  title      = {Weaponizing maps: indigenous peoples and counterinsurgency in the {Americas}},
  isbn       = {978-1-4625-1992-7 978-1-4625-1991-0},
  shorttitle = {Weaponizing maps},
  publisher  = {The Guilford Press},
  author     = {Bryan, Joe and Wood, Denis},
  year       = {2015},
  note       = {OCLC: 890360605},
  keywords   = {Cartography, Central America, Human geography, Indian cartography, Indians of Central America, Indians of North America, Land tenure, North America, Social aspects},
}
@book{walter_indigenous_2013,
  address    = {Walnut Creek, CA},
  title      = {Indigenous {Statistics}: {A} {Quantitative} {Research} {Methodology}},
  isbn       = {978-1-61132-292-7},
  shorttitle = {Indigenous {Statistics}},
  url        = {https://www.routledge.com/Indigenous-Statistics-A-Quantitative-Research-Methodology/Walter-Andersen/p/book/9781611322934},
  abstract   = {"In the first book ever published on Indigenous quantitative methodologies, Maggie Walter and Chris Andersen open up a major new approach to research across the disciplines and applied fields. While qualitative methods have been rigorously critiqued and reformulated, the population statistics relied on by virtually all research on Indigenous peoples continue to be taken for granted as straightforward, transparent numbers. This book dismantles that persistent positivism with a forceful critique, then fills the void with a new paradigm for Indigenous quantitative methods, using concrete examples of research projects from First World Indigenous peoples in the United States, Australia, and Canada. Concise and accessible, it is an ideal supplementary text as well as a core component of the methodological toolkit for anyone conducting Indigenous research or using Indigenous population statistics"-- Provided by publisher.},
  language   = {eng},
  publisher  = {Left Coast Press},
  author     = {Walter, Maggie and Andersen, Chris},
  year       = {2013},
  keywords   = {Ethnic Studies, Indigenous peoples, Methodology, Minority Studies, Native American Studies, Research, SOCIAL SCIENCE, Statistics},
}
GIS & Mapping

<script src="https://bibbase.org/show?bib=https%3A%2F%2Fapi.zotero.org%2Fgroups%2F2911209%2Fcollections%2FFVMWIGGG%2Fitems%3Fkey%3DIvJhUTh5RKsgHH9zlFgwPfpF%26format%3Dbibtex%26limit%3D100&jsonp=1"></script>
<?php
$contents = file_get_contents("https://bibbase.org/show?bib=https%3A%2F%2Fapi.zotero.org%2Fgroups%2F2911209%2Fcollections%2FFVMWIGGG%2Fitems%3Fkey%3DIvJhUTh5RKsgHH9zlFgwPfpF%26format%3Dbibtex%26limit%3D100&jsonp=1");
print_r($contents);
?>
<iframe src="https://bibbase.org/show?bib=https%3A%2F%2Fapi.zotero.org%2Fgroups%2F2911209%2Fcollections%2FFVMWIGGG%2Fitems%3Fkey%3DIvJhUTh5RKsgHH9zlFgwPfpF%26format%3Dbibtex%26limit%3D100&jsonp=1"></iframe>
For more details see the documentation.
To the site owner:
Action required! Mendeley is changing its API. In order to keep using Mendeley with BibBase past April 14th, you need to:
- renew the authorization for BibBase on Mendeley, and
- update the BibBase URL in your page the same way you did when you initially set up this page.
@article{mattke_mapping_2022, title = {Mapping {Prejudice}: {The} {Map} {Library} as a {Hub} for {Community} {Co}-{Creation} and {Social} {Change}}, volume = {18}, issn = {1542-0353}, shorttitle = {Mapping {Prejudice}}, url = {https://doi.org/10.1080/15420353.2022.2076006}, doi = {10.1080/15420353.2022.2076006}, abstract = {The John R. Borchert Map Library was the ideal incubator for an experiment that has changed how a wide range of people are thinking about structural racism and the history of race in American urban environments. Mapping Prejudice used a cartographic visualization of racial covenants as the intellectual nexus of a project that transcended disciplinary boundaries and invited community members into cutting-edge research work. The Map Library provided the physical space, resources, and geospatial expertise necessary for community-driven mapping work. It also served as an intersectional hub necessary for this transformative research initiative, illustrating the synergies between map librarianship and other disciplines. The work depended on the unique contributions of the map librarian: project management; experience networking with researchers, campus departments, and community groups; and knowledge of best practices surrounding data management, curation, and reuse. This article explains how Mapping Prejudice changed academic scholarship and public understandings by engaging volunteers in meaningful research. It concludes by providing a description of future directions for this project and calls on librarians to lead more work of this kind. 
The example of Mapping Prejudice suggests ways that map librarians can be leading new modes of inclusive, equitable and community-responsive research.}, number = {1-2}, urldate = {2023-03-23}, journal = {Journal of Map \& Geography Libraries}, author = {Mattke, Ryan and Delegard, Kirsten and Leebaw, Danya}, month = may, year = {2022}, note = {Publisher: Routledge \_eprint: https://doi.org/10.1080/15420353.2022.2076006}, keywords = {Critical GIS, Minneapolis, academic libraries, community co-creation, critical cartography, crowdsourcing, decolonization, digital humanities, map libraries, public history, racial covenants, structural racism, transdisciplinary research}, pages = {1--21}, }
@inproceedings{sefala_constructing_2021,
  title     = {Constructing a {Visual} {Dataset} to {Study} the {Effects} of {Spatial} {Apartheid} in {South} {Africa}},
  booktitle = {Thirty-fifth {Conference} on {Neural} {Information} {Processing} {Systems} {Datasets} and {Benchmarks} {Track}},
  url       = {https://openreview.net/forum?id=WV0waZz9dTF},
  abstract  = {Aerial images of neighborhoods in South Africa show the clear legacy of Apartheid, a former policy of political and economic discrimination against non-European groups, with completely segregated neighborhoods of townships next to gated wealthy areas. This paper introduces the first publicly available dataset to study the evolution of spatial apartheid, using 6,768 high resolution satellite images of 9 provinces in South Africa. Our dataset was created using polygons demarcating land use, geographically labelled coordinates of buildings in South Africa, and high resolution satellite imagery covering the country from 2006-2017. We describe our iterative process to create this dataset, which includes pixel wise labels for 4 classes of neighborhoods: wealthy areas, non wealthy areas, non residential neighborhoods and vacant land. While datasets 7 times smaller than ours have cost over 1M to annotate, our dataset was created with highly constrained resources. We finally show examples of applications examining the evolution of neighborhoods in South Africa using our dataset.},
  language  = {en},
  urldate   = {2023-04-24},
  author    = {Sefala, Raesetje and Gebru, Timnit and Mfupe, Luzango and Moorosi, Nyalleng and Klein, Richard},
  month     = nov,
  year      = {2021},
}
@article{heeks_datafication_2019, title = {Datafication, development and marginalised urban communities: an applied data justice framework}, volume = {22}, issn = {1369-118X}, shorttitle = {Datafication, development and marginalised urban communities}, url = {https://doi.org/10.1080/1369118X.2019.1599039}, doi = {10.1080/1369118X.2019.1599039}, abstract = {The role of data within international development is rapidly expanding. However, the recency of this phenomenon means analysis has been lagging; particularly, analysis of broader impacts of real-world initiatives. Addressing this gap through a focus on data’s increasing presence in urban development, this paper makes two contributions. First – drawing from the emerging literature on ‘data justice’ – it presents an explicit, systematic and comprehensive new framework that can be used for analysis of datafication. Second, it applies the framework to four mapping initiatives in cities of the global South. These initiatives capture and visualise new data about marginalised communities: residents living in slums and other informal settlements about whom data has traditionally been lacking. Analysing across procedural, rights, instrumental and structural dimensions, it finds these initiatives deliver real incremental gains for their target communities. But it is external actors and wealthier communities that gain more; thus, increasing relative inequality.}, number = {7}, urldate = {2021-05-12}, journal = {Information, Communication \& Society}, author = {Heeks, Richard and Shekhar, Satyarupa}, month = jun, year = {2019}, note = {Publisher: Routledge \_eprint: https://doi.org/10.1080/1369118X.2019.1599039}, keywords = {Data justice, community mapping, datafication, developing countries, inequality}, pages = {992--1011}, }
link bibtex
@book{bryan_weaponizing_2015-1,
  address    = {New York ; London},
  title      = {Weaponizing maps: indigenous peoples and counterinsurgency in the {Americas}},
  isbn       = {978-1-4625-1992-7 978-1-4625-1991-0},
  shorttitle = {Weaponizing maps},
  publisher  = {The Guilford Press},
  author     = {Bryan, Joe and Wood, Denis},
  year       = {2015},
  note       = {OCLC: 890360605},
  keywords   = {Cartography, Central America, Human geography, Indian cartography, Indians of Central America, Indians of North America, Land tenure, North America, Social aspects},
}
link bibtex
@book{muhammad_condemnation_2010, address = {Cambridge, Mass}, title = {The condemnation of blackness: race, crime, and the making of modern urban {America}}, isbn = {978-0-674-03597-3}, shorttitle = {The condemnation of blackness}, language = {eng}, publisher = {Harvard University Press}, author = {Muhammad, Khalil Gibran}, year = {2010}, keywords = {African Americans, Crime and race, Discrimination in criminal justice administration, Race relations, Social conditions, United States}, }
link bibtex
@book{wood_power_1992,
  address   = {New York},
  series    = {Mappings},
  title     = {The power of maps},
  isbn      = {0-89862-492-4 978-0-89862-492-2 0-89862-493-2 978-0-89862-493-9},
  publisher = {Guilford Press},
  author    = {Wood, Denis and Fels, John},
  year      = {1992},
}
@misc{noauthor_urban_nodate, title = {Urban {Renewal}, 1950-1966}, url = {https://dsl.richmond.edu/panorama/renewal/#view=0/0/1&viz=cartogram}, urldate = {2022-08-12}, }
@misc{noauthor_mapping_nodate, title = {Mapping {Inequality}}, url = {https://dsl.richmond.edu/panorama/redlining/}, abstract = {Redlining in New Deal America}, language = {en}, urldate = {2022-08-12}, }
@misc{noauthor_mapping_nodate-1, title = {Mapping {Prejudice}}, url = {https://mappingprejudice.umn.edu/}, urldate = {2022-08-12}, }
@misc{noauthor_mapping_nodate-2, title = {Mapping {Racism}}, url = {https://mappingracism.home.blog/}, abstract = {Mapping Racism is a collaborative project that will document these covenants, create resources to educate our community about their effects, and spark a dialogue about the ongoing impact this racis…}, language = {en}, urldate = {2022-08-12}, journal = {Mapping Racism}, }
@misc{stories_equity_nodate,
  title      = {Equity {Mapping}: {How} {We} {Can} {Do} {More} with {GIS}},
  shorttitle = {Equity {Mapping}},
  url        = {https://www.directionsmag.com/article/11217},
  urldate    = {2022-08-12},
  journal    = {Directions Magazine},
}
@misc{noauthor_how_nodate, title = {How {Maps} can {Help} the {Fight} for {Racial} {Equity}}, url = {https://datasmart.ash.harvard.edu/news/article/how-maps-can-help-fight-racial-equity}, abstract = {September 21, 2020 Equity}, language = {en}, urldate = {2022-08-12}, }
@misc{noauthor_seattle-_nodate, title = {Seattle- race and segregation maps by census tracts 1940-2020}, url = {https://public.tableau.com/views/Seattle-segregation1940-2020largescreen/Story1940-2020?:embed=y&:showVizHome=no&:host_url=https%3A%2F%2Fpublic.tableau.com%2F&:embed_code_version=3&:tabs=no&:toolbar=yes&:animate_transition=yes&:display_static_image=no&:display_spinner=no&:display_overlay=yes&:display_count=yes&:language=en-US&publish=yes&:loadOrderID=0}, language = {en}, urldate = {2022-08-12}, journal = {Tableau Software}, }
@misc{lavery_presenting_nodate, title = {Presenting breakdowns by race/ethnicity (or any groups) in your maps}, url = {https://www.esri.com/arcgis-blog/products/arcgis-online/mapping/techniques-for-presenting-breakdowns-by-race-ethnicity-or-any-groups-in-your-maps/}, abstract = {A few different ways to incorporate breakdowns by race/ethnicity, sex, veteran status and more in your maps.}, language = {en-US}, urldate = {2022-08-12}, journal = {ArcGIS Blog}, author = {Lavery, Diana}, }
AI & Algorithms

<script src="https://bibbase.org/show?bib=https%3A%2F%2Fapi.zotero.org%2Fgroups%2F2911209%2Fcollections%2FZBP52737%2Fitems%3Fkey%3DIvJhUTh5RKsgHH9zlFgwPfpF%26format%3Dbibtex%26limit%3D100&jsonp=1"></script>
<?php
$contents = file_get_contents("https://bibbase.org/show?bib=https%3A%2F%2Fapi.zotero.org%2Fgroups%2F2911209%2Fcollections%2FZBP52737%2Fitems%3Fkey%3DIvJhUTh5RKsgHH9zlFgwPfpF%26format%3Dbibtex%26limit%3D100&jsonp=1");
print_r($contents);
?>
<iframe src="https://bibbase.org/show?bib=https%3A%2F%2Fapi.zotero.org%2Fgroups%2F2911209%2Fcollections%2FZBP52737%2Fitems%3Fkey%3DIvJhUTh5RKsgHH9zlFgwPfpF%26format%3Dbibtex%26limit%3D100&jsonp=1"></iframe>
For more details see the documentation.
To the site owner:
Action required! Mendeley is changing its API. In order to keep using Mendeley with BibBase past April 14th, you need to:
- renew the authorization for BibBase on Mendeley, and
- update the BibBase URL in your page the same way you did when you initially set up this page.
@article{grynbaum_times_2023, chapter = {Business}, title = {The {Times} {Sues} {OpenAI} and {Microsoft} {Over} {A}.{I}. {Use} of {Copyrighted} {Work}}, issn = {0362-4331}, url = {https://www.nytimes.com/2023/12/27/business/media/new-york-times-open-ai-microsoft-lawsuit.html}, abstract = {Millions of articles from The New York Times were used to train chatbots that now compete with it, the lawsuit said.}, language = {en-US}, urldate = {2024-01-05}, journal = {The New York Times}, author = {Grynbaum, Michael M. and Mac, Ryan}, month = dec, year = {2023}, keywords = {Artificial Intelligence, ChatGPT, Compensation for Damages (Law), Copyrights and Copyright Violations, Microsoft Corp, New York Times, News and News Media, Newspapers, OpenAI Labs, Suits and Litigation (Civil)}, }
@inproceedings{pinney_much_2023, address = {New York, NY, USA}, series = {{CHIIR} '23}, title = {Much {Ado} {About} {Gender}: {Current} {Practices} and {Future} {Recommendations} for {Appropriate} {Gender}-{Aware} {Information} {Access}}, isbn = {9798400700354}, shorttitle = {Much {Ado} {About} {Gender}}, url = {https://dl.acm.org/doi/10.1145/3576840.3578316}, doi = {10.1145/3576840.3578316}, abstract = {Information access research (and development) sometimes makes use of gender, whether to report on the demographics of participants in a user study, as inputs to personalized results or recommendations, or to make systems gender-fair, amongst other purposes. This work makes a variety of assumptions about gender, however, that are not necessarily aligned with current understandings of what gender is, how it should be encoded, and how a gender variable should be ethically used. In this work, we present a systematic review of papers on information retrieval and recommender systems that mention gender in order to document how gender is currently being used in this field. We find that most papers mentioning gender do not use an explicit gender variable, but most of those that do either focus on contextualizing results of model performance, personalizing a system based on assumptions of user gender, or auditing a model’s behavior for fairness or other privacy-related issues. Moreover, most of the papers we review rely on a binary notion of gender, even if they acknowledge that gender cannot be split into two categories. We connect these findings with scholarship on gender theory and recent work on gender in human-computer interaction and natural language processing. 
We conclude by making recommendations for ethical and well-grounded use of gender in building and researching information access systems.}, urldate = {2023-04-24}, booktitle = {Proceedings of the 2023 {Conference} on {Human} {Information} {Interaction} and {Retrieval}}, publisher = {Association for Computing Machinery}, author = {Pinney, Christine and Raj, Amifa and Hanna, Alex and Ekstrand, Michael D.}, month = mar, year = {2023}, keywords = {auditing, gender, information access, systematic review}, pages = {269--279}, }
@article{oliver_constructing_2023,
  title      = {Constructing {Relational} and {Verifiable} {Protest} {Event} {Data}: {Four} {Challenges} and {Some} {Solutions}},
  volume     = {28},
  issn       = {1086-671X},
  shorttitle = {Constructing {Relational} and {Verifiable} {Protest} {Event} {Data}},
  url        = {https://doi.org/10.17813/1086-671X-28-1-1},
  doi        = {10.17813/1086-671X-28-1-1},
  abstract   = {We call for a relational approach to constructing protest event data from news sources to provide tools for detecting and correcting errors and for capturing the relations among events and between events and the texts describing them. We address two problems with most protest event datasets: (1) inconsistencies and errors in identifying events and (2) disconnect between data structures and what is known about how protests and media accounts of protests are produced. Relational data structures can capture the theoretically important structuring of events into campaigns and episodes and media attention cascades and cycles. Relational data structures support richer theorizing about the interplay of protests and their representations in news media discourses. We present preliminary illustrative data about Black protests from these new procedures to demonstrate the value of this approach.},
  number     = {1},
  urldate    = {2023-04-24},
  journal    = {Mobilization: An International Quarterly},
  author     = {Oliver, Pamela and Hanna, Alex and Lim, Chaeyoon},
  month      = mar,
  year       = {2023},
  pages      = {1--22},
}
@misc{liu_evaluating_2023,
  title      = {Evaluating {Verifiability} in {Generative} {Search} {Engines}},
  url        = {http://arxiv.org/abs/2304.09848},
  doi        = {10.48550/arXiv.2304.09848},
  abstract   = {Generative search engines directly generate responses to user queries, along with in-line citations. A prerequisite trait of a trustworthy generative search engine is verifiability, i.e., systems should cite comprehensively (high citation recall; all statements are fully supported by citations) and accurately (high citation precision; every cite supports its associated statement). We conduct human evaluation to audit four popular generative search engines -- Bing Chat, NeevaAI, perplexity.ai, and YouChat -- across a diverse set of queries from a variety of sources (e.g., historical Google user queries, dynamically-collected open-ended questions on Reddit, etc.). We find that responses from existing generative search engines are fluent and appear informative, but frequently contain unsupported statements and inaccurate citations: on average, a mere 51.5\% of generated sentences are fully supported by citations and only 74.5\% of citations support their associated sentence. We believe that these results are concerningly low for systems that may serve as a primary tool for information-seeking users, especially given their facade of trustworthiness. We hope that our results further motivate the development of trustworthy generative search engines and help researchers and users better understand the shortcomings of existing commercial systems.},
  urldate    = {2023-04-24},
  publisher  = {arXiv},
  author     = {Liu, Nelson F. and Zhang, Tianyi and Liang, Percy},
  month      = apr,
  year       = {2023},
  eprint     = {2304.09848},
  eprinttype = {arXiv},
  note       = {arXiv:2304.09848 [cs]},
  keywords   = {Computer Science - Computation and Language, Computer Science - Information Retrieval},
}
@article{noauthor_we_2023, title = {We can build better, fairer algorithms in a world of angry bias – so why aren’t we?}, url = {https://www.theglobeandmail.com/canada/article-we-can-build-better-fairer-algorithms-in-a-world-of-angry-bias-so-why/}, abstract = {Given the vast influence of social media and algorithms, what does fairness look like?}, language = {en-CA}, urldate = {2023-04-16}, journal = {The Globe and Mail}, month = apr, year = {2023}, keywords = {Social Issues}, }
@misc{bjork_chatgpt_2023, title = {{ChatGPT} threatens language diversity. {More} needs to be done to protect our differences in the age of {AI}}, url = {http://theconversation.com/chatgpt-threatens-language-diversity-more-needs-to-be-done-to-protect-our-differences-in-the-age-of-ai-198878}, abstract = {When you ask ChatGPT to generate content, the default output is in the voice, style and language of white English-speaking men, who have long dominated many writing-intensive sectors.}, language = {en}, urldate = {2023-03-14}, journal = {The Conversation}, author = {Bjork, Collin}, month = feb, year = {2023}, }
@article{williams_exploited_2022, title = {The {Exploited} {Labor} {Behind} {Artificial} {Intelligence}}, url = {https://www.noemamag.com/the-exploited-labor-behind-artificial-intelligence}, abstract = {Supporting transnational worker organizing should be at the center of the fight for “ethical AI.”}, language = {en-US}, urldate = {2023-04-24}, journal = {Noema}, author = {Williams, Adrienne and Miceli, Milagros and Gebru, Timnit}, month = oct, year = {2022}, }
@misc{miceli_data-production_2022, title = {The {Data}-{Production} {Dispositif}}, url = {http://arxiv.org/abs/2205.11963}, abstract = {Machine learning (ML) depends on data to train and verify models. Very often, organizations outsource processes related to data work (i.e., generating and annotating data and evaluating outputs) through business process outsourcing (BPO) companies and crowdsourcing platforms. This paper investigates outsourced ML data work in Latin America by studying three platforms in Venezuela and a BPO in Argentina. We lean on the Foucauldian notion of dispositif to define the data-production dispositif as an ensemble of discourses, actions, and objects strategically disposed to (re)produce power/knowledge relations in data and labor. Our dispositif analysis comprises the examination of 210 data work instruction documents, 55 interviews with data workers, managers, and requesters, and participant observation. Our findings show that discourses encoded in instructions reproduce and normalize the worldviews of requesters. Precarious working conditions and economic dependency alienate workers, making them obedient to instructions. Furthermore, discourses and social contexts materialize in artifacts, such as interfaces and performance metrics, limiting workers' agency and normalizing specific ways of interpreting data. We conclude by stressing the importance of counteracting the data-production dispositif by fighting alienation and precarization, and empowering data workers to become assets in the quest for high-quality data.}, urldate = {2023-04-24}, publisher = {arXiv}, author = {Miceli, Milagros and Posada, Julian}, month = may, year = {2022}, note = {arXiv:2205.11963 [cs]}, keywords = {Computer Science - Computers and Society, Computer Science - Human-Computer Interaction, Computer Science - Machine Learning}, }
@article{oliver_black_2022, title = {Black {Protests} in the {United} {States}, 1994 to 2010}, volume = {9}, issn = {2330-6696}, url = {https://sociologicalscience.com/articles-v9-12-275/}, doi = {10.15195/v9.a12}, abstract = {Using novel data, we provide the first panoramic view of U.S. Black movement protest events as reported in U.S. newswires between 1994 and 2010 and put our quantitative data into dialogue with qualitative accounts. Struggles during these years presaged the Black Lives protest waves of 2014 to 2016 and 2020. Protests increased after the 1995 Million Man March into 2001 but dropped abruptly after the 9/11 attacks. Collective action increased again at the end of the 2000s. Protests in response to police violence and other criminal-legal issues were major arenas of struggle and news coverage. Also common were issues of national identity including celebrations of Black history and Black solidarity, protests about Confederate symbols, and protests about White hate groups and hate crimes. Although Black people protested about a wide variety of issues, newswires focused disproportionately on incidents of police violence and perceived threats of Black violence. There is substantial continuity in issues, organizations, and activism between this earlier period and the Black Lives Movement of 2014 to 2020.}, language = {en-US}, urldate = {2023-04-24}, journal = {Sociological Science}, author = {Oliver, Pamela and Lim, Chaeyoon and Matthews, Morgan C. and Hanna, Alex}, month = may, year = {2022}, pages = {275--312}, }
doi link bibtex abstract
@book{crawford_atlas_2021, address = {New Haven}, edition = {1}, title = {Atlas of {AI}: power, politics, and the planetary costs of artificial intelligence}, isbn = {978-0-300-20957-0}, shorttitle = {Atlas of {AI}}, abstract = {Crawford reveals how AI is a technology of extraction: from the minerals drawn from the earth, to the labor pulled from low-wage information workers, to the data taken from every action and expression. This book shows how this planetary network is fueling a shift toward undemocratic governance and increased inequity. Rather than taking a narrow focus on code and algorithms, Crawford offers us a material and political perspective on what it takes to make AI and how it centralizes power. This is an account of what is at stake as technology companies use artificial intelligence to reshape the world. --From publisher description}, language = {eng}, publisher = {Yale University Press}, author = {Crawford, Kate}, year = {2021}, doi = {10.2307/j.ctv1ghv45t}, keywords = {Artificial intelligence, Artificial intelligence--Political aspects, Artificial intelligence--Sociological aspects, Atlas (topology), Business, Computer Science, Engineering, Intelligence artificielle, Intelligence artificielle--Aspect politique, Intelligence artificielle--Aspect sociologique, Kunstmatige intelligentie, Politieke aspecten, Power politics, SCIENCE--General, Sociologische aspecten}, }
@misc{noauthor_project_2021, title = {Project {Dillard}: {Revisionist} {History}}, url = {https://www.pushkin.fm/podcasts/revisionist-history/project-dillard}, abstract = {A historically Black university in New Orleans is beloved by everyone – except the US News best colleges rankings. We hack our way back into the algorithm and show how Dillard University can rise to the top. Part two of a two-part series.}, language = {en}, urldate = {2023-04-16}, journal = {Pushkin Industries}, month = jul, year = {2021}, }
@comment{Fix: booktitle is required for inproceedings and was missing; venue taken from the
  OpenReview forum URL (NeurIPS 2021 Datasets and Benchmarks Track) -- verify against the paper page.}
@inproceedings{sefala_constructing_2021,
  author    = {Sefala, Raesetje and Gebru, Timnit and Mfupe, Luzango and Moorosi, Nyalleng and Klein, Richard},
  title     = {Constructing a {Visual} {Dataset} to {Study} the {Effects} of {Spatial} {Apartheid} in {South} {Africa}},
  booktitle = {Proceedings of the {Neural} {Information} {Processing} {Systems} {Track} on {Datasets} and {Benchmarks}},
  month     = nov,
  year      = {2021},
  url       = {https://openreview.net/forum?id=WV0waZz9dTF},
  urldate   = {2023-04-24},
  language  = {en},
  abstract  = {Aerial images of neighborhoods in South Africa show the clear legacy of Apartheid, a former policy of political and economic discrimination against non-European groups, with completely segregated neighborhoods of townships next to gated wealthy areas. This paper introduces the first publicly available dataset to study the evolution of spatial apartheid, using 6,768 high resolution satellite images of 9 provinces in South Africa. Our dataset was created using polygons demarcating land use, geographically labelled coordinates of buildings in South Africa, and high resolution satellite imagery covering the country from 2006-2017. We describe our iterative process to create this dataset, which includes pixel wise labels for 4 classes of neighborhoods: wealthy areas, non wealthy areas, non residential neighborhoods and vacant land. While datasets 7 times smaller than ours have cost over 1M to annotate, our dataset was created with highly constrained resources. We finally show examples of applications examining the evolution of neighborhoods in South Africa using our dataset.},
}
@comment{Fix: this is a FAccT '21 conference paper, not a journal article -- the proceedings
  title was stored in "journal". Retyped as inproceedings with booktitle and ACM as publisher
  (per DOI 10.1145/3442188.3445922). Note the parrot emoji in the official title requires a
  Unicode-aware engine (Biber/XeLaTeX/LuaLaTeX).}
@inproceedings{bender_dangers_2021,
  author     = {Bender, Emily M. and Gebru, Timnit and McMillan-Major, Angelina and Shmitchell, Shmargaret},
  title      = {On the {Dangers} of {Stochastic} {Parrots}: {Can} {Language} {Models} {Be} {Too} {Big}? 🦜},
  shorttitle = {On the {Dangers} of {Stochastic} {Parrots}},
  booktitle  = {Proceedings of the 2021 {ACM} {Conference} on {Fairness}, {Accountability}, and {Transparency}},
  series     = {{FAccT} '21},
  publisher  = {Association for Computing Machinery},
  pages      = {610--623},
  month      = mar,
  year       = {2021},
  doi        = {10.1145/3442188.3445922},
  url        = {https://dl.acm.org/doi/10.1145/3442188.3445922},
  urldate    = {2023-04-10},
  abstract   = {The past 3 years of work in NLP have been characterized by the development and deployment of ever larger language models, especially for English. BERT, its variants, GPT-2/3, and others, most recently Switch-C, have pushed the boundaries of the possible both through architectural innovations and through sheer size. Using these pretrained models and the methodology of fine-tuning them for specific tasks, researchers have extended the state of the art on a wide array of tasks as measured by leaderboards on specific benchmarks for English. In this paper, we take a step back and ask: How big is too big? What are the possible risks associated with this technology and what paths are available for mitigating those risks? We provide recommendations including weighing the environmental and financial costs first, investing resources into curating and carefully documenting datasets rather than ingesting everything on the web, carrying out pre-development exercises evaluating how the planned approach fits into research and development goals and supports stakeholder values, and encouraging research directions beyond ever larger language models.},
}
@comment{BibBase page-UI residue removed from entry stream: "link bibtex"}
@comment{NOTE(review): crawford_atlas_2021-1 appears to duplicate crawford_atlas_2021 -- same
  work, same publisher and year, different ISBN (likely the e-book format). Kept because
  existing citations may reference this key; consider merging the two entries.}
@book{crawford_atlas_2021-1,
  author     = {Crawford, Kate},
  title      = {The {Atlas} of {AI}: {Power}, {Politics}, and the {Planetary} {Costs} of {Artificial} {Intelligence}},
  shorttitle = {The {Atlas} of {AI}},
  address    = {New Haven},
  publisher  = {Yale University Press},
  year       = {2021},
  isbn       = {978-0-300-25239-2},
  language   = {eng},
}
@comment{Fix: dropped "pages = {0}", an auto-export artifact (Oxford Handbooks online-first
  chapters export a placeholder page of 0); a bogus page number is worse than none.}
@incollection{le_bui_were_2020,
  author     = {Le Bui, Matthew and Noble, Safiya Umoja},
  title      = {We’re {Missing} a {Moral} {Framework} of {Justice} in {Artificial} {Intelligence}: {On} the {Limits}, {Failings}, and {Ethics} of {Fairness}},
  shorttitle = {We’re {Missing} a {Moral} {Framework} of {Justice} in {Artificial} {Intelligence}},
  booktitle  = {The {Oxford} {Handbook} of {Ethics} of {AI}},
  editor     = {Dubber, Markus D. and Pasquale, Frank and Das, Sunit},
  publisher  = {Oxford University Press},
  month      = jul,
  year       = {2020},
  isbn       = {978-0-19-006739-7},
  doi        = {10.1093/oxfordhb/9780190067397.013.9},
  url        = {https://doi.org/10.1093/oxfordhb/9780190067397.013.9},
  urldate    = {2023-04-10},
  abstract   = {This chapter assesses the concepts of fairness and bias in artificial intelligence research and interventions. In considering the explosive growth, emergence of, and investment in high-profile AI fairness and ethics interventions within both the academy and industry—alongside the mounting and proliferating calls for the interrogation, regulation, and, in some cases, dismantling and prohibition of AI—it contests and questions the extent to which such remedies can address the original concerns and problems they are designed to address. Indeed, many community organizations are organizing responses and challenging AI used in predictive technologies—facial-recognition software and biometrics technologies—with increasing success. Ultimately, the canon of AI ethics must interrogate and deeply engage with intersectional power structures that work to further consolidate capital in the hands of the elites and that will undergird digital informational systems of inequality: there is no neutral or objective state through which the flows and mechanics of data can be articulated as unbiased or fair.},
}
@book{dubber_oxford_2020,
  editor    = {Dubber, Markus D. and Pasquale, Frank and Das, Sunit},
  title     = {The {Oxford} {Handbook} of {Ethics} of {AI}},
  publisher = {Oxford University Press},
  month     = jul,
  year      = {2020},
  isbn      = {978-0-19-006739-7},
  doi       = {10.1093/oxfordhb/9780190067397.001.0001},
  url       = {https://doi.org/10.1093/oxfordhb/9780190067397.001.0001},
  urldate   = {2023-04-10},
  abstract  = {This book explores the intertwining domains of artificial intelligence (AI) and ethics—two highly divergent fields which at first seem to have nothing to do with one another. AI is a collection of computational methods for studying human knowledge, learning, and behavior, including by building agents able to know, learn, and behave. Ethics is a body of human knowledge—far from completely understood—that helps agents (humans today, but perhaps eventually robots and other AIs) decide how they and others should behave. Despite these differences, however, the rapid development in AI technology today has led to a growing number of ethical issues in a multitude of fields, ranging from disciplines as far-reaching as international human rights law to issues as intimate as personal identity and sexuality. In fact, the number and variety of topics in this volume illustrate the width, diversity of content, and at times exasperating vagueness of the boundaries of “AI Ethics” as a domain of inquiry. Within this discourse, the book points to the capacity of sociotechnical systems that utilize data-driven algorithms to classify, to make decisions, and to control complex systems. Given the wide-reaching and often intimate impact these AI systems have on daily human lives, this volume attempts to address the increasingly complicated relations between humanity and artificial intelligence. It considers not only how humanity must conduct themselves toward AI but also how AI must behave toward humanity.},
}
@comment{Fix: dropped "pages = {0}", the same Oxford Handbooks auto-export placeholder
  artifact as in le_bui_were_2020.}
@incollection{gebru_race_2020,
  author    = {Gebru, Timnit},
  title     = {Race and {Gender}},
  booktitle = {The {Oxford} {Handbook} of {Ethics} of {AI}},
  editor    = {Dubber, Markus D. and Pasquale, Frank and Das, Sunit},
  publisher = {Oxford University Press},
  month     = jul,
  year      = {2020},
  isbn      = {978-0-19-006739-7},
  doi       = {10.1093/oxfordhb/9780190067397.013.16},
  url       = {https://doi.org/10.1093/oxfordhb/9780190067397.013.16},
  urldate   = {2023-04-10},
  abstract  = {This chapter discusses the role of race and gender in artificial intelligence (AI). The rapid permeation of AI into society has not been accompanied by a thorough investigation of the sociopolitical issues that cause certain groups of people to be harmed rather than advantaged by it. For instance, recent studies have shown that commercial automated facial analysis systems have much higher error rates for dark-skinned women, while having minimal errors on light-skinned men. Moreover, a 2016 ProPublica investigation uncovered that machine learning–based tools that assess crime recidivism rates in the United States are biased against African Americans. Other studies show that natural language–processing tools trained on news articles exhibit societal biases. While many technical solutions have been proposed to alleviate bias in machine learning systems, a holistic and multifaceted approach must be taken. This includes standardization bodies determining what types of systems can be used in which scenarios, making sure that automated decision tools are created by people from diverse backgrounds, and understanding the historical and political factors that disadvantage certain groups who are subjected to these tools.},
}
@comment{BibBase page-UI residue removed from entry stream: "link bibtex abstract"}
@comment{Fixes: (1) edition normalized from "First edition." to the bare ordinal "First" --
  styles supply the word "edition" and punctuation; (2) the abstract was two concatenated
  exports whose opening sentence appeared twice -- kept only the complete publisher version.}
@book{mitchell_artificial_2019,
  author     = {Mitchell, Melanie},
  title      = {Artificial intelligence: a guide for thinking humans},
  shorttitle = {Artificial intelligence},
  edition    = {First},
  address    = {New York},
  publisher  = {Farrar, Straus and Giroux},
  year       = {2019},
  isbn       = {978-0-374-25783-5},
  language   = {eng},
  abstract   = {No recent scientific enterprise has proved as alluring, terrifying, and filled with extravagant promise and frustrating setbacks as artificial intelligence. The award-winning author Melanie Mitchell, a leading computer scientist, now reveals AI’s turbulent history and the recent spate of apparent successes, grand hopes, and emerging fears surrounding it. In Artificial Intelligence, Mitchell turns to the most urgent questions concerning AI today: How intelligent—really—are the best AI programs? How do they work? What can they actually do, and when do they fail? How humanlike do we expect them to become, and how soon do we need to worry about them surpassing us? Along the way, she introduces the dominant models of modern AI and machine learning, describing cutting-edge AI programs, their human inventors, and the historical lines of thought underpinning recent achievements. She meets with fellow experts such as Douglas Hofstadter, the cognitive scientist and Pulitzer Prize–winning author of the modern classic Gödel, Escher, Bach, who explains why he is “terrified” about the future of AI. She explores the profound disconnect between the hype and the actual achievements in AI, providing a clear sense of what the field has accomplished and how much further it has to go.
Interweaving stories about the science of AI and the people behind it, Artificial Intelligence brims with clear-sighted, captivating, and accessible accounts of the most interesting and provocative modern work in the field, flavored with Mitchell’s humor and personal observations. This frank, lively book is an indispensable guide to understanding today’s AI, its quest for “human-level” intelligence, and its impact on the future for us all.-- Provided by publisher.},
  keywords   = {Artificial intelligence, COMPUTERS, Machine Theory, Machine learning},
}
@comment{Fix: the note field carried Taylor & Francis export junk ("\_eprint: https://doi.org/...")
  duplicating the doi field; kept only the publisher statement.}
@article{hoffmann_where_2019,
  author     = {Hoffmann, Anna Lauren},
  title      = {Where fairness fails: data, algorithms, and the limits of antidiscrimination discourse},
  shorttitle = {Where fairness fails},
  journal    = {Information, Communication \& Society},
  volume     = {22},
  number     = {7},
  pages      = {900--915},
  month      = jun,
  year       = {2019},
  issn       = {1369-118X},
  doi        = {10.1080/1369118X.2019.1573912},
  url        = {https://doi.org/10.1080/1369118X.2019.1573912},
  urldate    = {2023-04-10},
  note       = {Publisher: Routledge},
  abstract   = {Problems of bias and fairness are central to data justice, as they speak directly to the threat that ‘big data’ and algorithmic decision-making may worsen already existing injustices. In the United States, grappling with these problems has found clearest expression through liberal discourses of rights, due process, and antidiscrimination. Work in this area, however, has tended to overlook certain established limits of antidiscrimination discourses for bringing about the change demanded by social justice. In this paper, I engage three of these limits: 1) an overemphasis on discrete ‘bad actors’, 2) single-axis thinking that centers disadvantage, and 3) an inordinate focus on a limited set of goods. I show that, in mirroring some of antidiscrimination discourse’s most problematic tendencies, efforts to achieve fairness and combat algorithmic discrimination fail to address the very hierarchical logic that produces advantaged and disadvantaged subjects in the first place. Finally, I conclude by sketching three paths for future work to better account for the structural conditions against which we come to understand problems of data and unjust discrimination in the first place.},
  keywords   = {Big data, algorithms, antidiscrimination, intersectionality, social justice},
}
@comment{BibBase page-UI residue removed from entry stream: "link bibtex abstract"}
@comment{Fixes: (1) stray trailing period in the first author's given name ("Nguyên." would be
  treated as part of the name by BibTeX name parsing); (2) repaired the mis-encoded word
  '"idiennement' in the abstract, which is "quotidiennement" (daily) garbled by a bad
  character conversion.}
@book{hoang_fabuleux_2019,
  author     = {Hoang, Lê Nguyên and El Mhamdi, El Mahdi},
  title      = {Le {Fabuleux} {Chantier}: {Rendre} l'intelligence {Artificielle} {Robustement} {Bénéfique}},
  shorttitle = {Le {Fabuleux} {Chantier}},
  series     = {Hors {Collection}},
  address    = {Les Ulis},
  publisher  = {EDP Sciences},
  year       = {2019},
  isbn       = {978-2-7598-2430-4},
  urldate    = {2023-04-10},
  language   = {fre},
  note       = {OCLC: 1129221467},
  abstract   = {L'histoire de nos civilisations est marquée par l'externalisation progressive de l'information, de l'invention du langage à celle de l'imprimerie. La révolution algorithmique est sans doute la plus spectaculaire de ces externalisations, car elle est incarnée par le traitement automatisé de l'information. Désormais, Homo Sapiens s'appuie davantage sur son téléphone que sur son cerveau pour mémoriser la connaissance, s'orienter en ville ou planifier ses vacances. Cette révolution présente des opportunités fantastiques dans les sciences, la santé et la protection de l'environnement. Cependant, le déploiement du traitement automatisé de l'information est allé si vite que ses effets secondaires n'ont pas eu le temps d'être anticipés adéquatement. Par exemple, les algorithmes de recommandations influencent quotidiennement les croyances de milliards d'individus, avec un degré de personnalisation stupéfiant. Pour le meilleur, mais aussi pour le pire. Discours de haine, biais sexistes, addiction, conspirationnisme, manipulations politiques, propagande antivaccin, ou encore catalyse de génocides. Boostée par des techniques de machine learning mal maîtrisées, l'information automatiquement (mal) traitée tue. Ce livre analyse, dans un langage accessible, les effets secondaires présents et les risques futurs du déploiement massif des algorithmes. Il fournit au lecteur une compréhension conceptuelle des algorithmes clés en jeu, de leurs limites et de leurs vulnérabilités. Ce livre esquisse aussi le chantier initié par une communauté croissante de chercheurs dans le but de rendre le traitement automatisé de l'information robustement bénéfique pour l'humanité.
Il souligne les enjeux de ce fabuleux chantier, les principaux défis qu'il pose et comment chacun, expert ou non, peut apporter sa pierre à l'édifice},
  keywords   = {Artificial Intelligence, Artificial intelligence, Information Systems, Information science, Information storage and retrieval systems, Intelligence artificielle, Sciences de l'information, Systèmes d'information, artificial intelligence, information science},
}
@comment{Fix: "journal" is ignored in a misc entry by standard styles; moved the venue to
  howpublished so it renders. NOTE(review): the interviewer is stored in "collaborator"
  (a Zotero role field) -- confirm whether Christie should be the author here.}
@misc{christie_debiasing_2018,
  collaborator = {Christie, Denise},
  title        = {Debiasing {AI} systems: {A} conversation with {Rob} {Speer}, {Luminoso}'s head of science},
  shorttitle   = {Debiasing {AI} systems},
  howpublished = {Luminoso},
  month        = oct,
  year         = {2018},
  url          = {https://web.archive.org/web/20181019010043/https://www.luminoso.com/resources/debiasing-ai-systems},
  urldate      = {2023-04-10},
}
@comment{Fix: "journal" is ignored in a misc entry by standard styles; moved "GitHub" to
  howpublished so the hosting platform renders.}
@misc{onuoha_notes_2018,
  author       = {Onuoha, Mimi},
  title        = {Notes on {Algorithmic} {Violence}},
  shorttitle   = {{GitHub} - {MimiOnuoha}/{On}-{Algorithmic}-{Violence}},
  howpublished = {GitHub},
  month        = feb,
  year         = {2018},
  url          = {https://github.com/MimiOnuoha/On-Algorithmic-Violence},
  urldate      = {2023-04-10},
  language     = {en},
}
@misc{zettabytes_epfl_ai_2018,
  author   = {{ZettaBytes, EPFL}},
  title    = {{AI} {Safety} against {Adversarial} {Attacks} (ft. {El} {Mahdi} {El} {Mhamdi})},
  month    = dec,
  year     = {2018},
  url      = {https://www.youtube.com/watch?v=1Ds1DAnoaag},
  urldate  = {2023-04-10},
  abstract = {In this video, El Mahdi El Mhamdi, PhD candidate of the IC Schoold at EPFL, argues that AI safety is urgent for today's AIs. Especially recommender systems. https://people.epfl.ch/elmahdi.elmham... Examining Sentiments and Popularity of Pro- and Anti-Vaccination Videos on YouTube {\textbar} M. Yun-Ju Song and A. Gruzd (2017) https://dl.acm.org/citation.cfm?id=30... Understanding Anti-Vaccination Attitudes in Social Media {\textbar} AAAI (2016) https://www.aaai.org/ocs/index.php/IC... Understanding vaccine refusal: why we need social media now (2015) https://www.ncbi.nlm.nih.gov/pubmed/2... Mapping the anti-vaccination movement on Facebook: https://www.tandfonline.com/doi/abs/1... "From mid-2007 through the end of 2014, there were 6,274 U.S. deaths that could have been prevented by vaccines" https://www.huffpost.com/entry/phony-... "1.6M measle infections in the last 10 years" https://www.vaccineswork.org/vaccine-... Rapport de l'INSERM https://www.inserm.fr/actualites-et-e... "En octobre 2016, une précédente étude du Leem montrait que seulement 69 \% des personnes interrogées faisaient confiance aux vaccins, en recul de deux points sur un an et un plus bas depuis 2012. " https://www.lepoint.fr/sante/la-confi...},
}
@inproceedings{buolamwini_gender_2018,
  author     = {Buolamwini, Joy and Gebru, Timnit},
  title      = {Gender {Shades}: {Intersectional} {Accuracy} {Disparities} in {Commercial} {Gender} {Classification}},
  shorttitle = {Gender {Shades}},
  booktitle  = {Proceedings of the 1st {Conference} on {Fairness}, {Accountability} and {Transparency}},
  publisher  = {PMLR},
  pages      = {77--91},
  month      = jan,
  year       = {2018},
  url        = {https://proceedings.mlr.press/v81/buolamwini18a.html},
  urldate    = {2023-04-10},
  language   = {en},
  note       = {ISSN: 2640-3498},
  abstract   = {Recent studies demonstrate that machine learning algorithms can discriminate based on classes like race and gender. In this work, we present an approach to evaluate bias present in automated facial analysis algorithms and datasets with respect to phenotypic subgroups. Using the dermatologist approved Fitzpatrick Skin Type classification system, we characterize the gender and skin type distribution of two facial analysis benchmarks, IJB-A and Adience. We find that these datasets are overwhelmingly composed of lighter-skinned subjects (79.6\% for IJB-A and 86.2\% for Adience) and introduce a new facial analysis dataset which is balanced by gender and skin type. We evaluate 3 commercial gender classification systems using our dataset and show that darker-skinned females are the most misclassified group (with error rates of up to 34.7\%). The maximum error rate for lighter-skinned males is 0.8\%. The substantial disparities in the accuracy of classifying darker females, lighter females, darker males, and lighter males in gender classification systems require urgent attention if commercial companies are to build genuinely fair, transparent and accountable facial analysis algorithms.},
}
@comment{BibBase page-UI residue removed from entry stream: "link bibtex abstract"}
@comment{Fix: publisher was truncated to "University Press"; Algorithms of Oppression (2018,
  ISBN 978-1-4798-3724-3) is published by New York University Press.}
@book{noble_algorithms_2018,
  author     = {Noble, Safiya Umoja},
  title      = {Algorithms of oppression: how search engines reinforce racism},
  shorttitle = {Algorithms of oppression},
  address    = {New York},
  publisher  = {New York University Press},
  year       = {2018},
  isbn       = {978-1-4798-3724-3},
  language   = {eng},
  abstract   = {"In Algorithms of Oppression, Safiya Umoja Noble challenges the idea that search engines like Google offer an equal playing field for all forms of ideas, identities, and activities. Data discrimination is a real social problem. Noble argues that the combination of private interests in promoting certain sites, along with the monopoly status of a relatively small number of Internet search engines, leads to a biased set of search algorithms that privilege whiteness and discriminate against people of color, especially women of color. Through an analysis of textual and media searches as well as extensive research on paid online advertising, Noble exposes a culture of racism and sexism in the way discoverability is created online. As search engines and their related companies grow in importance-operating as a source for email, a major vehicle for primary and secondary school learning, and beyond-understanding and reversing these disquieting trends and discriminatory practices is of utmost importance"--Back cover.},
  keywords   = {Discrimination, Google, Search engines, Sociological aspects},
}