@comment{GitHub page chrome and line-number gutter from a web scrape removed; BibTeX entries begin below.}
@inproceedings{geStereoMathAccessibleMusical2024,
  title = {{StereoMath}: An Accessible and Musical Equation Editor},
  shorttitle = {{StereoMath}},
  booktitle = {Proceedings of the 26th International {ACM} {SIGACCESS} Conference on Computers and Accessibility},
  author = {Ge, Kenneth and Seo, JooYoung},
  year = {2024},
  month = oct,
  series = {{ASSETS} '24},
  pages = {1--5},
  publisher = {Association for Computing Machinery},
  address = {New York, NY, USA},
  doi = {10.1145/3663548.3688487},
  urldate = {2025-02-28},
  abstract = {For blind and low-vision (BLV) individuals, digital math communication is uniquely difficult due to the lack of accessible tools. Currently, the state of the art is either code-based, like LaTeX, or WYSIWYG, like visual editors. However, both paradigms view math communication as primarily a visual typesetting problem, and may be accessible but difficult to use. In this paper, we present an equation editor that is built from the ground up with BLV accessibility in mind. Specifically, we notice that two of the biggest barriers with current technology are the high cognitive load and the lack of spatial relationships. Thus, we build an editor that uses spatial audio cues, muscle memory, tones, and more intuitive navigation to properly contextualize math equations. We discuss how this new paradigm can enable new levels of math communication, engagement, and literacy. Finally, we discuss natural next steps.},
  isbn = {979-8-4007-0677-6},
}
@inproceedings{kamathPlayingBarriersCrafting2024,
  title = {Playing Without Barriers: Crafting Playful and Accessible {VR} Table-Tennis with and for Blind and Low-Vision Individuals},
  shorttitle = {Playing Without Barriers},
  booktitle = {Proceedings of the 26th International {ACM} {SIGACCESS} Conference on Computers and Accessibility},
  author = {Kamath, Sanchita S. and Zeidieh, Aziz and Khan, Omar and Sethi, Dhruv and Seo, JooYoung},
  year = {2024},
  month = oct,
  series = {{ASSETS} '24},
  pages = {1--5},
  publisher = {Association for Computing Machinery},
  address = {New York, NY, USA},
  doi = {10.1145/3663548.3688526},
  urldate = {2025-02-28},
  abstract = {Virtual reality (VR) has been celebrated for its immersive experiences, yet its potential for creating accessible and enjoyable environments for Blind and Low-Vision (BLV) individuals remains underexplored. Our project addresses this gap by developing a VR table tennis game specifically designed for BLV players. Utilizing an autoethnographic approach, our mixed-ability team, including three BLV co-designers, prototyped the game through rapid iterative testing and evaluation over four months. We integrated multi-sensory feedback mechanisms, such as spatial audio, haptic feedback, and high-contrast visuals, to enhance navigation and interaction. Our findings highlight the effectiveness of combining these modalities to create an enjoyable and realistic VR sports experience. However, we also identified challenges, such as the need for balanced sensory feedback to avoid overload. This study emphasizes the importance of inclusive design in VR gaming, offering new recreational opportunities for BLV individuals and setting the stage for future advancements in accessible VR technology.},
  isbn = {979-8-4007-0677-6},
}
@inproceedings{seoCodingNonVisuallyVisual2023,
  title = {Coding Non-Visually in {Visual Studio Code}: Collaboration Towards Accessible Development Environment for Blind Programmers},
  shorttitle = {Coding Non-Visually in {Visual Studio Code}},
  booktitle = {Proceedings of the 25th International {ACM} {SIGACCESS} Conference on Computers and Accessibility},
  author = {Seo, JooYoung and Rogge, Megan},
  year = {2023},
  month = oct,
  series = {{ASSETS} '23},
  pages = {1--9},
  publisher = {Association for Computing Machinery},
  address = {New York, NY, USA},
  doi = {10.1145/3597638.3614550},
  urldate = {2024-06-21},
  abstract = {This paper delineates a fruitful collaboration between blind and sighted developers, aiming to augment the accessibility of Visual Studio Code (VSCode). Our shared journey is portrayed through examples drawn from our interaction with GitHub issues, pull requests, review processes, and insider's releases, each contributing to an improved VSCode experience for blind developers. One key milestone of our co-design process is the establishment of an accessible terminal buffer, a significant enhancement for blind developers using VSCode. Other innovative outcomes include Git Diff audio cues, adaptable verbosity settings, intuitive help menus, and a targeted accessibility testing initiative. These tailored improvements not only uplift the accessibility standards of VSCode but also provide a valuable blueprint for open-source developers at large. Through our shared dedication to promoting inclusivity in software development, we aim for the strategies and successes shared in this paper to inspire and guide the open-source community towards crafting more accessible software environments.},
  isbn = {979-8-4007-0220-4},
  keywords = {accessibility,integrated development environment,nonvisual programming,visual studio code},
}
@article{seoDesigningBornAccessibleCourses2024,
  title = {Designing Born-Accessible Courses in Data Science and Visualization: Challenges and Opportunities of a Remote Curriculum Taught by Blind Instructors to Blind Students},
  shorttitle = {Designing Born-Accessible Courses in Data Science and Visualization},
  author = {Seo, JooYoung and O'Modhrain, Sile and Xia, Yilin and Kamath, Sanchita S. and Lee, Bongshin and Coughlan, James},
  year = {2024},
  journal = {EuroVis 2024 - Education Papers},
  eid = {1053},
  publisher = {The Eurographics Association},
  doi = {10.2312/EVED.20241053},
  urldate = {2024-06-21},
  abstract = {While recent years have seen a growing interest in accessible visualization tools and techniques for blind people, little attention is paid to the learning opportunities and teaching strategies of data science and visualization tailored for blind individuals. Whereas the former focuses on the accessibility and usability issues of data visualization tools, the latter is concerned with the learnability of concepts and skills for data science and visualization. In this paper, we present novel approaches to teaching data science and visualization to blind students in an online setting. Taught by blind instructors, nine blind learners having a wide range of professional backgrounds participated in a two-week summer course. We describe the course design, teaching strategies, and learning outcomes. We also discuss the challenges and opportunities of teaching data science and visualization to blind students. Our work contributes to the growing body of knowledge on accessible data science and visualization education, and provides insights into the design of online courses for blind students.},
  copyright = {Creative Commons Attribution 4.0 International},
  isbn = {9783038682578},
  langid = {english},
  keywords = {CCS Concepts: Applied computing Education},
}
@inproceedings{seoMAIDRMakingStatistical2024,
  title = {{MAIDR}: Making Statistical Visualizations Accessible with Multimodal Data Representation},
  shorttitle = {{MAIDR}},
  booktitle = {Proceedings of the {CHI} Conference on Human Factors in Computing Systems},
  author = {Seo, JooYoung and Xia, Yilin and Lee, Bongshin and McCurry, Sean and Yam, Yu Jun},
  year = {2024},
  month = may,
  series = {{CHI} '24},
  pages = {1--22},
  publisher = {Association for Computing Machinery},
  address = {New York, NY, USA},
  doi = {10.1145/3613904.3642730},
  urldate = {2024-06-21},
  abstract = {This paper investigates new data exploration experiences that enable blind users to interact with statistical data visualizations---bar plots, heat maps, box plots, and scatter plots---leveraging multimodal data representations. In addition to sonification and textual descriptions that are commonly employed by existing accessible visualizations, our MAIDR (multimodal access and interactive data representation) system incorporates two additional modalities (braille and review) that offer complementary benefits. It also provides blind users with the autonomy and control to interactively access and understand data visualizations. In a user study involving 11 blind participants, we found the MAIDR system facilitated the accurate interpretation of statistical visualizations. Participants exhibited a range of strategies in combining multiple modalities, influenced by their past interactions and experiences with data visualizations. This work accentuates the overlooked potential of combining refreshable tactile representation with other modalities and elevates the discussion on the importance of user autonomy when designing accessible data visualizations.},
  isbn = {979-8-4007-0330-0},
  keywords = {Accessibility,Blind,Braille Display,Multimodality,Screen Readers,Statistical Visualization},
}
@inproceedings{seoMAIDRMeetsAI2024,
  title = {{MAIDR} Meets {AI}: Exploring Multimodal {LLM}-Based Data Visualization Interpretation by and with Blind and Low-Vision Users},
  shorttitle = {{MAIDR} Meets {AI}},
  booktitle = {Proceedings of the 26th International {ACM} {SIGACCESS} Conference on Computers and Accessibility},
  author = {Seo, JooYoung and Kamath, Sanchita S. and Zeidieh, Aziz and Venkatesh, Saairam and McCurry, Sean},
  year = {2024},
  month = oct,
  series = {{ASSETS} '24},
  pages = {1--31},
  publisher = {Association for Computing Machinery},
  address = {New York, NY, USA},
  doi = {10.1145/3663548.3675660},
  urldate = {2025-02-28},
  abstract = {This paper investigates how blind and low-vision (BLV) users interact with multimodal large language models (LLMs) to interpret data visualizations. Building upon our previous work on the multimodal access and interactive data representation (MAIDR) framework, our mixed-visual-ability team co-designed maidrAI, an LLM extension providing multiple AI responses to users' visual queries. To explore generative AI-based data representation, we conducted user studies with 8 BLV participants, tasking them with interpreting box plots using our system. We examined how participants personalize LLMs through prompt engineering, their preferences for data visualization descriptions, and strategies for verifying LLM responses. Our findings highlight three dimensions affecting BLV users' decision-making process: modal preference, LLM customization, and multimodal data representation. This research contributes to designing more accessible data visualization tools for BLV users and advances the understanding of inclusive generative AI applications.},
  isbn = {979-8-4007-0677-6},
}
@inproceedings{khanSightedPeopleHave2025,
  title = {``Sighted People Have Their Pick Of The Litter'': Unpacking The Need For Digital Mental Health ({DMH}) Tracking Services With And For The Blind Community},
  shorttitle = {``Sighted People Have Their Pick Of The Litter''},
  booktitle = {Proceedings of the Extended Abstracts of the {CHI} Conference on Human Factors in Computing Systems},
  author = {Khan, Omar and Seo, JooYoung},
  date = {2025-04-25},
  series = {{CHI EA} '25},
  pages = {1--13},
  publisher = {Association for Computing Machinery},
  location = {New York, NY, USA},
  doi = {10.1145/3706599.3719817},
  abstract = {The proliferation of digital mental health (DMH) tracking services promises personalized support, yet accessibility barriers limit equal access. This study investigates blind community experiences with DMH tracking services across the United States as a step toward inclusive health technology design. Working with blind advocacy organizations, we distributed a cross-sectional observational survey (n = 93) and analyzed open-ended responses using Norman and Skinner’s eHealth Literacy framework. Our findings reveal significant challenges in navigation, content interpretation, and overall user experience, which impediments the blind community’s effective engagement with DMH tools. Results highlight the need for adaptive interfaces, accessible tracking strategies, and voice-guided interactions. These insights inform design recommendations for developers and policymakers, promoting more inclusive mental health technologies. By prioritizing accessibility, we make forward progress in ensuring that DMH tracking services fulfill their potential to support mental well-being across diverse user groups, fostering digital equality in mental health care.},
  isbn = {979-8-4007-1395-8},
}
@article{kohEngagingInformationVision2025,
  title = {Engaging with Information beyond Vision: Hands-on Approaches to Computational Thinking for Blind and Visually Impaired Learners},
  shorttitle = {Engaging with Information beyond Vision},
  author = {Koh, Kyungwon and Seo, JooYoung and Chen, Si and Cox, Eugene Malcolm},
  date = {2025-03-11},
  journaltitle = {Information Research: An International Electronic Journal},
  volume = {30},
  number = {iConf},
  pages = {280--286},
  issn = {1368-1613},
  doi = {10.47989/ir30iConf47353},
  url = {https://publicera.kb.se/ir/article/view/47353},
  urldate = {2025-05-02},
  abstract = {Introduction: This project develops accessible maker tools and activities to foster computational thinking (CT) skills in blind and visually impaired (BVI) learners, while investigating the experiences of two key groups: (1) BVI learners and (2) librarians and maker professionals who design and deliver accessible CT programs. Methods: The pilot phase designed and delivered an accessible electronics and coding curriculum to three BVI youth in a two-day summer camp. Data was collected through two debrief focus groups—one with BVI learners and one with the maker professionals who served as instructors. Analysis: All interviews were recorded and transcribed. The research team used a grounded theory approach to analyse the interview data. Results: Both learners and instructors highlighted the benefits of tactile and multi-sensory learning tools, though challenges emerged with the text-based coding platform. Learners self-reported increased confidence, autonomy, and interest in CT skills. Instructors adapted their approaches with detailed verbal descriptions and modifications to tools and lesson plans. Understanding the diverse needs of BVI learners and providing personalized assistance was crucial. Conclusion: Tactile and physical approaches to computational thinking show promise for previously marginalized learners, though challenges remain. Future research will explore how emerging technologies, including AI, can further enhance accessibility for BVI learners.},
  langid = {english},
  keywords = {accessibility,blind and visually impaired people,computational thinking,learning,makerspaces},
}