@article{2022-rich-screen-reader-vis-experiences,
title = {Rich Screen Reader Experiences for Accessible Data Visualization},
author = {Zong, Jonathan and Lee, Crystal and Lundgard, Alan and Jang, JiWoong and Hajas, Daniel and Satyanarayan, Arvind},
date = {2022},
journaltitle = {Computer Graphics Forum (Proc. EuroVis)},
url = {http://vis.csail.mit.edu/pubs/rich-screen-reader-vis-experiences}
}
@online{AccessibleCOVID19Data,
title = {Accessible {{COVID-19}} Data},
url = {https://covid.ski.org/?fbclid=IwAR0kqAZIeQkyelOjMpRA_NrKVM8gKYGEVSZeFgT0vSe61f8aLE0z4oB8DzI},
urldate = {2022-08-21},
file = {C:\Users\jseo1005\Zotero\storage\D2ILW7QE\covid.ski.org.html}
}
@online{AccessibleGraphs,
title = {Accessible {{Graphs}}},
url = {https://accessiblegraphs.org/},
urldate = {2023-09-11},
abstract = {Helping blind people see graphs using sound and touch},
langid = {english},
file = {C:\Users\jseo1005\Zotero\storage\DST4J3U2\accessiblegraphs.org.html}
}
@software{AccessibleGraphsProject2023,
title = {The {{Accessible Graphs}} Project},
date = {2023-01-17T09:12:00Z},
origdate = {2019-10-10T21:52:42Z},
url = {https://github.com/hasadna/accessible-graphs},
urldate = {2023-09-11},
abstract = {The Accessible Graphs project},
organization = {{The Public Knowledge Workshop}}
}
@article{ACMDSeminarFully2017,
title = {{{ACMD Seminar}}: {{Towards Fully Accessible Data Visualisation}}},
shorttitle = {{{ACMD Seminar}}},
date = {2017-06-06T09:43-04:00},
journaltitle = {NIST},
url = {https://www.nist.gov/itl/math/acmd-seminar-towards-fully-accessible-data-visualisation},
urldate = {2022-08-21},
abstract = {Volker Sorge, School of Computer Science, University of Birmingham, UK},
langid = {english},
annotation = {Last Modified: 2019-11-15T19:42-05:00},
file = {C:\Users\jseo1005\Zotero\storage\8ZZW8M5Y\acmd-seminar-towards-fully-accessible-data-visualisation.html}
}
@online{alamSeeChartEnablingAccessible2023,
title = {{{SeeChart}}: {{Enabling Accessible Visualizations Through Interactive Natural Language Interface For People}} with {{Visual Impairments}}},
shorttitle = {{{SeeChart}}},
author = {Alam, Md Zubair Ibne and Islam, Shehnaz and Hoque, Enamul},
date = {2023-02-15},
eprint = {2302.07742},
eprinttype = {arxiv},
eprintclass = {cs},
doi = {10.1145/3581641.3584099},
url = {http://arxiv.org/abs/2302.07742},
urldate = {2023-02-16},
abstract = {Web-based data visualizations have become very popular for exploring data and communicating insights. Newspapers, journals, and reports regularly publish visualizations to tell compelling stories with data. Unfortunately, most visualizations are inaccessible to readers with visual impairments. For many charts on the web, there are no accompanying alternative (alt) texts, and even if such texts exist they do not adequately describe important insights from charts. To address the problem, we first interviewed 15 blind users to understand their challenges and requirements for reading data visualizations. Based on the insights from these interviews, we developed SeeChart, an interactive tool that automatically deconstructs charts from web pages and then converts them to accessible visualizations for blind people by enabling them to hear the chart summary as well as to interact through data points using the keyboard. Our evaluation with 14 blind participants suggests the efficacy of SeeChart in understanding key insights from charts and fulfilling their information needs while reducing their required time and cognitive burden.},
pubstate = {preprint},
keywords = {Computer Science - Human-Computer Interaction},
file = {C:\Users\jseo1005\Zotero\storage\6M9NTXPQ\Alam et al. - 2023 - SeeChart Enabling Accessible Visualizations Throu.pdf}
}
@online{americanprintinghousefortheblindAnnualReports2021,
title = {Annual {{Reports}}},
author = {{American Printing House for the Blind}},
date = {2021},
url = {https://www.aph.org/app/uploads/2022/04/annual-report-fy2021.pdf},
langid = {english},
file = {C:\Users\jseo1005\Zotero\storage\UU2U8PQE\Limitless Possibility.pdf}
}
@online{AudioGraphsApple,
title = {Audio {{Graphs}} | {{Apple Developer Documentation}}},
url = {https://developer.apple.com/documentation/accessibility/audio_graphs},
urldate = {2022-08-21},
file = {C:\Users\jseo1005\Zotero\storage\3Y4533ZN\audio_graphs.html}
}
@inproceedings{aultEvaluationLongDescriptions2002,
title = {Evaluation of {{Long Descriptions}} of {{Statistical Graphics}} for {{Blind}} and {{Low Vision Web Users}}},
booktitle = {Computers {{Helping People}} with {{Special Needs}}},
author = {Ault, H. K. and Deloge, J. W. and Lapp, R. W. and Morgan, M. J. and Barnett, J. R.},
editor = {Miesenberger, Klaus and Klaus, Joachim and Zagler, Wolfgang},
date = {2002},
series = {Lecture {{Notes}} in {{Computer Science}}},
pages = {517--526},
publisher = {{Springer}},
location = {{Berlin, Heidelberg}},
doi = {10.1007/3-540-45491-8_99},
abstract = {The objective of this research was to maximize not only accessibility but also user comprehension of web pages, particularly those containing tabular and graphical information. Based on literature and interviews with blind and low vision students and their teachers, the research team developed guidelines for web developers to describe charts and graphs commonly used in statistical applications. A usability study was then performed to evaluate the effectiveness of these new guidelines. Accessibility and comprehension for both blind and low vision users were increased when web pages were developed following the new guidelines.},
isbn = {978-3-540-45491-5},
langid = {english},
keywords = {Accessibility Guideline,Blind User,Lesson Plan,Screen Reader,Worcester Polytechnic Institute},
file = {C:\Users\jseo1005\Zotero\storage\ZPH9N8YJ\3-540-45491-8_99.pdf}
}
@inproceedings{banovicUncoveringInformationNeeds2013,
title = {Uncovering Information Needs for Independent Spatial Learning for Users Who Are Visually Impaired},
booktitle = {Proceedings of the 15th {{International ACM SIGACCESS Conference}} on {{Computers}} and {{Accessibility}}},
author = {Banovic, Nikola and Franz, Rachel L. and Truong, Khai N. and Mankoff, Jennifer and Dey, Anind K.},
date = {2013-10-21},
pages = {1--8},
publisher = {{ACM}},
location = {{Bellevue Washington}},
doi = {10.1145/2513383.2513445},
url = {https://dl.acm.org/doi/10.1145/2513383.2513445},
urldate = {2022-08-21},
eventtitle = {{{ASSETS}} '13: {{The}} 15th {{International ACM SIGACCESS Conference}} on {{Computers}} and {{Accessibility}}},
isbn = {978-1-4503-2405-2},
langid = {english}
}
@article{bovairAcquisitionPerformanceTextEditing1990,
title = {The {{Acquisition}} and {{Performance}} of {{Text-Editing Skill}}: {{A Cognitive Complexity Analysis}}},
shorttitle = {The {{Acquisition}} and {{Performance}} of {{Text-Editing Skill}}},
author = {Bovair, Susan and Kieras, David E. and Polson, Peter G.},
date = {1990-03-01},
journaltitle = {Human–Computer Interaction},
volume = {5},
number = {1},
pages = {1--48},
publisher = {{Taylor \& Francis}},
issn = {0737-0024},
doi = {10.1207/s15327051hci0501_1},
url = {https://www.tandfonline.com/doi/abs/10.1207/s15327051hci0501_1},
urldate = {2023-09-07},
abstract = {Kieras and Polson (1985) proposed an approach for making quantitative predictions on ease of learning and ease of use of a system, based on a production system version of the goals, operators, methods, and selection rules (GOMS) model of Card, Moran, and Newell (1983). This article describes the principles for constructing such models and obtaining predictions of learning and execution time. A production rule model for a simulated text editor is described in detail and is compared to experimental data on learning and performance. The model accounted well for both learning and execution time and for the details of the increase in speed with practice. The relationship between the performance model and the Keystroke-Level Model of Card et al. (1983) is discussed. The results provide strong support for the original proposal that production rule models can make quantitative predictions for both ease of learning and ease of use.},
file = {C:\Users\jseo1005\Zotero\storage\FUDR4HX3\Bovair et al. - 1990 - The Acquisition and Performance of Text-Editing Skill A Cognitive Complexity Analysis.pdf}
}
@online{BraillePatterns,
title = {Braille {{Patterns}}},
url = {https://unicode.org/charts/nameslist/c_2800.html},
urldate = {2023-01-12},
file = {C:\Users\jseo1005\Zotero\storage\EAIRJ2XZ\c_2800.html}
}
@article{brookeSUSRetrospective2013,
title = {{{SUS}}: A Retrospective},
shorttitle = {{{SUS}}},
author = {Brooke, John},
date = {2013-01-01},
journaltitle = {Journal of Usability Studies},
shortjournal = {Journal of Usability Studies},
volume = {8},
pages = {29--40}
}
@inproceedings{brownVizTouchAutomaticallyGenerated2012a,
title = {{{VizTouch}}: Automatically Generated Tactile Visualizations of Coordinate Spaces},
shorttitle = {{{VizTouch}}},
booktitle = {Proceedings of the {{Sixth International Conference}} on {{Tangible}}, {{Embedded}} and {{Embodied Interaction}}},
author = {Brown, Craig and Hurst, Amy},
date = {2012-02-19},
pages = {131--138},
publisher = {{ACM}},
location = {{Kingston Ontario Canada}},
doi = {10.1145/2148131.2148160},
url = {https://dl.acm.org/doi/10.1145/2148131.2148160},
urldate = {2023-09-09},
eventtitle = {{{TEI}}'12: {{Sixth International Conference}} on {{Tangible}}, {{Embedded}}, and {{Embodied Interaction}}},
isbn = {978-1-4503-1174-8},
langid = {english},
file = {C:\Users\jseo1005\Zotero\storage\WEMJHXZ2\Brown and Hurst - 2012 - VizTouch automatically generated tactile visualiz.pdf}
}
@online{ChartsContentComponents,
title = {Charts - {{Content}} - {{Components}} - {{Human Interface Guidelines}} - {{Design}} - {{Apple Developer}}},
url = {https://developer.apple.com/design/human-interface-guidelines/components/content/charts},
urldate = {2022-09-27},
file = {C:\Users\jseo1005\Zotero\storage\NQVXZWEW\charts.html}
}
@online{cherukuruVisualsExaminingExperiences2022,
title = {Beyond {{Visuals}}: {{Examining}} the {{Experiences}} of {{Geoscience Professionals With Vision Disabilities}} in {{Accessing Data Visualizations}}},
shorttitle = {Beyond {{Visuals}}},
author = {Cherukuru, Nihanth W. and Bailey, David A. and Fourment, Tiffany and Hatheway, Becca and Holland, Marika M. and Rehme, Matt},
date = {2022-07-26},
eprint = {2207.13220},
eprinttype = {arxiv},
eprintclass = {cs},
doi = {10.48550/arXiv.2207.13220},
url = {http://arxiv.org/abs/2207.13220},
urldate = {2022-10-20},
abstract = {Data visualizations are ubiquitous in all disciplines and have become the primary means of analysing data and communicating insights. However, the predominant reliance on visual encoding of data continues to create accessibility barriers for people who are blind/vision impaired resulting in their under representation in Science, Technology, Engineering and Mathematics (STEM) disciplines. This research study seeks to understand the experiences of professionals who are blind/vision impaired in one such STEM discipline (geosciences) in accessing data visualizations. In-depth, semi-structured interviews with seven professionals were conducted to examine the accessibility barriers and areas for improvement to inform accessibility research pertaining to data visualizations through a socio-technical lens. A reflexive thematic analysis revealed the negative impact of visualizations in influencing their career path, lack of data exploration tools for research, barriers in accessing works of peers and mismatched pace of visualization and accessibility research. The article also includes recommendations from the participants to address some of these accessibility barriers.},
pubstate = {preprint},
keywords = {Computer Science - Computers and Society,Computer Science - Graphics,Computer Science - Human-Computer Interaction},
file = {C:\Users\jseo1005\Zotero\storage\B2F2B2R2\Cherukuru et al. - 2022 - Beyond Visuals Examining the Experiences of Geos.pdf}
}
@inproceedings{choiTactileDisplayBraille2004,
title = {Tactile Display as a {{Braille}} Display for the Visually Disabled},
booktitle = {2004 {{IEEE}}/{{RSJ International Conference}} on {{Intelligent Robots}} and {{Systems}} ({{IROS}}) ({{IEEE Cat}}. {{No}}.{{04CH37566}})},
author = {Choi, H.R. and Lee, S.W. and Jung, K.M. and Koo, J.C. and Lee, S.I. and Choi, H.G. and Jeon, J.W. and Nam, J.D.},
date = {2004-09},
volume = {2},
pages = {1985--1990},
doi = {10.1109/IROS.2004.1389689},
abstract = {Tactile sensation is one of the most important sensory functions along with the auditory sensation for the visually impaired because it replaces the visual sensation of the persons with sight. In this paper, we present a tactile display device as a dynamic Braille display that is the unique tool for exchanging information among them. The proposed tactile cell of the Braille display is based on the dielectric elastomer and it has advantageous features over the existing ones with respect to intrinsic softness, ease of fabrication, cost effectiveness and miniaturization. We introduce a new idea for actuation and describe the actuating mechanism of the Braille pin in details capable of realizing the enhanced spatial density of the tactile cells. Finally, results of psychophysical experiments are given and its effectiveness is confirmed.},
eventtitle = {2004 {{IEEE}}/{{RSJ International Conference}} on {{Intelligent Robots}} and {{Systems}} ({{IROS}}) ({{IEEE Cat}}. {{No}}.{{04CH37566}})},
keywords = {Actuators,Auditory displays,Engineering management,Fabrication,Humans,Lungs,Mechanical engineering,Pins,Psychology,Skin},
file = {C\:\\Users\\jseo1005\\Zotero\\storage\\W28ZJ4ZI\\Choi et al. - 2004 - Tactile display as a Braille display for the visua.pdf;C\:\\Users\\jseo1005\\Zotero\\storage\\QT327JMP\\1389689.html}
}
@article{choiVisualizingNonVisual2019,
title = {Visualizing for the {{Non}}‐{{Visual}}: {{Enabling}} the {{Visually Impaired}} to {{Use Visualization}}},
shorttitle = {Visualizing for the {{Non}}‐{{Visual}}},
author = {Choi, Jinho and Jung, Sanghun and Park, Deok Gun and Choo, Jaegul and Elmqvist, Niklas},
date = {2019-06},
journaltitle = {Computer Graphics Forum},
volume = {38},
number = {3},
pages = {249--260},
publisher = {{Wiley-Blackwell}},
issn = {01677055},
doi = {10.1111/cgf.13686},
url = {https://proxy2.library.illinois.edu/login?url=https://search.ebscohost.com/login.aspx?direct=true&db=bsu&AN=137771620&site=eds-live&scope=site},
urldate = {2023-09-04},
abstract = {The majority of visualizations on the web are still stored as raster images, making them inaccessible to visually impaired users. We propose a deep‐neural‐network‐based approach that automatically recognizes key elements in a visualization, including a visualization type, graphical elements, labels, legends, and most importantly, the original data conveyed in the visualization. We leverage such extracted information to provide visually impaired people with the reading of the extracted information. Based on interviews with visually impaired users, we built a Google Chrome extension designed to work with screen reader software to automatically decode charts on a webpage using our pipeline. We compared the performance of the back‐end algorithm with existing methods and evaluated the utility using qualitative feedback from visually impaired users.},
keywords = {CCS Concepts,Data modeling,Data visualization,Google Chrome (Computer software),Human‐centered computing → Visual analytics,People with visual disabilities,Visual analytics,Visualization,Visualization toolkits,Work design},
file = {C:\Users\jseo1005\Zotero\storage\CR795AQV\Choi et al. - 2019 - Visualizing for the Non‐Visual Enabling the Visua.pdf}
}
@article{choiVisualizingNonVisualEnabling2019,
title = {Visualizing for the {{Non-Visual}}: {{Enabling}} the {{Visually Impaired}} to {{Use Visualization}}},
shorttitle = {Visualizing for the {{Non-Visual}}},
author = {Choi, Jinho and Jung, Sanghun and Park, Deok Gun and Choo, Jaegul and Elmqvist, Niklas},
date = {2019},
journaltitle = {Computer Graphics Forum},
volume = {38},
number = {3},
pages = {249--260},
issn = {1467-8659},
doi = {10.1111/cgf.13686},
url = {https://onlinelibrary.wiley.com/doi/abs/10.1111/cgf.13686},
urldate = {2022-08-22},
abstract = {The majority of visualizations on the web are still stored as raster images, making them inaccessible to visually impaired users. We propose a deep-neural-network-based approach that automatically recognizes key elements in a visualization, including a visualization type, graphical elements, labels, legends, and most importantly, the original data conveyed in the visualization. We leverage such extracted information to provide visually impaired people with the reading of the extracted information. Based on interviews with visually impaired users, we built a Google Chrome extension designed to work with screen reader software to automatically decode charts on a webpage using our pipeline. We compared the performance of the back-end algorithm with existing methods and evaluated the utility using qualitative feedback from visually impaired users.},
langid = {english},
keywords = {• Human-centered computing → Visual analytics,CCS Concepts,Visualization toolkits},
file = {C:\Users\jseo1005\Zotero\storage\ZBUN9DYE\cgf.html}
}
@inproceedings{ciuhaVisualizationConcurrentTones2010,
title = {Visualization of Concurrent Tones in Music with Colours},
booktitle = {Proceedings of the International Conference on {{Multimedia}} - {{MM}} '10},
author = {Ciuha, Peter and Klemenc, Bojan and Solina, Franc},
date = {2010},
pages = {1677},
publisher = {{ACM Press}},
location = {{Firenze, Italy}},
doi = {10.1145/1873951.1874320},
url = {http://dl.acm.org/citation.cfm?doid=1873951.1874320},
urldate = {2022-08-21},
eventtitle = {The International Conference},
isbn = {978-1-60558-933-6},
langid = {english}
}
@article{clarkDualCodingTheory1991,
title = {Dual Coding Theory and Education},
author = {Clark, James M. and Paivio, Allan},
date = {1991-09-01},
journaltitle = {Educational Psychology Review},
shortjournal = {Educ Psychol Rev},
volume = {3},
number = {3},
pages = {149--210},
issn = {1573-336X},
doi = {10.1007/BF01320076},
url = {https://doi.org/10.1007/BF01320076},
urldate = {2023-01-17},
abstract = {Dual coding theory (DCT) explains human behavior and experience in terms of dynamic associative processes that operate on a rich network of modality-specific verbal and nonverbal (or imagery) representations. We first describe the underlying premises of the theory and then show how the basic DCT mechanisms can be used to model diverse educational phenomena. The research demonstrates that concreteness, imagery, and verbal associative processes play major roles in various educational domains: the representation and comprehension of knowledge, learning and memory of school material, effective instruction, individual differences, achievement motivation and test anxiety, and the learning of motor skills. DCT also has important implications for the science and practice of educational psychology — specifically, for educational research and teacher education. We show not only that DCT provides a unified explanation for diverse topics in education, but also that its mechanistic framework accommodates theories cast in terms of strategies and other high-level psychological processes. Although much additional research needs to be done, the concrete models that DCT offers for the behavior and experience of students, teachers, and educational psychologists further our understanding of educational phenomena and strengthen related pedagogical practices.},
langid = {english},
keywords = {imagery,unified educational theory,verbal processes},
file = {C:\Users\jseo1005\Zotero\storage\7WPYLRAX\Clark and Paivio - 1991 - Dual coding theory and education.pdf}
}
@inproceedings{degreefInterdependentVariablesRemotely2021,
title = {Interdependent {{Variables}}: {{Remotely Designing Tactile Graphics}} for an {{Accessible Workflow}}},
shorttitle = {Interdependent {{Variables}}},
booktitle = {The 23rd {{International ACM SIGACCESS Conference}} on {{Computers}} and {{Accessibility}}},
author = {De Greef, Lilian and Moritz, Dominik and Bennett, Cynthia},
date = {2021-10-17},
pages = {1--6},
publisher = {{ACM}},
location = {{Virtual Event USA}},
doi = {10.1145/3441852.3476468},
url = {https://dl.acm.org/doi/10.1145/3441852.3476468},
urldate = {2023-09-06},
abstract = {In this experience report, we offer a case study of blind and sighted colleagues creating an accessible workflow to collaborate on a data visualization-focused project. We outline our process for making the project’s shared data representations accessible through incorporating both handmade and machine-embossed tactile graphics. We also share lessons and strategies for considering team needs and addressing contextual constraints like remote collaboration during the COVID-19 pandemic. More broadly, this report contributes to ongoing research into the ways accessibility is interdependent by arguing that access work must be a collective responsibility and properly supported with recognition, resources, and infrastructure.},
eventtitle = {{{ASSETS}} '21: {{The}} 23rd {{International ACM SIGACCESS Conference}} on {{Computers}} and {{Accessibility}}},
isbn = {978-1-4503-8306-6},
langid = {english},
file = {C:\Users\jseo1005\Zotero\storage\YGPSINZ3\De Greef et al. - 2021 - Interdependent Variables Remotely Designing Tacti.pdf}
}
@online{DesmosGraphingCalculator,
title = {Desmos | {{Graphing Calculator}}},
url = {https://www.desmos.com/calculator},
urldate = {2023-01-02},
abstract = {Explore math with our beautiful, free online graphing calculator. Graph functions, plot points, visualize algebraic equations, add sliders, animate graphs, and more.},
langid = {english},
organization = {{Desmos}},
file = {C:\Users\jseo1005\Zotero\storage\VTL3FFZK\calculator.html}
}
@online{Diagcess,
title = {Diagcess},
url = {https://www.npmjs.com/package/diagcess},
urldate = {2022-08-21},
abstract = {A diagram explorer for progressiveaccess.com style diagram annotations. Latest version: 1.1.4, last published: 7 months ago. Start using diagcess in your project by running `npm i diagcess`. There are no other projects in the npm registry using diagcess.},
langid = {english},
organization = {{npm}},
file = {C:\Users\jseo1005\Zotero\storage\NJM4U6JH\diagcess.html}
}
@article{dowWizardOzSupport2005,
title = {Wizard of {{Oz}} Support throughout an Iterative Design Process},
author = {Dow, S. and MacIntyre, B. and Lee, J. and Oezbek, C. and Bolter, J.D. and Gandy, M.},
date = {2005-10},
journaltitle = {IEEE Pervasive Computing},
volume = {4},
number = {4},
pages = {18--26},
issn = {1558-2590},
doi = {10.1109/MPRV.2005.93},
abstract = {The Wizard of Oz prototyping approach, widely used in human-computer interaction research, is particularly useful in exploring user interfaces for pervasive, ubiquitous, or mixed-reality systems that combine complex sensing and intelligent control logic. The vast design space for such nontraditional interfaces provides many possibilities for user interaction through one or more modalities and often requires challenging hardware and software implementations. The WOz method helps designers avoid getting locked into a particular design or working under an incorrect set of assumptions about user preferences, because it lets them explore and evaluate designs before investing the considerable development time needed to build a complete prototype.},
eventtitle = {{{IEEE Pervasive Computing}}},
keywords = {audio tours,Computational modeling,design process,HCI methods,Intelligent control,Intelligent sensors,Intelligent systems,Iterative methods,mixed reality,Process design,Prototypes,prototyping,Sensor systems,ubiquitous computing,User interfaces,Virtual reality,Wizard of Oz},
file = {C\:\\Users\\jseo1005\\Zotero\\storage\\FEII3JJW\\Dow et al. - 2005 - Wizard of Oz support throughout an iterative desig.pdf;C\:\\Users\\jseo1005\\Zotero\\storage\\AXPNQFXY\\1541964.html}
}
@inproceedings{ebelVisualizingEventSequence2021,
title = {Visualizing {{Event Sequence Data}} for {{User Behavior Evaluation}} of {{In-Vehicle Information Systems}}},
booktitle = {13th {{International Conference}} on {{Automotive User Interfaces}} and {{Interactive Vehicular Applications}}},
author = {Ebel, Patrick and Lingenfelder, Christoph and Vogelsang, Andreas},
date = {2021-09-20},
series = {{{AutomotiveUI}} '21},
pages = {219--229},
publisher = {{Association for Computing Machinery}},
location = {{New York, NY, USA}},
doi = {10.1145/3409118.3475140},
url = {https://dl.acm.org/doi/10.1145/3409118.3475140},
urldate = {2023-09-07},
abstract = {With modern In-Vehicle Information Systems (IVISs) becoming more capable and complex than ever, their evaluation becomes increasingly difficult. The analysis of large amounts of user behavior data can help to cope with this complexity and can support UX experts in designing IVISs that serve customer needs and are safe to operate while driving. We, therefore, propose a Multi-level User Behavior Visualization Framework providing effective visualizations of user behavior data that is collected via telematics from production vehicles. Our approach visualizes user behavior data on three different levels: (1) The Task Level View aggregates event sequence data generated through touchscreen interactions to visualize user flows. (2) The Flow Level View allows comparing the individual flows based on a chosen metric. (3) The Sequence Level View provides detailed insights into touch interactions, glance, and driving behavior. Our case study proves that UX experts consider our approach a useful addition to their design process.},
isbn = {978-1-4503-8063-8},
file = {C:\Users\jseo1005\Zotero\storage\27AG4E2E\Ebel et al. - 2021 - Visualizing Event Sequence Data for User Behavior Evaluation of In-Vehicle Information Systems.pdf}
}
@article{elavskyHowAccessibleMy2022,
title = {How Accessible Is My Visualization? {{Evaluating}} Visualization Accessibility with {{Chartability}}},
shorttitle = {How Accessible Is My Visualization?},
author = {Elavsky, Frank and Bennett, Cynthia and Moritz, Dominik},
date = {2022-06},
journaltitle = {Computer Graphics Forum},
shortjournal = {Computer Graphics Forum},
volume = {41},
number = {3},
pages = {57--70},
issn = {0167-7055, 1467-8659},
doi = {10.1111/cgf.14522},
url = {https://onlinelibrary.wiley.com/doi/10.1111/cgf.14522},
urldate = {2022-10-17},
abstract = {Novices and experts have struggled to evaluate the accessibility of data visualizations because there are no common shared guidelines across environments, platforms, and contexts in which data visualizations are authored. Between non-specific standards bodies like WCAG, emerging research, and guidelines from specific communities of practice, it is hard to organize knowledge on how to evaluate accessible data visualizations. We present Chartability, a set of heuristics synthesized from these various sources which enables designers, developers, researchers, and auditors to evaluate data-driven visualizations and interfaces for visual, motor, vestibular, neurological, and cognitive accessibility. In this paper, we outline our process of making a set of heuristics and accessibility principles for Chartability and highlight key features in the auditing process. Working with participants on real projects, we found that data practitioners with a novice level of accessibility skills were more confident and found auditing to be easier after using Chartability. Expert accessibility practitioners were eager to integrate Chartability into their own work. Reflecting on Chartability’s development and the preliminary user evaluation, we discuss tradeoffs of open projects, working with high-risk evaluations like auditing projects in the wild, and challenge future research projects at the intersection of visualization and accessibility to consider the broad intersections of disabilities.},
langid = {english},
file = {C:\Users\jseo1005\Zotero\storage\3GJLUDSM\Elavsky et al. - 2022 - How accessible is my visualization Evaluating vis.pdf}
}
@online{ExperienceLearnEducational,
title = {Experience + {{Learn}} / {{Educational Media}} / {{Effective Practices}} for {{Description}} of {{Science Content}} within {{Digital Talking Books}} / {{NCAM}}},
url = {http://ncamftp.wgbh.org/ncam-old-site/experience_learn/educational_media/stemdx.html},
urldate = {2023-08-28}
}
@article{fanAccessibilityDataVisualizations2022,
title = {The {{Accessibility}} of {{Data Visualizations}} on the {{Web}} for {{Screen Reader Users}}: {{Practices}} and {{Experiences During COVID-19}}},
shorttitle = {The {{Accessibility}} of {{Data Visualizations}} on the {{Web}} for {{Screen Reader Users}}},
author = {Fan, Danyang and Siu, Alexa F. and Rao, Hrishikesh V. and Kim, Gene S-H and Vazquez, Xavier and Greco, Lucy and O’Modhrain, Sile and Follmer, Sean},
date = {2022-08-18},
journaltitle = {ACM Transactions on Accessible Computing},
shortjournal = {ACM Trans. Access. Comput.},
pages = {3557899},
issn = {1936-7228, 1936-7236},
doi = {10.1145/3557899},
url = {https://dl.acm.org/doi/10.1145/3557899},
urldate = {2023-01-12},
abstract = {Data visualization has become an increasingly important means of effective data communication and has played a vital role in broadcasting the progression of COVID-19. Accessible data representations, on the other hand, have lagged behind, leaving areas of information out of reach for many blind and visually impaired (BVI) users. In this work, we sought to understand (1) the accessibility of current implementations of visualizations on the web; (2) BVI users’ preferences and current experiences when accessing data-driven media; (3) how accessible data representations on the web address these users’ access needs and help them navigate, interpret, and gain insights from the data; and (4) the practical challenges that limit BVI users’ access and use of data representations. To answer these questions, we conducted a mixed-methods study consisting of an accessibility audit of 87 data visualizations on the web to identify accessibility issues, an online survey of 127 screen reader users to understand lived experiences and preferences, and a remote contextual inquiry with 12 of the survey respondents to observe how they navigate, interpret and gain insights from accessible data representations. Our observations during this critical period of time provide an understanding of the widespread accessibility issues encountered across online data visualizations, the impact that data accessibility inequities have on the BVI community, the ways screen reader users sought access to data-driven information and made use of online visualizations to form insights, and the pressing need to make larger strides towards improving data literacy, building confidence, and enriching methods of access. Based on our findings, we provide recommendations for researchers and practitioners to broaden data accessibility on the web.},
langid = {english},
file = {C:\Users\jseo1005\Zotero\storage\Y6Q8L9IR\Fan et al. - 2022 - The Accessibility of Data Visualizations on the We.pdf}
}
@article{fanAccessibilityDataVisualizations2023,
title = {The {{Accessibility}} of {{Data Visualizations}} on the {{Web}} for {{Screen Reader Users}}: {{Practices}} and {{Experiences During COVID-19}}},
shorttitle = {The {{Accessibility}} of {{Data Visualizations}} on the {{Web}} for {{Screen Reader Users}}},
author = {Fan, Danyang and Fay Siu, Alexa and Rao, Hrishikesh and Kim, Gene Sung-Ho and Vazquez, Xavier and Greco, Lucy and O'Modhrain, Sile and Follmer, Sean},
date = {2023-03-29},
journaltitle = {ACM Transactions on Accessible Computing},
shortjournal = {ACM Trans. Access. Comput.},
volume = {16},
number = {1},
pages = {4:1--4:29},
issn = {1936-7228},
doi = {10.1145/3557899},
url = {https://dl.acm.org/doi/10.1145/3557899},
urldate = {2023-08-27},
abstract = {Data visualization has become an increasingly important means of effective data communication and has played a vital role in broadcasting the progression of COVID-19. Accessible data representations, however, have lagged behind, leaving areas of information out of reach for many blind and visually impaired (BVI) users. In this work, we sought to understand (1) the accessibility of current implementations of visualizations on the web; (2) BVI users’ preferences and current experiences when accessing data-driven media; (3) how accessible data representations on the web address these users’ access needs and help them navigate, interpret, and gain insights from the data; and (4) the practical challenges that limit BVI users’ access and use of data representations. To answer these questions, we conducted a mixed-methods study consisting of an accessibility audit of 87 data visualizations on the web to identify accessibility issues, an online survey of 127 screen reader users to understand lived experiences and preferences, and a remote contextual inquiry with 12 of the survey respondents to observe how they navigate, interpret, and gain insights from accessible data representations. Our observations during this critical period of time provide an understanding of the widespread accessibility issues encountered across online data visualizations, the impact that data accessibility inequities have on the BVI community, the ways screen reader users sought access to data-driven information and made use of online visualizations to form insights, and the pressing need to make larger strides towards improving data literacy, building confidence, and enriching methods of access. Based on our findings, we provide recommendations for researchers and practitioners to broaden data accessibility on the web.},
keywords = {Accessibility,accessible data visualization,audit,blind,data visualization,user experience,visually impaired,web accessibility},
file = {C:\Users\jseo1005\Zotero\storage\L48LCCW3\Fan et al. - 2023 - The Accessibility of Data Visualizations on the We.pdf}
}
@inproceedings{fanSlideToneTiltTone1DOF2022,
title = {Slide-{{Tone}} and {{Tilt-Tone}}: 1-{{DOF Haptic Techniques}} for {{Conveying Shape Characteristics}} of {{Graphs}} to {{Blind Users}}},
shorttitle = {Slide-{{Tone}} and {{Tilt-Tone}}},
booktitle = {{{CHI Conference}} on {{Human Factors}} in {{Computing Systems}}},
author = {Fan, Danyang and Siu, Alexa Fay and Law, Wing-Sum Adrienne and Zhen, Raymond Ruihong and O'Modhrain, Sile and Follmer, Sean},
date = {2022-04-29},
pages = {1--19},
publisher = {{ACM}},
location = {{New Orleans LA USA}},
doi = {10.1145/3491102.3517790},
url = {https://dl.acm.org/doi/10.1145/3491102.3517790},
urldate = {2023-01-12},
eventtitle = {{{CHI}} '22: {{CHI Conference}} on {{Human Factors}} in {{Computing Systems}}},
isbn = {978-1-4503-9157-3},
langid = {english},
file = {C:\Users\jseo1005\Zotero\storage\KTLG8G4D\Fan et al. - 2022 - Slide-Tone and Tilt-Tone 1-DOF Haptic Techniques .pdf}
}
@inproceedings{farihaMiningFrequentPatterns2013,
title = {Mining {{Frequent Patterns}} from {{Human Interactions}} in {{Meetings Using Directed Acyclic Graphs}}},
booktitle = {Advances in {{Knowledge Discovery}} and {{Data Mining}}},
author = {Fariha, Anna and Ahmed, Chowdhury Farhan and Leung, Carson Kai-Sang and Abdullah, S. M. and Cao, Longbing},
editor = {Pei, Jian and Tseng, Vincent S. and Cao, Longbing and Motoda, Hiroshi and Xu, Guandong},
date = {2013},
series = {Lecture {{Notes}} in {{Computer Science}}},
pages = {38--49},
publisher = {{Springer}},
location = {{Berlin, Heidelberg}},
doi = {10.1007/978-3-642-37453-1_4},
abstract = {In modern life, interactions between human beings frequently occur in meetings, where topics are discussed. Semantic knowledge of meetings can be revealed by discovering interaction patterns from these meetings. An existing method mines interaction patterns from meetings using tree structures. However, such a tree-based method may not capture all kinds of triggering relations between interactions, and it may not distinguish a participant of a certain rank from another participant of a different rank in a meeting. Hence, the tree-based method may not be able to find all interaction patterns such as those about correlated interaction. In this paper, we propose to mine interaction patterns from meetings using an alternative data structure—namely, a directed acyclic graph (DAG). Specifically, a DAG captures both temporal and triggering relations between interactions in meetings. Moreover, to distinguish one participant of a certain rank from another, we assign weights to nodes in the DAG. As such, a meeting can be modeled as a weighted DAG, from which weighted frequent interaction patterns can be discovered. Experimental results showed the effectiveness of our proposed DAG-based method for mining interaction patterns from meetings.},
isbn = {978-3-642-37453-1},
langid = {english},
keywords = {Data mining,Directed Acyclic Graphs,Frequent patterns,Human interaction,Modeling meetings},
file = {C:\Users\jseo1005\Zotero\storage\M2HRTAMH\Fariha et al. - 2013 - Mining Frequent Patterns from Human Interactions i.pdf}
}
@inproceedings{fitzpatrickProducingAccessibleStatistics2017,
title = {Producing {{Accessible Statistics Diagrams}} in {{R}}},
booktitle = {Proceedings of the 14th {{International Web}} for {{All Conference}}},
author = {Fitzpatrick, Donal and Godfrey, A. Jonathan R. and Sorge, Volker},
date = {2017-04-02},
pages = {1--4},
publisher = {{ACM}},
location = {{Perth Western Australia Australia}},
doi = {10.1145/3058555.3058564},
url = {https://dl.acm.org/doi/10.1145/3058555.3058564},
urldate = {2022-08-21},
abstract = {Blind people are at risk of being left behind in the information age if efforts are not made to improve the access to information that is not traditionally conveyed in text, whether that text be accessed in braille, audio, or a computer’s screen reading software. Most graphics summarise a scene or some aspect of data that the author hopes will inform their audience; good statistical graphics are commonly used to great effect for the sighted world, but are practically useless to a blind audience. Our work aims to provide an accessible way for blind users to easily, efficiently, and most importantly accurately, explore and query the data contained in diagrams such as bar charts, box plots, time series, and many more. We employ the statistical software environment R to compute rich semantics for these diagrams and make them web accessible by supporting screen reading and interactive exploration.},
eventtitle = {{{W4A}} '17: {{Web For All}} 2017 - {{The Future}} of {{Accessible Work}}},
isbn = {978-1-4503-4900-0},
langid = {english},
file = {C:\Users\jseo1005\Zotero\storage\ASU5R8I9\Fitzpatrick et al. - 2017 - Producing Accessible Statistics Diagrams in R.pdf}
}
@online{FormInputBindings,
title = {Form {{Input Bindings}} | {{Vue}}.Js},
url = {https://vuejs.org/guide/essentials/forms.html#select},
urldate = {2022-09-24}
}
@inproceedings{gargBraille8UnifiedBraille2016,
title = {Braille-8 — {{The}} Unified Braille {{Unicode}} System: {{Presenting}} an Ideal Unified System around 8-Dot {{Braille Unicode}} for the Braille Users World-Over},
shorttitle = {Braille-8 — {{The}} Unified Braille {{Unicode}} System},
booktitle = {2016 {{IEEE International Conference}} on {{Advanced Networks}} and {{Telecommunications Systems}} ({{ANTS}})},
author = {Garg, Anupam Kumar},
date = {2016-11},
pages = {1--6},
doi = {10.1109/ANTS.2016.7947839},
abstract = {Traditional Braille is a 6-dot code that can represent maximum 64 unique symbols with each braille cell. This is grossly insufficient to represent even ordinary English text (comprising 26 small letters, 26 capital letters, 10 digits, and 14 basic punctuations) - let alone math and science symbols. Thus a braille user has to enter 2 (and sometime 3 or 4) braille cells to enter one character or symbol. This makes braille writing very slow and tedious. Incidentally, 8-dot Braille Unicode was introduced to facilitate the Computer Braille that could represent all 95 computer characters with one braille cell itself. Since 8-dot braille can represent maximum 256 unique symbols, it has huge potential to provide the ultimate solution to all woes faced by braille users while writing texts (in English or in native languages) as well as mathematical and technical text. This paper presents a comprehensive unified braille Unicode system providing a detailed mapping of 8-dot braille Unicode pattern to represent the transcribing codes (in English or any other language) as well as the math, science, and computer symbols/characters - mostly with one braille cell itself.},
eventtitle = {2016 {{IEEE International Conference}} on {{Advanced Networks}} and {{Telecommunications Systems}} ({{ANTS}})},
keywords = {blind,braille pattern,braille standard,Braille Unicode,braille user,Braille-8,computer braille,Computers,Context,eight-dot braille code,Encoding,Geometry,Set theory,Standards,visually challenged,Writing},
file = {C\:\\Users\\jseo1005\\Zotero\\storage\\WE3NJT7N\\Garg - 2016 - Braille-8 — The unified braille Unicode system Pr.pdf;C\:\\Users\\jseo1005\\Zotero\\storage\\LBYE5IYN\\authors.html}
}
@inproceedings{giudiceLearningNonvisualGraphical2012,
title = {Learning Non-Visual Graphical Information Using a Touch-Based Vibro-Audio Interface},
booktitle = {Proceedings of the 14th International {{ACM SIGACCESS}} Conference on {{Computers}} and Accessibility - {{ASSETS}} '12},
author = {Giudice, Nicholas A. and Palani, Hari Prasath and Brenner, Eric and Kramer, Kevin M.},
date = {2012},
pages = {103},
publisher = {{ACM Press}},
location = {{Boulder, Colorado, USA}},
doi = {10.1145/2384916.2384935},
url = {http://dl.acm.org/citation.cfm?doid=2384916.2384935},
urldate = {2022-08-21},
eventtitle = {The 14th International {{ACM SIGACCESS}} Conference},
isbn = {978-1-4503-1321-6},
langid = {english}
}
@inproceedings{godfreyAccessibleInteractionModel2018,
title = {An {{Accessible Interaction Model}} for {{Data Visualisation}} in {{Statistics}}},
booktitle = {Computers {{Helping People}} with {{Special Needs}}},
author = {Godfrey, A. Jonathan R. and Murrell, Paul and Sorge, Volker},
editor = {Miesenberger, Klaus and Kouroupetroglou, Georgios},
date = {2018},
series = {Lecture {{Notes}} in {{Computer Science}}},
pages = {590--597},
publisher = {{Springer International Publishing}},
location = {{Cham}},
doi = {10.1007/978-3-319-94277-3_92},
abstract = {Data is everywhere and its communication and understanding is an important pre-requisite for the full participation of individuals in the information age. Good data visualisation is commonly used to great effect for the sighted world, but are practically useless to a blind audience. Blind people are at risk of being left behind if efforts are not made to improve the access to information that is not traditionally conveyed in text, whether that text be accessed in braille, audio, or a computer’s screen reading software. Our work aims to provide an accessible way for blind users to easily, efficiently, and most importantly accurately, explore and query the data contained in diagrams such as bar charts, box plots, time series, and many more. We employ the statistical software environment R not only as a means to generate accessible diagrams, but also as a way for blind users to directly interact with data in the same way as their sighted peers by supporting immediate data visualisation via screen reading and interactive exploration.},
isbn = {978-3-319-94277-3},
langid = {english},
keywords = {Blind People,Blind Users,Data Visualisation,Screen Reader,Statistical Software Application},
file = {C:\Users\jseo1005\Zotero\storage\H8QE92CU\Godfrey et al. - 2018 - An Accessible Interaction Model for Data Visualisa.pdf}
}
@article{godfreyAdviceBlindTeachers2015,
title = {Advice {{From Blind Teachers}} on {{How}} to {{Teach Statistics}} to {{Blind Students}}},
author = {Godfrey, A. Jonathan R. and Loots, M. Theodor},
date = {2015-11-01},
journaltitle = {Journal of Statistics Education},
volume = {23},
number = {3},
publisher = {{Taylor \& Francis}},
doi = {10.1080/10691898.2015.11889746},
url = {https://doi.org/10.1080/10691898.2015.11889746},
urldate = {2023-01-05},
abstract = {Blind students are bound to make up a very small part of the population most university lecturers will encounter during their careers. Research to date shows that good communication between staff and student improves the chances of a successful outcome for both parties. The research does show, however, that the exercise seems to be one of re-inventing the wheel, perhaps with a less than fully informed blueprint to work from. The authors use their own experiences as blind students who progressed beyond research methods or first year introductory courses into careers as teachers and researchers of statistical methods to provide guidance for their sighted colleagues. Our principle point of difference to the existing research work is that we rely on the experience of our statistical education for our current livelihoods; we were not one-off students taking a research methodology course or first year introductory course. We benefitted from the successful (and possibly the not so successful) interactions we had with our sighted teachers. It is our hope that by saving staff from wasted effort, we can spare students from unnecessary discomfort in classes that could improve their future employment prospects. Our aim is therefore to provide practical support for our sighted colleagues and blind peers as we work together towards the empowerment of blind students in becoming competent producers of statistical information, not just consumers who interpret that information.},
keywords = {Braille,Low vision,Speech output,Tactile images},
file = {C:\Users\jseo1005\Zotero\storage\8GHY4ISY\Godfrey and Loots - 2015 - Advice From Blind Teachers on How to Teach Statist.pdf}
}
@article{godfreyStatisticalSoftwareBlind2013,
title = {Statistical {{Software}} from a {{Blind Person}}'s {{Perspective}}},
author = {Godfrey, A. Jonathan R.},
date = {2013},
journaltitle = {The R Journal},
shortjournal = {The R Journal},
volume = {5},
number = {1},
pages = {73},
issn = {2073-4859},
doi = {10.32614/RJ-2013-007},
url = {https://journal.r-project.org/archive/2013/RJ-2013-007/index.html},
urldate = {2022-08-22},
abstract = {Blind people have experienced access issues to many software applications since the advent of the Windows operating system; statistical software has proven to follow the rule and not be an exception. The ability to use R within minutes of download with next to no adaptation has opened doors for accessible production of statistical analyses for this author (himself blind) and blind students around the world. This article shows how little is required to make R the most accessible statistical software available today. There is any number of ramifications that this opportunity creates for blind students, especially in terms of their future research and employment prospects. There is potential for making R even better for blind users. The extensibility of R makes this possible through added functionality being made available in an add-on package called BrailleR. Functions in this package are intended to make graphical information available in text form.},
langid = {english},
file = {C:\Users\jseo1005\Zotero\storage\HGNVE9WY\Godfrey - 2013 - Statistical Software from a Blind Person's Perspec.pdf}
}
@inproceedings{gotzelmannLucentMaps3DPrinted2016,
title = {{{LucentMaps}}: {{3D Printed Audiovisual Tactile Maps}} for {{Blind}} and {{Visually Impaired People}}},
shorttitle = {{{LucentMaps}}},
booktitle = {Proceedings of the 18th {{International ACM SIGACCESS Conference}} on {{Computers}} and {{Accessibility}}},
author = {Götzelmann, Timo},
date = {2016-10-23},
series = {{{ASSETS}} '16},
pages = {81--90},
publisher = {{Association for Computing Machinery}},
location = {{New York, NY, USA}},
doi = {10.1145/2982142.2982163},
url = {https://dl.acm.org/doi/10.1145/2982142.2982163},
urldate = {2023-09-09},
abstract = {Tactile maps support blind and visually impaired people in orientation and to familiarize with unfamiliar environments. Interactive approaches complement these maps with auditory feedback. However, commonly these approaches focus on blind people. We present an approach which incorporates visually impaired people by visually augmenting relevant parts of tactile maps. These audiovisual tactile maps can be used in conjunction with common tablet computers and smartphones. By integrating conductive elements into 3D printed tactile maps, they can be recognized by a single touch on the mobile device's display, which eases the handling for blind and visually impaired people. To allow multiple elevation levels in our transparent tactile maps, we conducted a study to reconcile technical and physiological requirements of off-the-shelf 3D printers, capacitive touch inputs and the human tactile sense. We propose an interaction concept for 3D printed audiovisual tactile maps, verify its feasibility and test it with a user study. Our discussion includes economic considerations crucial for a broad dissemination of tactile maps for both blind and visually impaired people.},
isbn = {978-1-4503-4124-0},
keywords = {3d printing,accessibility,audio-tactile,blind,capacitive,capacitive sensing,functional,global,marker,orientation,tactile maps,tangible user interfaces,touch screen},
file = {C:\Users\jseo1005\Zotero\storage\CE9AZRH9\Götzelmann - 2016 - LucentMaps 3D Printed Audiovisual Tactile Maps fo.pdf}
}
@article{gotzelmannVisuallyAugmentedAudioTactile2018,
title = {Visually {{Augmented Audio-Tactile Graphics}} for {{Visually Impaired People}}},
author = {Götzelmann, T.},
date = {2018-06-08},
journaltitle = {ACM Transactions on Accessible Computing},
shortjournal = {ACM Trans. Access. Comput.},
volume = {11},
number = {2},
pages = {8:1--8:31},
issn = {1936-7228},
doi = {10.1145/3186894},
url = {https://dl.acm.org/doi/10.1145/3186894},
urldate = {2023-09-09},
abstract = {Tactile graphics play an essential role in knowledge transfer for blind people. The tactile exploration of these graphics is often challenging because of the cognitive load caused by physiological constraints and their complexity. The coupling of physical tactile graphics with electronic devices offers to support the tactile exploration by auditory feedback. Often, these systems have strict constraints regarding their mobility or the process of coupling both components. Additionally, visually impaired people cannot appropriately benefit from their residual vision. This article presents a concept for 3D printed tactile graphics, which offers to use audio-tactile graphics with usual smartphones or tablet-computers. By using capacitive markers, the coupling of the tactile graphics with the mobile device is simplified. These tactile graphics integrating these markers can be printed in one turn by off-the-shelf 3D printers without any post-processing and allows us to use multiple elevation levels for graphical elements. Based on the developed generic concept on visually augmented audio-tactile graphics, we presented a case study for maps. A prototypical implementation was tested by a user study with visually impaired people. All the participants were able to interact with the 3D printed tactile maps using a standard tablet computer. To study the effect of visual augmentation of graphical elements, we conducted another comprehensive user study. We tested multiple types of graphics and obtained evidence that visual augmentation may offer clear advantages for the exploration of tactile graphics. Even participants with a minor residual vision could solve the tasks with visual augmentation more quickly and accurately.},
keywords = {3D printing,accessibility,audio-tactile,augmented,blind,capacitive,capacitive sensing,global,marker,orientation,Tactile graphics,tangible user interfaces,touch screen,visually impaired},
file = {C:\Users\jseo1005\Zotero\storage\6T38EXCR\Götzelmann - 2018 - Visually Augmented Audio-Tactile Graphics for Visu.pdf}
}
@online{gouldEffectivePracticesDescription2008,
title = {Effective {{Practices}} for {{Description}} of {{Science Content}} within {{Digital Talking Books}}},
author = {Gould, Bryan and O'Connell, Trisha and Freed, Geoff},
date = {2008-12},
url = {http://ncamftp.wgbh.org/ncam-old-site/experience_learn/educational_media/stemdx.html},
urldate = {2023-08-27},
file = {C:\Users\jseo1005\Zotero\storage\UL4K86A9\Experience + Learn Educational Media Effective Practices for Description of Science Content with.pdf}
}
@incollection{hartDevelopmentNASATLXTask1988,
title = {Development of {{NASA-TLX}} ({{Task Load Index}}): {{Results}} of {{Empirical}} and {{Theoretical Research}}},
shorttitle = {Development of {{NASA-TLX}} ({{Task Load Index}})},
booktitle = {Advances in {{Psychology}}},
author = {Hart, Sandra G. and Staveland, Lowell E.},
editor = {Hancock, Peter A. and Meshkati, Najmedin},
date = {1988-01-01},
series = {Human {{Mental Workload}}},
volume = {52},
pages = {139--183},
publisher = {{North-Holland}},
doi = {10.1016/S0166-4115(08)62386-9},
url = {https://www.sciencedirect.com/science/article/pii/S0166411508623869},
urldate = {2023-03-19},
abstract = {The results of a multi-year research program to identify the factors associated with variations in subjective workload within and between different types of tasks are reviewed. Subjective evaluations of 10 workload-related factors were obtained from 16 different experiments. The experimental tasks included simple cognitive and manual control tasks, complex laboratory and supervisory control tasks, and aircraft simulation. Task-, behavior-, and subject-related correlates of subjective workload experiences varied as a function of difficulty manipulations within experiments, different sources of workload between experiments, and individual differences in workload definition. A multi-dimensional rating scale is proposed in which information about the magnitude and sources of six workload-related factors are combined to derive a sensitive and reliable estimate of workload.},
langid = {english},
file = {C\:\\Users\\jseo1005\\Zotero\\storage\\G9FV42GA\\Hart and Staveland - 1988 - Development of NASA-TLX (Task Load Index) Results.pdf;C\:\\Users\\jseo1005\\Zotero\\storage\\C4YNUMGI\\S0166411508623869.html}
}
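The overall NASA-TLX workload score described in the entry above is conventionally computed as a weighted average of six subscale ratings, with weights derived from 15 pairwise comparisons between the subscales. A minimal Python sketch of that scoring scheme (illustrative only; the example ratings and tally below are hypothetical):

# Conventional NASA-TLX weighted scoring: six subscales, weights from the
# 15 pairwise comparisons. Example numbers are hypothetical.
SUBSCALES = ["mental", "physical", "temporal", "performance", "effort", "frustration"]

def tlx_score(ratings, tally):
    # ratings: subscale -> 0..100 rating
    # tally: subscale -> number of times chosen across the 15 pairwise comparisons
    assert sum(tally.values()) == 15, "tallies over the 15 comparisons must sum to 15"
    return sum(ratings[s] * tally[s] for s in SUBSCALES) / 15.0

ratings = {"mental": 70, "physical": 20, "temporal": 55,
           "performance": 40, "effort": 65, "frustration": 30}
tally = {"mental": 5, "physical": 0, "temporal": 3,
         "performance": 2, "effort": 4, "frustration": 1}
print(tlx_score(ratings, tally))  # -> 59.0 on the 0..100 workload scale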
@software{hassakuAudioplotlib2022,
title = {Audio-Plot-Lib},
author = {{hassaku}},
date = {2022-07-19T09:41:21Z},
origdate = {2020-12-13T07:16:03Z},
url = {https://github.com/hassaku/audio-plot-lib},
urldate = {2022-08-21},
abstract = {This library provides graph sonification functions and has been developed for a project named "Data science and machine learning resources for screen reader users". Please refer to the project page for more details.},
keywords = {audio,data-science,google-colab,graphs,machine-learning,python,sonification,visually-impaired}
}
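audio-plot-lib exposes its own plotting API, which is not reproduced here. The following is a generic, self-contained Python sketch of the graph-sonification idea such libraries implement (mapping y-values to pitch), using only the standard library; every name and parameter is an illustrative assumption, not the library's interface:

# Generic graph sonification (not audio-plot-lib's API): each data point
# becomes a short sine tone whose pitch rises with the y-value.
import math, struct, wave

def sonify(values, path="line.wav", rate=44100, note_s=0.12,
           f_lo=220.0, f_hi=880.0):
    lo, hi = min(values), max(values)
    span = (hi - lo) or 1.0
    frames = bytearray()
    for v in values:
        freq = f_lo + (v - lo) / span * (f_hi - f_lo)  # pitch tracks y-value
        for i in range(int(rate * note_s)):
            sample = int(32767 * 0.4 * math.sin(2 * math.pi * freq * i / rate))
            frames += struct.pack("<h", sample)  # 16-bit mono PCM
    with wave.open(path, "wb") as w:
        w.setnchannels(1); w.setsampwidth(2); w.setframerate(rate)
        w.writeframes(bytes(frames))

sonify([0, 1, 4, 9, 16, 9, 4, 1, 0])  # rising-then-falling contour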
@inproceedings{heTacTILEPreliminaryToolchain2017,
title = {{{TacTILE}}: {{A Preliminary Toolchain}} for {{Creating Accessible Graphics}} with {{3D-Printed Overlays}} and {{Auditory Annotations}}},
shorttitle = {{{TacTILE}}},
booktitle = {Proceedings of the 19th {{International ACM SIGACCESS Conference}} on {{Computers}} and {{Accessibility}}},
author = {He, Liang and Wan, Zijian and Findlater, Leah and Froehlich, Jon E.},
date = {2017-10-19},
series = {{{ASSETS}} '17},
pages = {397--398},
publisher = {{Association for Computing Machinery}},
location = {{New York, NY, USA}},
doi = {10.1145/3132525.3134818},
url = {https://dl.acm.org/doi/10.1145/3132525.3134818},
urldate = {2023-09-09},
abstract = {Tactile overlays with audio annotations can increase the accessibility of touchscreens for blind users; however, preparing these overlays is complex and labor intensive. We introduce TacTILE, a novel toolchain to more easily create tactile overlays with audio annotations for arbitrary touchscreen graphics (e.g., graphs, pictures, maps). The workflow includes: (i) an annotation tool to add audio to graphical elements, (ii) a fabrication process that generates 3D-printed tactile overlays, and (iii) a custom app for the user to explore graphics with these overlays. We close with a pilot study with one blind participant who explores three examples (floor plan, photo, and chart), and a discussion of future work.},
isbn = {978-1-4503-4926-0},
keywords = {3d printing,accessible graphics,blind users,speech,tactile overlays,touchscreens,visual impairments},
file = {C:\Users\jseo1005\Zotero\storage\T355EZW5\He et al. - 2017 - TacTILE A Preliminary Toolchain for Creating Acce.pdf}
}
@software{HighchartsHighcharts2022,
title = {Highcharts/Highcharts},
date = {2022-08-21T14:58:48Z},
origdate = {2010-06-11T12:23:53Z},
url = {https://github.com/highcharts/highcharts},
urldate = {2022-08-21},
abstract = {Highcharts JS, the JavaScript charting framework},
organization = {{Highcharts}}
}
@software{HighchartsSonificationStudio2022,
title = {Highcharts {{Sonification Studio}}},
date = {2022-11-18T10:46:22Z},
origdate = {2019-08-26T11:20:08Z},
url = {https://github.com/highcharts/sonification-studio},
urldate = {2023-03-11},
organization = {{Highcharts}}
}
@inproceedings{hollowayAnimationsYourFingertips2022,
title = {Animations at {{Your Fingertips}}: {{Using}} a {{Refreshable Tactile Display}} to {{Convey Motion Graphics}} for {{People}} Who Are {{Blind}} or Have {{Low Vision}}},
shorttitle = {Animations at {{Your Fingertips}}},
booktitle = {The 24th {{International ACM SIGACCESS Conference}} on {{Computers}} and {{Accessibility}}},
author = {Holloway, Leona and Ananthanarayan, Swamy and Butler, Matthew and De Silva, Madhuka Thisuri and Ellis, Kirsten and Goncu, Cagatay and Stephens, Kate and Marriott, Kim},
date = {2022-10-22},
pages = {1--16},
publisher = {{ACM}},
location = {{Athens Greece}},
doi = {10.1145/3517428.3544797},
url = {https://dl.acm.org/doi/10.1145/3517428.3544797},
urldate = {2023-09-07},
eventtitle = {{{ASSETS}} '22: {{The}} 24th {{International ACM SIGACCESS Conference}} on {{Computers}} and {{Accessibility}}},
isbn = {978-1-4503-9258-7},
langid = {english},
file = {C:\Users\jseo1005\Zotero\storage\HWAUDDT7\Holloway et al. - 2022 - Animations at Your Fingertips Using a Refreshable Tactile Display to Convey Motion Graphics for Peo.pdf}
}
@article{hooperDesigningMoreEffective2011,
title = {Towards Designing More Effective Systems by Understanding User Experiences},
author = {Hooper, Clare J.},
date = {2011-09},
journaltitle = {ACM SIGWEB Newsletter},
shortjournal = {SIGWEB Newsl.},
pages = {1--3},
issn = {1931-1745, 1931-1435},
doi = {10.1145/2020936.2020940},
url = {https://dl.acm.org/doi/10.1145/2020936.2020940},
urldate = {2022-08-21},
abstract = {This work is about social technologies, user experiences and the problems of creative design. It is motivated by a desire to give people who are offline --- whether for reasons of poverty, disability, infrastructure or cultural background --- the access to social technologies that is currently provided via the web, letting them access the online content and communication facilities that so many of us take for granted. There exist simple technologically-oriented approaches to this problem, such as identifying functional requirements and prototyping tools. This focus on technology, however, comes at a cost of neglecting the experiential aspects which motivate the work, and can result in systems that are functional but unappealing to (or even unusable by) their target audiences.},
issue = {Autumn},
langid = {english},
file = {C:\Users\jseo1005\Zotero\storage\JF48F5CD\Hooper - 2011 - Towards designing more effective systems by unders.pdf}
}
@inproceedings{hoqueAccessibleDataRepresentation2023,
title = {Accessible {{Data Representation}} with {{Natural Sound}}},
booktitle = {Proceedings of the 2023 {{CHI Conference}} on {{Human Factors}} in {{Computing Systems}}},
author = {Hoque, Md Naimul and Ehtesham-Ul-Haque, Md and Elmqvist, Niklas and Billah, Syed Masum},
date = {2023-04-19},
series = {{{CHI}} '23},
pages = {1--19},
publisher = {{Association for Computing Machinery}},
location = {{New York, NY, USA}},
doi = {10.1145/3544548.3581087},
url = {https://dl.acm.org/doi/10.1145/3544548.3581087},
urldate = {2023-09-10},
abstract = {Sonification translates data into non-speech audio. Such auditory representations can make data visualization accessible to people who are blind or have low vision (BLV). This paper presents a sonification method for translating common data visualization into a blend of natural sounds. We hypothesize that people’s familiarity with sounds drawn from nature, such as birds singing in a forest, and their ability to listen to these sounds in parallel, will enable BLV users to perceive multiple data points being sonified at the same time. Informed by an extensive literature review and a preliminary study with 5 BLV participants, we designed an accessible data representation tool, Susurrus, that combines our sonification method with other accessibility features, such as keyboard interaction and text-to-speech feedback. Finally, we conducted a user study with 12 BLV participants and report the potential and application of natural sounds for sonification compared to existing sonification tools.},
isbn = {978-1-4503-9421-5},
keywords = {Accessibility,Data visualization,Natural sound,Sonification},
file = {C:\Users\jseo1005\Zotero\storage\Q5SR9E3T\Hoque et al. - 2023 - Accessible Data Representation with Natural Sound.pdf}
}
@article{hunterMatplotlib2DGraphics2007,
title = {Matplotlib: {{A 2D Graphics Environment}}},
shorttitle = {Matplotlib},
author = {Hunter, John D.},
date = {2007-05-01},
journaltitle = {Computing in Science \& Engineering},
volume = {9},
number = {03},
pages = {90--95},
publisher = {{IEEE Computer Society}},
issn = {1521-9615},
doi = {10.1109/MCSE.2007.55},
url = {https://www.computer.org/csdl/magazine/cs/2007/03/c3090/13rRUwbJD0A},
urldate = {2023-01-24},
abstract = {Matplotlib is a 2D graphics package for Python for application development, interactive scripting, and publication-quality image generation across user interfaces and operating systems.},
langid = {english}
}
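As a point of reference for the entry above, a minimal example of the pyplot interface the paper describes; the data values and output file name are arbitrary:

# Minimal matplotlib usage: plot a line and save a figure to disk.
import matplotlib
matplotlib.use("Agg")  # headless backend; no display required
import matplotlib.pyplot as plt

x = range(10)
plt.plot(x, [v ** 2 for v in x], label="y = x^2")
plt.xlabel("x"); plt.ylabel("y"); plt.legend()
plt.savefig("figure.png", dpi=300)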
@article{huntInteractiveSonification2011,
title = {Interactive {{Sonification}}},
author = {Hunt, Andy and Hermann, Thomas},
date = {2011},
journaltitle = {The Sonification Handbook},
url = {https://pub.uni-bielefeld.de/record/2935181},
urldate = {2023-09-09},
abstract = {This chapter places a special focus on those situations where there is a tight control loop (a real-time interactive collaboration) between the human user and the system producing the sonification. It explains the background (why humans appear to use interactive sonification as a natural tool for exploring the world) as well as describing the different methods and application domains.},
isbn = {9783832528195},
langid = {english},
file = {C:\Users\jseo1005\Zotero\storage\ERJKZBBJ\2935181.html}
}
@inproceedings{joynerVisualizationAccessibilityWild2022,
title = {Visualization {{Accessibility}} in the {{Wild}}: {{Challenges Faced}} by {{Visualization Designers}}},
shorttitle = {Visualization {{Accessibility}} in the {{Wild}}},
booktitle = {{{CHI Conference}} on {{Human Factors}} in {{Computing Systems}}},
author = {Joyner, Shakila Cherise S and Riegelhuth, Amalia and Garrity, Kathleen and Kim, Yea-Seul and Kim, Nam Wook},
date = {2022-04-27},
pages = {1--19},
publisher = {{ACM}},
location = {{New Orleans LA USA}},
doi = {10.1145/3491102.3517630},
url = {https://dl.acm.org/doi/10.1145/3491102.3517630},
urldate = {2022-12-29},
abstract = {Data visualizations are now widely used across many disciplines. However, many of them are not easily accessible for visually impaired people. In this work, we use three-staged mixed methods to understand the current practice of accessible visualization design for visually impaired people. We analyzed 95 visualizations from various venues to inspect how they are made inaccessible. To understand the rationale and context behind the design choices, we also conducted surveys with 144 practitioners in the U.S. and follow-up interviews with ten selected survey participants. Our findings include the difficulties of handling modern complex and interactive visualizations and the lack of accessibility support from visualization tools in addition to personal and organizational factors making it challenging to perform accessible design practices.},
eventtitle = {{{CHI}} '22: {{CHI Conference}} on {{Human Factors}} in {{Computing Systems}}},
isbn = {978-1-4503-9157-3},
langid = {english},
file = {C:\Users\jseo1005\Zotero\storage\B63HSLUG\Joyner et al. - 2022 - Visualization Accessibility in the Wild Challenge.pdf}
}
@software{julianna-langstonChart2Music2022,
title = {{{Chart2Music}}},
author = {{julianna-langston}},
date = {2022-08-15T04:28:31Z},
origdate = {2022-06-12T02:59:46Z},
url = {https://github.com/julianna-langston/chart2music},
urldate = {2022-08-20},
abstract = {Turns charts into music so the blind can hear data}
}
@inproceedings{kadayatImpactSentenceLength2020,
title = {Impact of {{Sentence Length}} on the {{Readability}} of {{Web}} for {{Screen Reader Users}}},
booktitle = {Universal {{Access}} in {{Human-Computer Interaction}}. {{Design Approaches}} and {{Supporting Technologies}}},
author = {Kadayat, Bam Bahadur and Eika, Evelyn},
editor = {Antona, Margherita and Stephanidis, Constantine},
date = {2020},
series = {Lecture {{Notes}} in {{Computer Science}}},
pages = {261--271},
publisher = {{Springer International Publishing}},
location = {{Cham}},
doi = {10.1007/978-3-030-49282-3_18},
abstract = {Readability of text is generally believed to be connected to sentence length. Most studies on readability are based on visual reading. Less is known about text readability for users relying on screen readers, such as users who are blind. This study therefore set out to investigate the effect of sentence length on the readability of web texts accessed using screen readers. A controlled within-subjects experiment was performed with twenty-one participants. Participants used a screen reader to read five texts with different sentence lengths. The participants’ comprehension and perceived workload were measured. The findings reveal that there is a significant effect of sentence length and most participants exhibit the highest comprehension and lowest workload with sentences comprising 16–20 words. Implications of these results are that web content providers should strive for sentence length of 16–20 words to maximize readability.},
isbn = {978-3-030-49282-3},
langid = {english},
keywords = {Accessibility,Blind,Readability,Screen reader,Sentence length,Universal design,Workload},
file = {C:\Users\jseo1005\Zotero\storage\RG3JKF76\Kadayat and Eika - 2020 - Impact of Sentence Length on the Readability of We.pdf}
}
@inproceedings{kierasGeneralizedTransitionNetwork1983,
title = {A Generalized Transition Network Representation for Interactive Systems},
booktitle = {Proceedings of the {{SIGCHI Conference}} on {{Human Factors}} in {{Computing Systems}}},
author = {Kieras, David and Polson, Peter G.},
date = {1983-12-12},
series = {{{CHI}} '83},
pages = {103--106},
publisher = {{Association for Computing Machinery}},
location = {{New York, NY, USA}},
doi = {10.1145/800045.801590},
url = {https://dl.acm.org/doi/10.1145/800045.801590},
urldate = {2023-09-07},
abstract = {A general method for describing the behavior of an interactive system is presented which is based on transition networks generalized enough to describe even very complex systems easily, as shown by an example description of a word processor. The key feature is the ability to easily describe hierarchies of modes or states of the system. The representation system is especially valuable as a design tool when used in a simulation of a proposed user interface. In order to characterize the interaction between a user and a system, an explicit and formal representation of the behavior of the system itself is needed. To be of value in the design of user interfaces, the representation should be independent of the actual implementation of the system, but also reflect the structural properties of the system's behavior, such as its hierarchical form, the possible modes, and the consistent patterns of interaction. At the same time, the representation must be easy to define and understand. This paper presents a representation notation with these properties.},
isbn = {978-0-89791-121-4},
file = {C:\Users\jseo1005\Zotero\storage\TGI29GKW\Kieras and Polson - 1983 - A generalized transition network representation for interactive systems.pdf}
}
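To make the idea in the entry above concrete, a toy Python sketch of a transition-network description of a moded interface: states map input tokens to successor states. This flat dictionary only gestures at the paper's generalized networks, which additionally support hierarchies of modes; the structure and token names are hypothetical:

# Toy transition network: each state maps input tokens to successor states.
NETWORK = {
    "editing": {"ESC": "menu", "CHAR": "editing"},
    "menu":    {"ESC": "editing", "SAVE": "saving"},
    "saving":  {"DONE": "editing"},
}

def run(inputs, state="editing"):
    for token in inputs:
        state = NETWORK[state].get(token, state)  # undefined input: stay put
    return state

print(run(["CHAR", "ESC", "SAVE", "DONE"]))  # -> "editing"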
@article{kimAccessibleVisualizationDesign2021,
title = {Accessible {{Visualization}}: {{Design Space}}, {{Opportunities}}, and {{Challenges}}},
shorttitle = {Accessible {{Visualization}}},
author = {Kim, N. W. and Joyner, S. C. and Riegelhuth, A. and Kim, Y.},
date = {2021-06},
journaltitle = {Computer Graphics Forum},
shortjournal = {Computer Graphics Forum},
volume = {40},
number = {3},
pages = {173--188},
issn = {0167-7055, 1467-8659},
doi = {10.1111/cgf.14298},
url = {https://onlinelibrary.wiley.com/doi/10.1111/cgf.14298},
urldate = {2022-02-09},
abstract = {Visualizations are now widely used across disciplines to understand and communicate data. The benefit of visualizations lies in leveraging our natural visual perception. However, the sole dependency on vision can produce unintended discrimination against people with visual impairments. While the visualization field has seen enormous growth in recent years, supporting people with disabilities is much less explored. In this work, we examine approaches to support this marginalized user group, focusing on visual disabilities. We collected and analyzed papers published for the last 20 years on visualization accessibility. We mapped a design space for accessible visualization that includes seven dimensions: user group, literacy task, chart type, interaction, information granularity, sensory modality, assistive technology. We described the current knowledge gap in light of the latest advances in visualization and presented a preliminary accessibility model by synthesizing findings from existing research. Finally, we reflected on the dimensions and discussed opportunities and challenges for future research.},
langid = {english},
file = {C:\Users\jseo1005\Zotero\storage\E42LXLJL\Kim et al. - 2021 - Accessible Visualization Design Space, Opportunit.pdf}
}
@inproceedings{kimAnsweringQuestionsCharts2020,
title = {Answering {{Questions}} about {{Charts}} and {{Generating Visual Explanations}}},
booktitle = {Proceedings of the 2020 {{CHI Conference}} on {{Human Factors}} in {{Computing Systems}}},
author = {Kim, Dae Hyun and Hoque, Enamul and Agrawala, Maneesh},
date = {2020-04-21},
pages = {1--13},
publisher = {{ACM}},
location = {{Honolulu HI USA}},
doi = {10.1145/3313831.3376467},
url = {https://dl.acm.org/doi/10.1145/3313831.3376467},
urldate = {2022-08-21},
eventtitle = {{{CHI}} '20: {{CHI Conference}} on {{Human Factors}} in {{Computing Systems}}},
isbn = {978-1-4503-6708-0},
langid = {english},
file = {C:\Users\jseo1005\Zotero\storage\PB8PFLZX\Kim et al. - 2020 - Answering Questions about Charts and Generating Vi.pdf}
}
@inproceedings{kimExploringChartQuestion2023,
title = {Exploring {{Chart Question Answering}} for {{Blind}} and {{Low Vision Users}}},
booktitle = {Proceedings of the 2023 {{CHI Conference}} on {{Human Factors}} in {{Computing Systems}}},
author = {Kim, Jiho and Srinivasan, Arjun and Kim, Nam Wook and Kim, Yea-Seul},
date = {2023-04-19},
series = {{{CHI}} '23},
pages = {1--15},
publisher = {{Association for Computing Machinery}},
location = {{New York, NY, USA}},
doi = {10.1145/3544548.3581532},
url = {https://dl.acm.org/doi/10.1145/3544548.3581532},
urldate = {2023-05-05},
abstract = {Data visualizations can be complex or involve numerous data points, making them impractical to navigate using screen readers alone. Question answering (QA) systems have the potential to support visualization interpretation and exploration without overwhelming blind and low vision (BLV) users. To investigate if and how QA systems can help BLV users in working with visualizations, we conducted a Wizard of Oz study with 24 BLV people where participants freely posed queries about four visualizations. We collected 979 queries and mapped them to popular analytic task taxonomies. We found that retrieving value and finding extremum were the most common tasks, participants often made complex queries and used visual references, and the data topic notably influenced the queries. We compile a list of design considerations for accessible chart QA systems and make our question corpus publicly available to guide future research and development.},
isbn = {978-1-4503-9421-5},
keywords = {Accessibility,Design Considerations,Human-Subjects Qualitative Studies,Question Answering,Visualization},
file = {C:\Users\jseo1005\Zotero\storage\XWHZ35QQ\Kim et al. - 2023 - Exploring Chart Question Answering for Blind and L.pdf}
}
@book{kressMultimodalitySocialSemiotic2010,
title = {Multimodality: {{A Social Semiotic Approach}} to {{Contemporary Communication}}},
shorttitle = {Multimodality},
author = {Kress, Gunther R.},
date = {2010},
eprint = {ihTm_cI58JQC},
eprinttype = {googlebooks},
publisher = {{Taylor \& Francis}},
abstract = {The 21st century is awash with ever more mixed and remixed images, writing, layout, sound, gesture, speech, and 3D objects. Multimodality looks beyond language and examines these multiple modes of communication and meaning making. Multimodality: A Social Semiotic Approach to Contemporary Communication represents a long-awaited and much anticipated addition to the study of multimodality from the scholar who pioneered and continues to play a decisive role in shaping the field. Written in an accessible manner and illustrated with a wealth of photos and illustrations to clearly demonstrate the points made, Multimodality: A Social Semiotic Approach to Contemporary Communication deliberately sets out to locate communication in the everyday, covering topics and issues not usually discussed in books of this kind, from traffic signs to mobile phones. In this book, Gunther Kress presents a contemporary, distinctive and widely applicable approach to communication. He provides the framework necessary for understanding the attempt to bring all modes of meaning-making together under one unified theoretical roof. This exploration of an increasingly vital area of language and communication studies will be of interest to advanced undergraduate and postgraduate students in the fields of English language and applied linguistics, media and communication studies and education.},
isbn = {978-0-415-32060-3},
langid = {english},
pagetotal = {234},
keywords = {Language Arts \& Disciplines / Communication Studies}
}
@article{krossDemocratizationDataScience2020,
title = {The {{Democratization}} of {{Data Science Education}}},
author = {Kross, Sean and Peng, Roger D. and Caffo, Brian S. and Gooding, Ira and Leek, Jeffrey T.},
date = {2020-01-02},
journaltitle = {The American Statistician},
volume = {74},
number = {1},
pages = {1--7},
publisher = {{Taylor \& Francis}},
issn = {0003-1305},
doi = {10.1080/00031305.2019.1668849},
url = {https://doi.org/10.1080/00031305.2019.1668849},
urldate = {2023-01-05},
abstract = {Over the last three decades, data have become ubiquitous and cheap. This transition has accelerated over the last five years and training in statistics, machine learning, and data analysis has struggled to keep up. In April 2014, we launched a program of nine courses, the Johns Hopkins Data Science Specialization, which has now had more than 4 million enrollments over the past five years. Here, the program is described and compared to standard data science curricula as they were organized in 2014 and 2015. We show that novel pedagogical and administrative decisions introduced in our program are now standard in online data science programs. The impact of the Data Science Specialization on data science education in the U.S. is also discussed. Finally, we conclude with some thoughts about the future of data science education in a data democratized world.},
keywords = {Applications and case studies,Education,Statistical computing}
}
@article{leeDataUseMiddle2018,
title = {Data {{Use}} by {{Middle}} and {{Secondary Students}} in the {{Digital Age}}: {{A Status Report}} and {{Future Prospects}}},
shorttitle = {Data {{Use}} by {{Middle}} and {{Secondary Students}} in the {{Digital Age}}},
author = {Lee, Victor and Wilkerson, Michelle},
date = {2018-01-01},
journaltitle = {Instructional Technology and Learning Sciences Faculty Publications},
pages = {1--43},
url = {https://digitalcommons.usu.edu/itls_facpub/634},
file = {C:\Users\jseo1005\Zotero\storage\PR22UQB9\634.html}
}
@article{leeHowPeopleMake2016,
title = {How Do {{People Make Sense}} of {{Unfamiliar Visualizations}}?: {{A Grounded Model}} of {{Novice}}'s {{Information Visualization Sensemaking}}},
shorttitle = {How Do {{People Make Sense}} of {{Unfamiliar Visualizations}}?},
author = {Lee, Sukwon and Kim, Sung-Hee and Hung, Ya-Hsin and Lam, Heidi and Kang, Youn-Ah and Yi, Ji Soo},
date = {2016-01},
journaltitle = {IEEE Transactions on Visualization and Computer Graphics},
volume = {22},
number = {1},
pages = {499--508},
issn = {1941-0506},
doi = {10.1109/TVCG.2015.2467195},
abstract = {In this paper, we would like to investigate how people make sense of unfamiliar information visualizations. In order to achieve the research goal, we conducted a qualitative study by observing 13 participants when they endeavored to make sense of three unfamiliar visualizations (i.e., a parallel-coordinates plot, a chord diagram, and a treemap) that they encountered for the first time. We collected data including audio/video record of think-aloud sessions and semi-structured interview; and analyzed the data using the grounded theory method. The primary result of this study is a grounded model of NOvice's information VIsualization Sensemaking (NOVIS model), which consists of the five major cognitive activities: (1) encountering visualization, (2) constructing a frame, (3) exploring visualization, (4) questioning the frame, and (5) floundering on visualization. We introduce the NOVIS model by explaining the five activities with representative quotes from our participants. We also explore the dynamics in the model. Lastly, we compare with other existing models and share further research directions that arose from our observations.},
eventtitle = {{{IEEE Transactions}} on {{Visualization}} and {{Computer Graphics}}},
keywords = {Data visualization,Encoding,grounded theory,Hidden Markov models,Image color analysis,information visualization,Interviews,novice users,qualitative study,Sensemaking model,Vehicles,Visualization},
file = {C:\Users\jseo1005\Zotero\storage\5U3YM9W5\Lee et al. - 2016 - How do People Make Sense of Unfamiliar Visualizati.pdf}
}
@article{leeReachingBroaderAudiences2020,
title = {Reaching {{Broader Audiences With Data Visualization}}},
author = {Lee, Bongshin and Choe, Eun Kyoung and Isenberg, Petra and Marriott, Kim and Stasko, John},
date = {2020-03-01},
journaltitle = {IEEE Computer Graphics and Applications},
shortjournal = {IEEE Comput. Grap. Appl.},
volume = {40},
number = {2},
pages = {82--90},
issn = {0272-1716, 1558-1756},
doi = {10.1109/MCG.2020.2968244},
url = {https://ieeexplore.ieee.org/document/9023497/},
urldate = {2023-01-12},
abstract = {The visualization research community can and should reach broader audiences beyond data-savvy groups of people, because these audiences could also greatly benefit from visual access to data. In this paper, we discuss four research topics—personal data visualization, data visualization on mobile devices, inclusive data visualization, and multimodal interaction for data visualization—that, individually and collaboratively, would help us reach broader audiences with data visualization, making data more accessible.},
langid = {english},
file = {C:\Users\jseo1005\Zotero\storage\M7C3NLRW\Lee et al. - 2020 - Reaching Broader Audiences With Data Visualization.pdf}
}
@inproceedings{liuCrossA11yIdentifyingVideo2022,
title = {{{CrossA11y}}: {{Identifying Video Accessibility Issues}} via {{Cross-modal Grounding}}},
shorttitle = {{{CrossA11y}}},
booktitle = {The 35th {{Annual ACM Symposium}} on {{User Interface Software}} and {{Technology}}},
author = {Liu, Xingyu "Bruce" and Wang, Ruolin and Li, Dingzeyu and Chen, Xiang "Anthony" and Pavel, Amy},
date = {2022-10-29},
eprint = {2208.11144},
eprinttype = {arxiv},
eprintclass = {cs},
pages = {1--14},
doi = {10.1145/3526113.3545703},
url = {http://arxiv.org/abs/2208.11144},
urldate = {2023-01-10},
abstract = {Authors make their videos visually accessible by adding audio descriptions (AD), and auditorily accessible by adding closed captions (CC). However, creating AD and CC is challenging and tedious, especially for non-professional describers and captioners, due to the difficulty of identifying accessibility problems in videos. A video author will have to watch the video through and manually check for inaccessible information frame-by-frame, for both visual and auditory modalities. In this paper, we present CrossA11y, a system that helps authors efficiently detect and address visual and auditory accessibility issues in videos. Using cross-modal grounding analysis, CrossA11y automatically measures accessibility of visual and audio segments in a video by checking for modality asymmetries. CrossA11y then displays these segments and surfaces visual and audio accessibility issues in a unified interface, making it intuitive to locate, review, script AD/CC in-place, and preview the described and captioned video immediately. We demonstrate the effectiveness of CrossA11y through a lab study with 11 participants, comparing to existing baseline.},
keywords = {Computer Science - Human-Computer Interaction},
file = {C\:\\Users\\jseo1005\\Zotero\\storage\\XF5LI8D7\\Liu et al. - 2022 - CrossA11y Identifying Video Accessibility Issues .pdf;C\:\\Users\\jseo1005\\Zotero\\storage\\9V9UW4AL\\2208.html}
}
@article{lundgardAccessibleVisualizationNatural2022,
title = {Accessible {{Visualization}} via {{Natural Language Descriptions}}: {{A Four-Level Model}} of {{Semantic Content}}},
shorttitle = {Accessible {{Visualization}} via {{Natural Language Descriptions}}},
author = {Lundgard, Alan and Satyanarayan, Arvind},
date = {2022-01},
journaltitle = {IEEE Transactions on Visualization and Computer Graphics},
shortjournal = {IEEE Trans. Visual. Comput. Graphics},
volume = {28},
number = {1},
pages = {1073--1083},
issn = {1077-2626, 1941-0506, 2160-9306},
doi = {10.1109/TVCG.2021.3114770},
url = {https://ieeexplore.ieee.org/document/9555469/},
urldate = {2022-07-28},
abstract = {Natural language descriptions sometimes accompany visualizations to better communicate and contextualize their insights, and to improve their accessibility for readers with disabilities. However, it is difficult to evaluate the usefulness of these descriptions, and how effectively they improve access to meaningful information, because we have little understanding of the semantic content they convey, and how different readers receive this content. In response, we introduce a conceptual model for the semantic content conveyed by natural language descriptions of visualizations. Developed through a grounded theory analysis of 2,147 sentences, our model spans four levels of semantic content: enumerating visualization construction properties (e.g., marks and encodings); reporting statistical concepts and relations (e.g., extrema and correlations); identifying perceptual and cognitive phenomena (e.g., complex trends and patterns); and elucidating domain-specific insights (e.g., social and political context). To demonstrate how our model can be applied to evaluate the effectiveness of visualization descriptions, we conduct a mixed-methods evaluation with 30 blind and 90 sighted readers, and find that these reader groups differ significantly on which semantic content they rank as most useful. Together, our model and findings suggest that access to meaningful information is strongly reader-specific, and that research in automatic visualization captioning should orient toward descriptions that more richly communicate overall trends and statistics, sensitive to reader preferences. Our work further opens a space of research on natural language as a data interface coequal with visualization.},
langid = {english},
file = {C:\Users\jseo1005\Zotero\storage\QTJXT9NN\Lundgard and Satyanarayan - 2022 - Accessible Visualization via Natural Language Desc.pdf}
}
@inproceedings{mackWhatWeMean2021,
title = {What {{Do We Mean}} by “{{Accessibility Research}}”?: {{A Literature Survey}} of {{Accessibility Papers}} in {{CHI}} and {{ASSETS}} from 1994 to 2019},
shorttitle = {What {{Do We Mean}} by “{{Accessibility Research}}”?},
booktitle = {Proceedings of the 2021 {{CHI Conference}} on {{Human Factors}} in {{Computing Systems}}},
author = {Mack, Kelly and McDonnell, Emma and Jain, Dhruv and Lu Wang, Lucy and E. Froehlich, Jon and Findlater, Leah},
date = {2021-05-06},
pages = {1--18},
publisher = {{ACM}},
location = {{Yokohama Japan}},
doi = {10.1145/3411764.3445412},
url = {https://dl.acm.org/doi/10.1145/3411764.3445412},
urldate = {2023-05-17},
eventtitle = {{{CHI}} '21: {{CHI Conference}} on {{Human Factors}} in {{Computing Systems}}},