Conference Articles
2023
63. Deepthi Raghunandan, Aayushi Roy, Shenzhi Shi, Niklas Elmqvist, Leilani Battle (2023): Code Code Evolution: Understanding How People Change Data Science Notebooks Over Time. Proceedings of the ACM Conference on Human Factors in Computing Systems, ACM, New York, NY, USA, 2023. PDF: https://users.umiacs.umd.edu/~elm/projects/cce/cce.pdf
Abstract: Sensemaking is the iterative process of identifying, extracting, and explaining insights from data, where each iteration is referred to as the “sensemaking loop.” However, little is known about how sensemaking behavior evolves between exploration and explanation during this process. This gap limits our ability to understand the full scope of sensemaking, which in turn inhibits the design of tools that support the process. We contribute the first mixed-method study to characterize how sensemaking evolves within computational notebooks. We study 2,574 Jupyter notebooks mined from GitHub by identifying data science notebooks that have undergone significant iterations, presenting a regression model that automatically characterizes sensemaking activity, and using this regression model to calculate and analyze shifts in activity across GitHub versions. Our results show that notebook authors participate in various sensemaking tasks over time, such as annotation, branching analysis, and documentation. We use our insights to recommend extensions to current notebook environments.
62. Md Naimul Hoque, Md Ehtesham-Ul-Haque, Niklas Elmqvist, Syed Masum Billah (2023): Accessible Data Representation with Natural Sound. Proceedings of the ACM Conference on Human Factors in Computing Systems, ACM, New York, NY, USA, 2023. PDF: https://users.umiacs.umd.edu/~elm/projects/susurrus/susurrus.pdf
Abstract: Sonification translates data into non-speech audio. Such auditory representations can make data visualization accessible to people who are blind or have low vision (BLV). This paper presents a sonification method for translating common data visualizations into a blend of natural sounds. We hypothesize that people's familiarity with sounds drawn from nature, such as birds singing in a forest, and their ability to listen to these sounds in parallel, will enable BLV users to perceive multiple data points being sonified at the same time. Informed by an extensive literature review and a preliminary study with 5 BLV participants, we designed an accessible data representation tool, Susurrus, that combines our sonification method with other accessibility features, such as keyboard interaction and text-to-speech feedback. Finally, we conducted a user study with 12 BLV participants and report the potential and application of natural sounds for sonification compared to existing sonification tools.
61. David Saffo, Andrea Batch, Cody Dunne, Niklas Elmqvist (2023): Through Their Eyes and In Their Shoes: Providing Group Awareness During Collaboration Across Virtual Reality and Desktop Platforms. Proceedings of the ACM Conference on Human Factors in Computing Systems, ACM, New York, NY, USA, 2023. PDF: https://users.umiacs.umd.edu/~elm/projects/vrxd/vrxd.pdf; OSF: https://osf.io/wgprb/
Abstract: Many collaborative data analysis situations benefit from collaborators utilizing different platforms. However, maintaining group awareness between team members using diverging devices is difficult, not least because common ground diminishes. A person using head-mounted VR cannot physically see a user on a desktop computer even while co-located, and the desktop user cannot easily relate to the VR user's 3D workspace. To address this, we propose the "eyes-and-shoes" principles for group awareness and abstract them into four levels of techniques. Furthermore, we evaluate these principles with a qualitative user study of 6 participant pairs synchronously collaborating across distributed desktop and VR head-mounted devices. In this study, we vary the group awareness techniques between participants and explore two visualization contexts within participants. The results of this study indicate that the more the visual metaphors and views of participants diverge, the greater the level of group awareness needed. A copy of this paper, the study preregistration, and all supplemental materials required to reproduce the study are available at https://osf.io/wgprb/.
2022
60. Sebastian Hubenschmid, Jonathan Wieland, Daniel Immanuel Fink, Andrea Batch, Johannes Zagermann, Niklas Elmqvist, Harald Reiterer (2022): ReLive: Bridging In-Situ and Ex-Situ Visual Analytics for Analyzing Mixed Reality User Studies. Proceedings of the ACM Conference on Human Factors in Computing Systems, pp. 24:1–24:20, ACM, New York, NY, USA, 2022. PDF: https://users.umiacs.umd.edu/~elm/projects/relive/relive.pdf; Video: https://youtu.be/BaNZ02QkZ_k
Abstract: The nascent field of mixed reality is seeing an ever-increasing need for user studies and field evaluation, which are particularly challenging given device heterogeneity, diversity of use, and mobile deployment. Immersive analytics tools have recently emerged to support such analysis in situ, yet the complexity of the data also warrants an ex-situ analysis using more traditional non-immersive visual analytics setups. To bridge the gap between both approaches, we introduce ReLive: a mixed-immersion visual analytics framework for exploring and analyzing mixed reality user studies. ReLive combines an in-situ virtual reality view with a complementary ex-situ desktop view. While the virtual reality view allows users to relive interactive spatial recordings replicating the original study, the synchronized desktop view provides a familiar interface for analyzing aggregated data. We validated our concepts in a two-step evaluation consisting of a design walkthrough and an empirical expert user study.
59. Md Naimul Hoque, Bhavya Ghai, Niklas Elmqvist (2022): DramatVis Personae: Visual Text Analytics for Identifying Social Biases in Creative Writing. Proceedings of the ACM Conference on Designing Interactive Systems, ACM, New York, NY, USA, 2022. PDF: https://users.umiacs.umd.edu/~elm/projects/dvp/dvp.pdf
Abstract: Implicit biases and stereotypes are often pervasive in different forms of creative writing such as novels, screenplays, and children's books. To understand the kinds of biases writers are concerned about and how they mitigate those in their writing, we conducted formative interviews with nine writers. The interviews suggested that despite a writer's best interest, tracking and managing implicit biases such as a lack of agency, supporting or submissive roles, or harmful language for characters representing marginalized groups is challenging as the story becomes longer and more complicated. Based on the interviews, we developed DramatVis Personae (DVP), a visual analytics tool that allows writers to assign social identities to characters, and evaluate how characters and different intersectional social identities are represented in the story. To evaluate DVP, we first conducted think-aloud sessions with three writers and found that DVP is easy to use, naturally integrates into the writing process, and could potentially help writers in several critical bias identification tasks. We then conducted a follow-up user study with 11 writers and found that participants could answer questions related to bias detection more efficiently using DVP in comparison to a simple text editor.
2021
58. Deepthi Raghunandan, Zhe Cui, Kartik Krishnan, Segen Tirfe, Shenzhi Shi, Tejaswi Darshan Shrestha, Leilani Battle, Niklas Elmqvist (2021): Lodestar: Supporting Independent Learning and Rapid Experimentation Through Data-Driven Analysis Recommendations. Proceedings of the Symposium on Visualization in Data Science, 2021. PDF: https://users.umiacs.umd.edu/~elm/projects/lodestar/lodestar.pdf
Abstract: Keeping abreast of current trends, technologies, and best practices in visualization and data analysis is becoming increasingly difficult, especially for fledgling data scientists. In this paper, we propose Lodestar, an interactive computational notebook that allows users to quickly explore and construct new data science workflows by selecting from a list of automated analysis recommendations. We derive our recommendations from directed graphs of known analysis states, with two input sources: one manually curated from online data science tutorials, and another extracted through semi-automatic analysis of a corpus of over 6,000 Jupyter notebooks. We evaluate Lodestar in a formative study guiding our next set of improvements to the tool. Our results suggest that users find Lodestar useful for rapidly creating data science workflows.
2020
57. Andrea Batch, Biswaksen Patnaik, Moses Akazue, Niklas Elmqvist (2020): Scents and Sensibility: Evaluating Information Olfactation. Proceedings of the ACM Conference on Human Factors in Computing Systems, ACM, New York, NY, USA, 2020. PDF: https://users.umiacs.umd.edu/~elm/projects/info-olfac/scents-sense.pdf
Abstract: Olfaction, the sense of smell, is one of the least explored of the human senses for conveying abstract information. In this paper, we conduct a comprehensive perceptual experiment on information olfactation: the use of olfactory and cross-modal sensory marks and channels to convey data. More specifically, following the example of graphical perception studies, we design an experiment that studies the perceptual accuracy of four cross-modal sensory channels (scent type, scent intensity, airflow, and temperature) for conveying three different types of data: nominal, ordinal, and quantitative. We also present details of a 24-scent multi-sensory display and its software framework that we designed in order to run this experiment. Our results yield a ranking of olfactory and cross-modal sensory channels that follows similar principles as classic rankings for visual channels.
2019
56. Zhe Cui, Jayaram Kancherla, Hector Corrada Bravo, Niklas Elmqvist (2019): Sherpa: Leveraging User Attention for Computational Steering in Visual Analytics. Proceedings of the IEEE Symposium on Visualization in Data Science, IEEE, 2019. PDF: http://users.umiacs.umd.edu/~elm/projects/sherpa/sherpa.pdf
Abstract: We present Sherpa, a computational steering mechanism for progressive visual analytics that automatically prioritizes computations based on the analyst's navigational behavior in the data. The intuition is that navigation in data space is an indication of the analyst's interest in the data. The Sherpa implementation provides computational modules, such as statistics of biological inferences about gene regulation. The position of the navigation window on the genomic sequence over time is used to prioritize computations. In a study with genomics and visualization analysts, we found that Sherpa provided comparable accuracy to the offline condition, where computations were completed prior to analysis, with shorter completion times. We also provide a second example on stock market analysis.
55. Subramanian Chidambaram, Yunbo Zhang, Venkatraghavan Sundararajan, Ana M. Villanueva, Niklas Elmqvist, Karthik Ramani (2019): Shape Structuralizer: Design, Fabrication and Exploring Structurally-Sound Scaffolded Constructions using 3D Mesh Models. Proceedings of the ACM Conference on Human Factors in Computing Systems, pp. 663:1–663:12, ACM, New York, NY, USA, 2019. PDF: https://engineering.purdue.edu/cdesign/wp/wp-content/uploads/2019/02/Shape-Structuralizer-Design-Fabrication-and-User-driven-Iterative-Refinement-of-3D-Mesh-Models.pdf
Abstract: Current Computer-Aided Design (CAD) tools lack proper support for guiding novice users towards designs ready for fabrication. We propose Shape Structuralizer (SS), an interactive design support system that repurposes surface models into structural constructions using rods and custom 3D-printed joints. Shape Structuralizer embeds a recommendation system that computationally supports the user during design ideation by providing design suggestions on local refinements of the design. This strategy enables novice users to choose designs that both satisfy stress constraints as well as their personal design intent. The interactive guidance enables users to repurpose existing surface mesh models, analyze them in-situ for stress and displacement constraints, add movable joints to increase functionality, and attach a customized appearance. This also empowers novices to fabricate even complex constructs while ensuring structural soundness. We validate the Shape Structuralizer tool with a qualitative user study where we observed that even novice users were able to generate a large number of structurally safe designs for fabrication.
54. Pranathi Mylavarapu, Adil Yalcin, Xan Gregg, Niklas Elmqvist (2019): Ranked-List Visualization: A Graphical Perception Study. Proceedings of the ACM Conference on Human Factors in Computing Systems, pp. 192:1–192:12, ACM, New York, NY, USA, 2019. PDF: http://users.umiacs.umd.edu/~elm/projects/ranked-list/ranked-list.pdf
Abstract: Visualization of ranked lists is a common occurrence, but many in-the-wild solutions fly in the face of vision science and visualization wisdom. For example, treemaps and bubble charts are commonly used for this purpose, despite the fact that the data is not hierarchical and that length is easier to perceive than area. Furthermore, several new visual representations have recently been suggested in this area, including wrapped bars, packed bars, piled bars, and Zvinca plots. To quantify the differences and trade-offs for these ranked-list visualizations, we here report on a crowdsourced graphical perception study involving six such visual representations, including the ubiquitous scrolled bar chart, in three tasks: ranking (assessing a single item), comparison (two items), and average (assessing global distribution). Results show that wrapped bars may be the best choice for visualizing ranked lists, and that treemaps are surprisingly accurate despite the use of area rather than length to represent value.
53. Tom Horak, Andreas Mathisen, Clemens Nylandsted Klokmose, Raimund Dachselt, Niklas Elmqvist (2019): Vistribute: Distributing Interactive Visualizations in Dynamic Multi-Device Setups. Proceedings of the ACM Conference on Human Factors in Computing Systems, pp. 616:1–616:13, ACM, New York, NY, USA, 2019. PDF: http://users.umiacs.umd.edu/~elm/projects/vistribute/vistribute.pdf
Abstract: We present Vistribute, a framework for the automatic distribution of visualizations and UI components across multiple heterogeneous devices. Our framework consists of three parts: (i) a design space considering properties and relationships of interactive visualizations, devices, and user preferences in multi-display environments; (ii) specific heuristics incorporating these dimensions for guiding the distribution for a given interface and device ensemble; and (iii) a web-based implementation instantiating these heuristics to automatically generate a distribution as well as providing interaction mechanisms for user-defined adaptations. In contrast to existing UI distribution systems, we are able to infer all required information by analyzing the visualizations and devices without relying on additional input provided by users or programmers. In a qualitative study, we let experts create their own distributions and rate both other manual distributions and our automatic ones. We found that all distributions provided comparable quality, hence validating our framework.
52. Zhenpeng Zhao, Rachael Marr, Jason Shaffer, Niklas Elmqvist (2019): Understanding Partitioning and Sequence in Data-Driven Storytelling: The Case for Comic Strip Narration. Proceedings of the iConference, vol. 11420, pp. 327–338, Springer, 2019. PDF: http://users.umiacs.umd.edu/~elm/projects/datacomics/datacomics.pdf
Abstract: The comic strip narrative style is an effective method for data-driven storytelling. However, surely it is not enough to just add some speech bubbles and clipart to your PowerPoint slideshow to turn it into a data comic? In this paper, we investigate aspects of partitioning and sequence as fundamental mechanisms for comic strip narration: chunking complex visuals into manageable pieces, and organizing them into a meaningful order, respectively. We do this by presenting results from a qualitative study designed to elicit differences in participant behavior when solving questions using a complex infographic compared to when the same visuals are organized into a data comic.
2018
51. Andrea Batch, Hanuma Teja Maddali, Kyungjun Lee, Niklas Elmqvist (2018): Gesture and Action Discovery for Evaluating Virtual Environments with Semi-Supervised Segmentation of Telemetry Records. Proceedings of the IEEE Conference on Artificial Intelligence & Virtual Reality, pp. 1–10, IEEE, 2018. PDF: http://users.umiacs.umd.edu/~elm/projects/hceye/vr-telemetry.pdf
Abstract: In this paper, we propose a novel pipeline for semi-supervised behavioral coding of videos of users testing a device or interface, with an eye toward human-computer interaction evaluation for virtual reality. Our system applies existing statistical techniques for time-series classification, including e-divisive change point detection and "Symbolic Aggregate approXimation" (SAX) with agglomerative hierarchical clustering, to 3D pose telemetry data. These techniques create classes of short segments of single-person video data: short actions of potential interest called "micro-gestures." A long short-term memory (LSTM) layer then learns these micro-gestures from pose features generated purely from video via a pretrained OpenPose convolutional neural network (CNN) to predict their occurrence in unlabeled test videos. We present and discuss the results from testing our system on the single-user pose videos of the CMU Panoptic Dataset.
50. Sigfried Gold, Andrea Batch, Robert McClure, Guoqian Jiang, Hadi Kharrazi, Rishi Saripalle, Vojtech Huser, Chunhua Weng, Nancy Roderer, Ana Szarfman, Niklas Elmqvist, David Gotz (2018): Clinical Concept Value Sets and Interoperability in Health Data Analytics. Proceedings of the Annual AMIA Symposium, 2018. Article: https://www.ncbi.nlm.nih.gov/pmc/articles/PMC6371254/
Abstract: This paper focuses on value sets as an essential component in the health analytics ecosystem. We discuss shared repositories of reusable value sets and offer recommendations for their further development and adoption. In order to motivate these contributions, we explain how value sets fit into specific analytic tasks and the health analytics landscape more broadly; their growing importance and ubiquity with the advent of Common Data Models, Distributed Research Networks, and the availability of higher order, reusable analytic resources like electronic phenotypes and electronic clinical quality measures; the formidable barriers to value set reuse; and our introduction of a concept-agnostic orientation to vocabulary collections. The costs of ad hoc value set management and the benefits of value set reuse are described or implied throughout. Our standards, infrastructure, and design recommendations are not systematic or comprehensive but invite further work to support value set reuse for health analytics.
49. Senthil Chandrasegaran, Devarajan Ramanujan, Niklas Elmqvist (2018): How Do Sketching and Non-Sketching Actions Convey Design Intent?. Proceedings of the ACM Conference on Designing Interactive Systems, pp. 373–385, 2018. PDF: http://www.umiacs.umd.edu/~elm/projects/design-intent/design-intent.pdf
Abstract: Sketches are much more than marks on paper; they play a key role for designers both in ideation and problem-solving as well as in communication with other designers. Thus, the act of sketching is often enriched with annotations, references, and physical actions, such as gestures or speech—all of which constitute metadata about the designer's reasoning. Conventional paper-based design notebooks cannot capture this rich metadata, but digital design notebooks can. To understand how and what data to capture, we conducted an observational study of design practitioners in which they explored design solutions for a set of problems. We recorded and coded their sketching and non-sketching actions that reflect their exploration of the design space. We then categorized the captured metadata and mapped observed physical actions to design intent. These findings inform the creation of future digital design notebooks that can better capture designers' reasoning during sketching.
48. Jiawei Zhang, Chittayong Surakitbanharn, Niklas Elmqvist, Ross Maciejewski, Zhenyu Quan, David Ebert (2018): TopoText: Context-Preserving Semantic Exploration Across Multiple Spatial Scales. Proceedings of the ACM Conference on Human Factors in Computing Systems, 2018. PDF: http://www.umiacs.umd.edu/~elm/projects/topotext/topotext.pdf
Abstract: TopoText is a context-preserving technique for visualizing semantic data for multi-scale spatial aggregates to gain insight into spatial phenomena. Conventional exploration requires users to navigate across multiple scales but only presents the information related to the current scale. This limitation potentially adds more steps of interaction and cognitive overload to the users. TopoText renders multi-scale aggregates into a single visual display combining novel text-based encoding and layout methods that draw labels along the boundary or filled within the aggregates. The text itself not only summarizes the semantics at each individual scale, but also indicates the spatial coverage of the aggregates and their underlying hierarchical relationships. We validate TopoText with both a user study as well as several application examples.
47. Tom Horak, Sriram Karthik Badam, Niklas Elmqvist, Raimund Dachselt (2018): When David Meets Goliath: Combining Smartwatches with a Large Vertical Display for Visual Data Exploration. Proceedings of the ACM Conference on Human Factors in Computing Systems, 2018. PDF: http://www.umiacs.umd.edu/~elm/projects/david-goliath/david-goliath.pdf
Abstract: We explore the combination of smartwatches and a large interactive display to support visual data analysis. These two extremes of interactive surfaces are increasingly popular, but feature different characteristics—display and input modalities, personal/public use, performance, and portability. In this paper, we first identify possible roles for both devices and the interplay between them through an example scenario. We then propose a conceptual framework to enable analysts to explore data items, track interaction histories, and alter visualization configurations through mechanisms using both devices in combination. We validate an implementation of our framework through a formative evaluation and a user study. The results show that this device combination, compared to just a large display, allows users to develop complex insights more fluidly by leveraging the roles of the two devices. Finally, we report on the interaction patterns and interplay between the devices for visual exploration as observed during our study.
2017
46. M. Adil Yalcin, Niklas Elmqvist, Benjamin B. Bederson (2017): Raising the Bars: Evaluating Treemaps vs. Wrapped Bars for Dense Visualization of Sorted Numeric Data. Proceedings of Graphics Interface, 2017. PDF: http://www.umiacs.umd.edu/~elm/projects/raising-bars/RaisingTheBars-GI2017.pdf
Abstract: A standard (single-column) bar chart can effectively visualize a sorted list of numeric records. However, the chart height limits the number of visible records. To show more records, the bars could be made thinner (which could hinder identifying records individually), and scrolling requires interaction to see the overview. Treemaps have been used in practice in non-hierarchical settings for dense visualization of numeric data. Alternatively, we consider wrapped bars, a multi-column bar chart that uses length instead of area to encode numeric values. We compare treemaps and wrapped bars based on their design characteristics, and graphical perception performance for comparison, ranking, and overview tasks using crowdsourced experiments. Our analysis found that wrapped bars perceptually outperform treemaps in all three tasks for dense visualization of non-hierarchical, sorted numeric data.
45. Senthil Chandrasegaran, Sriram Karthik Badam, Ninger Zhou, Zhenpeng Zhao, Lorraine Kisselburgh, Kylie Peppler, Niklas Elmqvist, Karthik Ramani (2017): Merging Sketches for Creative Design Exploration: An Evaluation of Physical and Cognitive Operations. Proceedings of Graphics Interface, 2017. PDF: http://www.umiacs.umd.edu/~elm/projects/merge-study/merge-study.pdf
Abstract: Despite its grounding in creativity techniques, merging multiple source sketches to create new ideas has received scant attention in design literature. In this paper, we identify the physical operations involved in merging sketch components. We also introduce the cognitive operations of reuse, repurpose, refactor, and reinterpret, and explore their relevance to creative design. To examine the relationship of cognitive operations, physical techniques, and creative sketch outcomes, we conducted a qualitative user study where student designers merged existing sketches to generate either an alternative design, or an unrelated new design. We compared two digital selection techniques: freeform selection, and a stroke-cluster-based "object select" technique. The resulting merged sketches were subjected to crowdsourced evaluation, and to manual coding for the use of cognitive operations. Our findings establish a firm connection between the proposed cognitive operations and the context and outcome of creative tasks. Key findings indicate that reinterpret cognitive operations correlate strongly with creativity in merged sketches, while reuse operations correlate negatively with creativity. Furthermore, freeform selection techniques are significantly preferred by designers. We discuss the empirical contributions of understanding the use of cognitive operations during design exploration, and the practical implications for designing interfaces in digital tools that facilitate creativity in merging sketches.
44. Sriram Karthik Badam, Zehua Zheng, Emily Wall, Alex Endert, Niklas Elmqvist (2017): Supporting Team-First Visual Analytics through Group Activity Representations. Proceedings of Graphics Interface, 2017. PDF: http://www.umiacs.umd.edu/~elm/projects/group-awareness/group-awareness.pdf
Abstract: Collaborative visual analytics (CVA) involves sensemaking activities within teams of analysts based on coordination of work across team members, awareness of team activity, and communication of hypotheses, observations, and insights. We introduce a new type of CVA tool based on the notion of "team-first" visual analytics, where supporting the analytical process and needs of the entire team is the primary focus of the graphical user interface, before that of the individual analysts. To this end, we present the design space and guidelines for team-first tools in terms of conveying analyst presence, focus, and activity within the interface. We then introduce InsightsDrive, a CVA tool for multidimensional data that integrates team-first features into the interface through group activity visualizations. These include (1) in-situ representations that show the focus regions of all users, integrated into the data visualizations themselves using color-coded selection shadows, as well as (2) ex-situ representations showing the data coverage of each analyst using multidimensional visual representations. We conducted two user studies, one with individual analysts to identify the affordances of different visual representations to convey data coverage, and the other to evaluate the performance of our team-first design with ex-situ and in-situ awareness for visual analytic tasks. Our results give an understanding of the performance of our team-first features and reveal their advantages for team coordination.
43. | Jiawei Zhang, Abish Malik, Benjamin Ahlbrand, Niklas Elmqvist, Ross Maciejewski, David S. Ebert (2017): TopoGroups: Context-Preserving Visual Illustration of Multi-Scale Spatial Aggregates. Proceedings of the ACM Conference on Human Factors in Computing Systems, pp. 2940–2951, ACM, 2017. (Type: Inproceeding | Abstract | Links | BibTeX) @inproceedings{Zhang2017, title = {TopoGroups: Context-Preserving Visual Illustration of Multi-Scale Spatial Aggregates}, author = {Jiawei Zhang and Abish Malik and Benjamin Ahlbrand and Niklas Elmqvist and Ross Maciejewski and David S. Ebert}, url = {http://www.umiacs.umd.edu/~elm/projects/topogroups/topogroups.pdf, PDF}, year = {2017}, date = {2017-05-08}, booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems}, pages = {2940--2951}, publisher = {ACM}, abstract = {Spatial datasets, such as tweets in a geographic area, often exhibit different distribution patterns at multiple levels of scale, such as live updates about events occurring in very specific locations on the map. Navigating in such multi-scale data-rich spaces is often inefficient, requires users to choose between overview and detail information, and does not support identifying spatial patterns at varying scales. In this paper, we propose TopoGroups, a novel context-preserving technique that aggregates spatial data into hierarchical clusters to improve exploration and navigation at multiple spatial scales. The technique uses a boundary distortion algorithm to minimize the visual clutter caused by overlapping aggregates. Our user study explores multiple visual encoding strategies for TopoGroups including color, transparency, shading, and shapes in order to convey the hierarchical and statistical information of the geographical aggregates at different scales.}, keywords = {} } Spatial datasets, such as tweets in a geographic area, often exhibit different distribution patterns at multiple levels of scale, such as live updates about events occurring in very specific locations on the map. Navigating in such multi-scale data-rich spaces is often inefficient, requires users to choose between overview and detail information, and does not support identifying spatial patterns at varying scales. In this paper, we propose TopoGroups, a novel context-preserving technique that aggregates spatial data into hierarchical clusters to improve exploration and navigation at multiple spatial scales. The technique uses a boundary distortion algorithm to minimize the visual clutter caused by overlapping aggregates. Our user study explores multiple visual encoding strategies for TopoGroups including color, transparency, shading, and shapes in order to convey the hierarchical and statistical information of the geographical aggregates at different scales. |
42. | Cecil Piya, Vinayak, Senthil Chandrasegaran, Niklas Elmqvist, Karthik Ramani (2017): Co-3Deator: A Team-First Collaborative 3D Design Ideation Tool. Proceedings of the ACM Conference on Human Factors in Computing Systems, pp. 6581–6592, 2017. (Type: Inproceeding | Abstract | Links | BibTeX) @inproceedings{Piya2017, title = {Co-3Deator: A Team-First Collaborative 3D Design Ideation Tool}, author = {Cecil Piya and Vinayak and Senthil Chandrasegaran and Niklas Elmqvist and Karthik Ramani}, url = {http://www.umiacs.umd.edu/~elm/projects/co3deator/co3deator.pdf, PDF}, year = {2017}, date = {2017-05-08}, booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems}, pages = {6581--6592}, abstract = {We present Co-3Deator, a sketch-based collaborative 3D modeling system based on the notion of “team-first” ideation tools, where the needs and processes of the entire design team come before those of an individual designer. Co-3Deator includes two specific team-first features: a concept component hierarchy which provides a design representation suitable for multi-level sharing and reusing of design information, and a collaborative design explorer for storing, viewing, and accessing hierarchical design data during collaborative design activities. We conduct two controlled user studies, one with individual designers to elicit the form and functionality of the collaborative design explorer, and the other with design teams to evaluate the utility of the concept component hierarchy and design explorer towards collaborative design ideation. Our results support our rationale for both of the proposed team-first collaboration mechanisms and suggest further ways to streamline collaborative design.}, keywords = {} } We present Co-3Deator, a sketch-based collaborative 3D modeling system based on the notion of “team-first” ideation tools, where the needs and processes of the entire design team come before those of an individual designer. Co-3Deator includes two specific team-first features: a concept component hierarchy which provides a design representation suitable for multi-level sharing and reusing of design information, and a collaborative design explorer for storing, viewing, and accessing hierarchical design data during collaborative design activities. We conduct two controlled user studies, one with individual designers to elicit the form and functionality of the collaborative design explorer, and the other with design teams to evaluate the utility of the concept component hierarchy and design explorer towards collaborative design ideation. Our results support our rationale for both of the proposed team-first collaboration mechanisms and suggest further ways to streamline collaborative design. |
2016 | |
41. | Matthias Nielsen, Niklas Elmqvist, Kaj Grønbæk (2016): Scribble Query: Fluid Touch Brushing for Multivariate Data Visualization. Proceedings of the Australian Conference on Human-Computer Interaction, 2016. (Type: Inproceeding | Abstract | Links | BibTeX) @inproceedings{Nielsen2016, title = {Scribble Query: Fluid Touch Brushing for Multivariate Data Visualization}, author = {Matthias Nielsen and Niklas Elmqvist and Kaj Grønbæk}, url = {http://www.umiacs.umd.edu/~elm/projects/scribble-query/scribble-query.pdf, PDF}, year = {2016}, date = {2016-12-01}, booktitle = {Proceedings of the Australian Conference on Human-Computer Interaction}, abstract = {The wide availability of touch-enabled devices is a unique opportunity for visualization research to invent novel techniques to fluently explore, analyse, and understand complex and large-scale data. In this paper, we introduce Scribble Query, a novel interaction technique for fluid freehand scribbling (casual drawing) on touch-enabled devices to support interactive querying in data visualizations. Inspired by the low-entry yet rich interaction of touch drawing applications, a Scribble Query can be created with a single touch stroke yet have the expressiveness of multiple brushes (a conventionally used interaction technique). We have applied the Scribble Query interaction technique in a multivariate visualization tool, deployed the tool with domain experts from five different domains, and conducted deployment studies with these domain experts on their utilization of multivariate visualization with Scribble Query. The studies suggest that Scribble Query has a low entry barrier facilitating easy adoption, casual and infrequent usage, and in one case, enabled live dissemination of findings by the domain expert to managers in the organization.}, keywords = {} } The wide availability of touch-enabled devices is a unique opportunity for visualization research to invent novel techniques to fluently explore, analyse, and understand complex and large-scale data. In this paper, we introduce Scribble Query, a novel interaction technique for fluid freehand scribbling (casual drawing) on touch-enabled devices to support interactive querying in data visualizations. Inspired by the low-entry yet rich interaction of touch drawing applications, a Scribble Query can be created with a single touch stroke yet have the expressiveness of multiple brushes (a conventionally used interaction technique). We have applied the Scribble Query interaction technique in a multivariate visualization tool, deployed the tool with domain experts from five different domains, and conducted deployment studies with these domain experts on their utilization of multivariate visualization with Scribble Query. The studies suggest that Scribble Query has a low entry barrier facilitating easy adoption, casual and infrequent usage, and in one case, enabled live dissemination of findings by the domain expert to managers in the organization. |
40. | Sriram Karthik Badam, Fereshteh Amini, Niklas Elmqvist, Pourang Irani (2016): Supporting Visual Exploration for Multiple Users in Large Display Environments. Proceedings of the IEEE Conference on Visual Analytics Science & Technology, 2016. (Type: Inproceeding | Abstract | Links | BibTeX) @inproceedings{Badam2016b, title = {Supporting Visual Exploration for Multiple Users in Large Display Environments}, author = {Sriram Karthik Badam and Fereshteh Amini and Niklas Elmqvist and Pourang Irani}, url = {http://umiacs.umd.edu/~elm/projects/multiuser-vis/multiuser-vis.pdf, PDF https://www.youtube.com/watch?v=xd7G_q8nocc, Youtube}, year = {2016}, date = {2016-10-21}, booktitle = {Proceedings of the IEEE Conference on Visual Analytics Science & Technology}, abstract = {We present a design space exploration of interaction techniques for supporting multiple collaborators exploring data on a shared large display. Our proposed solution is based on users controlling individual lenses using both explicit gestures as well as proxemics: the spatial relations between people and physical artifacts such as their distance, orientation, and movement. We discuss different design considerations for implicit and explicit interactions through the lens, and evaluate the user experience to find a balance between the implicit and explicit interaction styles. Our findings indicate that users favor implicit interaction through proxemics for navigation and collaboration, but prefer using explicit mid-air gestures to perform actions that are perceived to be direct, such as terminating a lens composition. Based on these results, we propose a hybrid technique utilizing both proxemics and mid-air gestures, along with examples applying this technique to other datasets. Finally, we performed a usability evaluation of the hybrid technique and observed user performance improvements in the presence of both implicit and explicit interaction styles.}, keywords = {} } We present a design space exploration of interaction techniques for supporting multiple collaborators exploring data on a shared large display. Our proposed solution is based on users controlling individual lenses using both explicit gestures as well as proxemics: the spatial relations between people and physical artifacts such as their distance, orientation, and movement. We discuss different design considerations for implicit and explicit interactions through the lens, and evaluate the user experience to find a balance between the implicit and explicit interaction styles. Our findings indicate that users favor implicit interaction through proxemics for navigation and collaboration, but prefer using explicit mid-air gestures to perform actions that are perceived to be direct, such as terminating a lens composition. Based on these results, we propose a hybrid technique utilizing both proxemics and mid-air gestures, along with examples applying this technique to other datasets. Finally, we performed a usability evaluation of the hybrid technique and observed user performance improvements in the presence of both implicit and explicit interaction styles. |
39. | Deok Gun Park, Simranjit Singh, Nicholas Diakopoulos, Niklas Elmqvist (2016): Supporting Comment Moderators in Identifying High Quality Online News Comments. Proceedings of the ACM Conference on Human Factors in Computing Systems, pp. 1114–1125, 2016. (Type: Inproceeding | Abstract | Links | BibTeX) @inproceedings{Park2016, title = {Supporting Comment Moderators in Identifying High Quality Online News Comments}, author = {Deok Gun Park and Simranjit Singh and Nicholas Diakopoulos and Niklas Elmqvist}, url = {http://www.umiacs.umd.edu/~elm/projects/commentiq/commentiq.pdf, PDF}, year = {2016}, date = {2016-05-05}, booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems}, pages = {1114--1125}, abstract = {Online comments submitted by readers of news articles can provide valuable feedback and critique, personal views and perspectives, and opportunities for discussion. The varying quality of these comments necessitates that publishers remove the low-quality ones, but there is also a growing awareness that identifying and highlighting high-quality contributions can promote the general quality of the community. In this paper we take a user-centered design approach towards developing a system, CommentIQ, which supports comment moderators in interactively identifying high-quality comments using a combination of comment analytic scores as well as visualizations and flexible UI components. We evaluated this system with professional comment moderators working at local and national news outlets and provide insights into the utility and appropriateness of features for journalistic tasks, as well as how the system may enable or transform journalistic practices around online comments.}, keywords = {} } Online comments submitted by readers of news articles can provide valuable feedback and critique, personal views and perspectives, and opportunities for discussion. The varying quality of these comments necessitates that publishers remove the low-quality ones, but there is also a growing awareness that identifying and highlighting high-quality contributions can promote the general quality of the community. In this paper we take a user-centered design approach towards developing a system, CommentIQ, which supports comment moderators in interactively identifying high-quality comments using a combination of comment analytic scores as well as visualizations and flexible UI components. We evaluated this system with professional comment moderators working at local and national news outlets and provide insights into the utility and appropriateness of features for journalistic tasks, as well as how the system may enable or transform journalistic practices around online comments. |
38. | Sriram Karthik Badam, Jieqiong Zhao, Shivalik Sen, Niklas Elmqvist, David Ebert (2016): TimeFork: Interactive Prediction of Time Series. Proceedings of the ACM Conference on Human Factors in Computing Systems, pp. 5409–5420, 2016. (Type: Inproceeding | Abstract | Links | BibTeX) @inproceedings{Badam2016, title = {TimeFork: Interactive Prediction of Time Series}, author = {Sriram Karthik Badam and Jieqiong Zhao and Shivalik Sen and Niklas Elmqvist and David Ebert}, url = {http://www.umiacs.umd.edu/~elm/projects/timefork/timefork.pdf, PDF}, year = {2016}, date = {2016-05-05}, booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems}, pages = {5409--5420}, abstract = {We present TimeFork, an interactive prediction technique to support users predicting the future of time-series data, such as in financial, scientific, or medical domains. TimeFork combines visual representations of multiple time series with prediction information generated by computational models. Using this method, analysts engage in a back-and-forth dialogue with the computational model by alternating between manually predicting future changes through interaction and letting the model automatically determine the most likely outcomes, to eventually come to a common prediction using the model. This computer-supported prediction approach allows for harnessing the user’s knowledge of factors influencing future behavior, as well as sophisticated computational models drawing on past performance. To validate the TimeFork technique, we conducted a user study in a stock market prediction game. We present evidence of improved performance for participants using TimeFork compared to fully manual or fully automatic predictions, and characterize qualitative usage patterns observed during the user study.}, keywords = {} } We present TimeFork, an interactive prediction technique to support users predicting the future of time-series data, such as in financial, scientific, or medical domains. TimeFork combines visual representations of multiple time series with prediction information generated by computational models. Using this method, analysts engage in a back-and-forth dialogue with the computational model by alternating between manually predicting future changes through interaction and letting the model automatically determine the most likely outcomes, to eventually come to a common prediction using the model. This computer-supported prediction approach allows for harnessing the user’s knowledge of factors influencing future behavior, as well as sophisticated computational models drawing on past performance. To validate the TimeFork technique, we conducted a user study in a stock market prediction game. We present evidence of improved performance for participants using TimeFork compared to fully manual or fully automatic predictions, and characterize qualitative usage patterns observed during the user study. |
2015 | |
37. | Alexandru Dancu, Mickael Fourgeaud, Mohammad Obaid, Morten Fjeld, Niklas Elmqvist (2015): Map Navigation Using a Wearable Mid-air Display. Proceedings of the ACM Conference on Human-Computer Interaction with Mobile Devices and Services, pp. 71–76, 2015. (Type: Inproceeding | Abstract | Links | BibTeX) @inproceedings{Dancu2015, title = {Map Navigation Using a Wearable Mid-air Display}, author = {Alexandru Dancu and Mickael Fourgeaud and Mohammad Obaid and Morten Fjeld and Niklas Elmqvist}, url = {http://www.umiacs.umd.edu/~elm/projects/midairmap/midairmap.pdf, Paper https://www.youtube.com/watch?v=yswf1bJafp8, Talk}, year = {2015}, date = {2015-07-01}, booktitle = {Proceedings of the ACM Conference on Human-Computer Interaction with Mobile Devices and Services}, journal = {Proceedings of the ACM Conference on Human-Computer Interaction with Mobile Devices and Services}, pages = {71--76}, abstract = {Advances in display technologies will soon make wearable mid-air displays---devices that project dynamic images floating in mid-air relative to a mobile user---widely available. This kind of device will offer new input and output modalities compared to current mobile devices, and display information on the go. In this paper, we present a functional prototype for the purpose of understanding these modalities in more detail, including suitable applications and device placement. We first collected results from an online survey that identified map navigation as one of the most desirable applications and suggested placement preferences. Based on these rankings, we built a physical mid-air display prototype consisting of mobile phone, pico projector, and a holder frame, mountable in two different configurations: wrist and chest. We then designed a user study, asking participants to navigate different physical routes using map navigation displayed in mid-air. Participants considered the wrist mount to be three times safer in map navigation than the chest mount. The study results validate the use of a mid-air display for map navigation. Based on both our online survey and user study, we derive implications for the design of wearable mid-air displays.}, keywords = {} } Advances in display technologies will soon make wearable mid-air displays---devices that project dynamic images floating in mid-air relative to a mobile user---widely available. This kind of device will offer new input and output modalities compared to current mobile devices, and display information on the go. In this paper, we present a functional prototype for the purpose of understanding these modalities in more detail, including suitable applications and device placement. We first collected results from an online survey that identified map navigation as one of the most desirable applications and suggested placement preferences. Based on these rankings, we built a physical mid-air display prototype consisting of mobile phone, pico projector, and a holder frame, mountable in two different configurations: wrist and chest. We then designed a user study, asking participants to navigate different physical routes using map navigation displayed in mid-air. Participants considered the wrist mount to be three times safer in map navigation than the chest mount. The study results validate the use of a mid-air display for map navigation. Based on both our online survey and user study, we derive implications for the design of wearable mid-air displays. |
2014 | |
36. | Sriram Karthik Badam, Senthil Chandrasegaran, Niklas Elmqvist, Karthik Ramani (2014): Tracing and Sketching Performance using Blunt-Tipped Styli on Direct-Touch Tablets. Proceedings of the ACM Conference on Advanced Visual Interfaces, pp. 193–200, 2014. (Type: Inproceeding | Abstract | Links | BibTeX) @inproceedings{Badam2014a, title = {Tracing and Sketching Performance using Blunt-Tipped Styli on Direct-Touch Tablets}, author = {Sriram Karthik Badam and Senthil Chandrasegaran and Niklas Elmqvist and Karthik Ramani}, url = {http://www.umiacs.umd.edu/~elm/projects/sketch-media/sketch-media.pdf, Paper http://www.slideshare.net/NickElm/tracing-and-sketching-performance-using-blunttipped-styli-on-directtouch-tablets, Slides}, year = {2014}, date = {2014-07-01}, booktitle = {Proceedings of the ACM Conference on Advanced Visual Interfaces}, pages = {193--200}, abstract = {Direct-touch tablets are quickly replacing traditional pen-and-paper tools in many applications, but not in the case of the designer’s sketchbook. In this paper, we explore the tradeoffs inherent in replacing such paper sketchbooks with digital tablets in terms of two major tasks: tracing and free-hand sketching. Given the importance of the pen for sketching, we also study the impact of using a blunt-and-soft-tipped capacitive stylus in tablet settings. We thus conducted experiments to evaluate three sketch media: pen-paper, finger-tablet, and stylus-tablet based on the above tasks. We analyzed the tracing data with respect to speed and accuracy, and the quality of the free-hand sketches through a crowdsourced survey. The pen-paper and stylus-tablet media both performed significantly better than the finger-tablet medium in accuracy, while the pen-paper sketches were significantly rated higher quality compared to both tablet interfaces. A follow-up study comparing the performance of this stylus with a sharp, hard-tip version showed no significant difference in tracing performance, though participants preferred the sharp tip for sketching.}, keywords = {} } Direct-touch tablets are quickly replacing traditional pen-and-paper tools in many applications, but not in the case of the designer’s sketchbook. In this paper, we explore the tradeoffs inherent in replacing such paper sketchbooks with digital tablets in terms of two major tasks: tracing and free-hand sketching. Given the importance of the pen for sketching, we also study the impact of using a blunt-and-soft-tipped capacitive stylus in tablet settings. We thus conducted experiments to evaluate three sketch media: pen-paper, finger-tablet, and stylus-tablet based on the above tasks. We analyzed the tracing data with respect to speed and accuracy, and the quality of the free-hand sketches through a crowdsourced survey. The pen-paper and stylus-tablet media both performed significantly better than the finger-tablet medium in accuracy, while the pen-paper sketches were significantly rated higher quality compared to both tablet interfaces. A follow-up study comparing the performance of this stylus with a sharp, hard-tip version showed no significant difference in tracing performance, though participants preferred the sharp tip for sketching. |
35. | Sujin Jang, Niklas Elmqvist, Karthik Ramani (2014): GestureAnalyzer: Visual Analytics for Exploratory Analysis of Gesture Patterns. Proceedings of the ACM Symposium on Spatial User Interfaces, pp. 30–39, 2014. (Type: Inproceeding | Abstract | Links | BibTeX) @inproceedings{Sujin2014, title = {GestureAnalyzer: Visual Analytics for Exploratory Analysis of Gesture Patterns}, author = {Sujin Jang and Niklas Elmqvist and Karthik Ramani}, url = {http://www.umiacs.umd.edu/~elm/projects/gesture-analyzer/gesture-analyzer.pdf, Paper}, year = {2014}, date = {2014-07-01}, booktitle = {Proceedings of the ACM Symposium on Spatial User Interfaces}, pages = {30--39}, abstract = {Understanding the intent behind human gestures is a critical problem in the design of gestural interactions. A common method to observe and understand how users express gestures is to use elicitation studies. However, these studies require time-consuming analysis of user data to identify gesture patterns. Also, human analysis cannot describe gestures in as much detail as data-based representations of motion features. In this paper, we present GestureAnalyzer, a system that supports exploratory analysis of gesture patterns by applying interactive clustering and visualization techniques to motion tracking data. GestureAnalyzer enables rapid categorization of similar gestures, and visual investigation of various geometric and kinematic properties of user gestures. We describe the system components, and then demonstrate its utility through a case study on mid-air hand gestures obtained from elicitation studies.}, keywords = {} } Understanding the intent behind human gestures is a critical problem in the design of gestural interactions. A common method to observe and understand how users express gestures is to use elicitation studies. However, these studies require time-consuming analysis of user data to identify gesture patterns. Also, human analysis cannot describe gestures in as much detail as data-based representations of motion features. In this paper, we present GestureAnalyzer, a system that supports exploratory analysis of gesture patterns by applying interactive clustering and visualization techniques to motion tracking data. GestureAnalyzer enables rapid categorization of similar gestures, and visual investigation of various geometric and kinematic properties of user gestures. We describe the system components, and then demonstrate its utility through a case study on mid-air hand gestures obtained from elicitation studies. |
34. | Sriram Karthik Badam, Niklas Elmqvist (2014): PolyChrome: A Cross-Device Framework for Collaborative Web Visualization. Proceedings of the ACM Conference on Interactive Tabletops and Surfaces, pp. 109–118, 2014. (Type: Inproceeding | Abstract | Links | BibTeX) @inproceedings{Badam2014b, title = {PolyChrome: A Cross-Device Framework for Collaborative Web Visualization}, author = {Sriram Karthik Badam and Niklas Elmqvist}, url = {http://www.umiacs.umd.edu/~elm/projects/polychrome/polychrome.pdf, Paper http://www.slideshare.net/NickElm/polychrome-a-crossdevice-framework-for-collaborative-web-visualization, Slides}, year = {2014}, date = {2014-07-01}, booktitle = {Proceedings of the ACM Conference on Interactive Tabletops and Surfaces}, journal = {Proceedings of the ACM Conference on Interactive Tabletops and Surfaces}, pages = {109--118}, abstract = {We present PolyChrome, an application framework for creating web-based collaborative visualizations that can span multiple devices. The framework supports (1) co-browsing new web applications as well as legacy websites with no migration costs (i.e., a distributed web browser); (2) an API to develop new web applications that can synchronize the UI state on multiple devices to support synchronous and asynchronous collaboration; and (3) maintenance of state and input events on a server to handle common issues with distributed applications such as consistency management, conflict resolution, and undo operations. We describe PolyChrome\'s general design, architecture, and implementation followed by application examples showcasing collaborative web visualizations created using the framework. Finally, we present performance results that suggest that PolyChrome adds minimal overhead compared to single-device applications.}, keywords = {} } We present PolyChrome, an application framework for creating web-based collaborative visualizations that can span multiple devices. The framework supports (1) co-browsing new web applications as well as legacy websites with no migration costs (i.e., a distributed web browser); (2) an API to develop new web applications that can synchronize the UI state on multiple devices to support synchronous and asynchronous collaboration; and (3) maintenance of state and input events on a server to handle common issues with distributed applications such as consistency management, conflict resolution, and undo operations. We describe PolyChrome's general design, architecture, and implementation followed by application examples showcasing collaborative web visualizations created using the framework. Finally, we present performance results that suggest that PolyChrome adds minimal overhead compared to single-device applications. |
33. | William Benjamin, Senthil Chandrasegaran, Devarajan Ramanujan, Niklas Elmqvist, SVN Vishwanathan, Karthik Ramani (2014): Juxtapoze: Supporting Serendipity and Creative Expression in Clipart Compositions. Proceedings of the ACM Conference on Human Factors in Computing Systems, pp. 341–350, 2014. (Type: Inproceeding | Abstract | Links | BibTeX) @inproceedings{Benjamin2014, title = {Juxtapoze: Supporting Serendipity and Creative Expression in Clipart Compositions}, author = {William Benjamin and Senthil Chandrasegaran and Devarajan Ramanujan and Niklas Elmqvist and SVN Vishwanathan and Karthik Ramani}, url = {http://www.umiacs.umd.edu/~elm/projects/juxtapoze/juxtapoze.pdf, Paper https://youtu.be/YkLFX16fSrA, Youtube video}, year = {2014}, date = {2014-01-01}, booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems}, pages = {341--350}, abstract = {Juxtapoze is a clipart composition workflow that supports creative expression and serendipitous discoveries in the shape domain. We achieve creative expression by supporting a workflow of searching, editing, and composing: the user queries the shape database using strokes, selects the desired search result, and finally modifies the selected image before composing it into the overall drawing. Serendipitous discovery of shapes is facilitated by allowing multiple exploration channels, such as doodles, shape filtering, and relaxed search. Results from a qualitative evaluation show that Juxtapoze makes the process of creating image compositions enjoyable and supports creative expression and serendipity.}, keywords = {} } Juxtapoze is a clipart composition workflow that supports creative expression and serendipitous discoveries in the shape domain. We achieve creative expression by supporting a workflow of searching, editing, and composing: the user queries the shape database using strokes, selects the desired search result, and finally modifies the selected image before composing it into the overall drawing. Serendipitous discovery of shapes is facilitated by allowing multiple exploration channels, such as doodles, shape filtering, and relaxed search. Results from a qualitative evaluation show that Juxtapoze makes the process of creating image compositions enjoyable and supports creative expression and serendipity. |
32. | Ahmad M. M. Razip, Shehzad Afzal, Matthew Potrawski, Ross Maciejewski, Yun Jang, Niklas Elmqvist, David S. Ebert (2014): A Mobile Visual Analytics Approach for Law Enforcement Situation Awareness. Proceedings of the IEEE Pacific Symposium on Visualization, pp. 1235–1244, 2014. (Type: Inproceeding | Abstract | Links | BibTeX) @inproceedings{Razip2014, title = {A Mobile Visual Analytics Approach for Law Enforcement Situation Awareness}, author = {Ahmad M. M. Razip and Shehzad Afzal and Matthew Potrawski and Ross Maciejewski and Yun Jang and Niklas Elmqvist and David S. Ebert}, url = {http://www.umiacs.umd.edu/~elm/projects/iVALET/iVALET.pdf, Paper}, year = {2014}, date = {2014-01-01}, booktitle = {Proceedings of the IEEE Pacific Symposium on Visualization}, pages = {1235--1244}, abstract = {The advent of modern smartphones and handheld devices has given analysts, decision-makers, and even the general public the ability to rapidly ingest data and translate it into actionable information on-the-go. In this paper, we explore the design and use of a mobile visual analytics toolkit for public safety data that equips law enforcement agencies with effective situation awareness and risk assessment tools. Our system provides users with a suite of interactive tools that allow them to perform analysis and detect trends, patterns and anomalies among criminal, traffic and civil (CTC) incidents. The system also provides interactive risk assessment tools that allow users to identify regions of potential high risk and determine the risk at any user-specified location and time. Our system has been designed for the iPhone/iPad environment and is currently being used and evaluated by a consortium of law enforcement agencies. We report their use of the system and some initial feedback.}, keywords = {} } The advent of modern smartphones and handheld devices has given analysts, decision-makers, and even the general public the ability to rapidly ingest data and translate it into actionable information on-the-go. In this paper, we explore the design and use of a mobile visual analytics toolkit for public safety data that equips law enforcement agencies with effective situation awareness and risk assessment tools. Our system provides users with a suite of interactive tools that allow them to perform analysis and detect trends, patterns and anomalies among criminal, traffic and civil (CTC) incidents. The system also provides interactive risk assessment tools that allow users to identify regions of potential high risk and determine the risk at any user-specified location and time. Our system has been designed for the iPhone/iPad environment and is currently being used and evaluated by a consortium of law enforcement agencies. We report their use of the system and some initial feedback. |
31. | Zhenpeng Zhao, Sriram Karthik Badam, Senthil Chandrasegaran, Deok Gun Park, Niklas Elmqvist, Lorraine Kisselburgh, Karthik Ramani (2014): skWiki: A Multimedia Sketching System for Collaborative Creativity. Proceedings of the ACM Conference on Human Factors in Computing Systems, pp. 1235–1244, 2014. (Type: Inproceeding | Abstract | Links | BibTeX) @inproceedings{Zhao2014, title = {skWiki: A Multimedia Sketching System for Collaborative Creativity}, author = {Zhenpeng Zhao and Sriram Karthik Badam and Senthil Chandrasegaran and Deok Gun Park and Niklas Elmqvist and Lorraine Kisselburgh and Karthik Ramani}, url = {http://www.umiacs.umd.edu/~elm/projects/skwiki/skwiki.pdf, Paper https://www.youtube.com/watch?v=QxtTR14EXFQ, Video http://www.slideshare.net/NickElm/skwiki-a-multimedia-sketching-system-for-collaborative-creativity, Slides}, year = {2014}, date = {2014-01-01}, booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems}, pages = {1235--1244}, abstract = {We present skWiki, a web application framework for collaborative creativity in digital multimedia projects, including text, hand-drawn sketches, and photographs. skWiki overcomes common drawbacks of existing wiki software by providing a rich viewer/editor architecture for all media types that is integrated into the web browser itself, thus avoiding dependence on client-side editors. Instead of files, skWiki uses the concept of paths as trajectories of persistent state over time. This model has intrinsic support for collaborative editing, including cloning, branching, and merging paths edited by multiple contributors. We demonstrate skWiki\'s utility using a qualitative, sketching-based user study.}, keywords = {} } We present skWiki, a web application framework for collaborative creativity in digital multimedia projects, including text, hand-drawn sketches, and photographs. skWiki overcomes common drawbacks of existing wiki software by providing a rich viewer/editor architecture for all media types that is integrated into the web browser itself, thus avoiding dependence on client-side editors. Instead of files, skWiki uses the concept of paths as trajectories of persistent state over time. This model has intrinsic support for collaborative editing, including cloning, branching, and merging paths edited by multiple contributors. We demonstrate skWiki's utility using a qualitative, sketching-based user study. |
2012 | |
30. | Waqas Javed, Sohaib Ghani, Niklas Elmqvist (2012): GravNav: Using a Gravity Model for Multi-Scale Navigation. Proceedings of the ACM Conference on Advanced Visual Interfaces, pp. 217–224, 2012. (Type: Inproceeding | Abstract | Links | BibTeX) @inproceedings{Javed2012c, title = {GravNav: Using a Gravity Model for Multi-Scale Navigation}, author = {Waqas Javed and Sohaib Ghani and Niklas Elmqvist}, url = {http://www.umiacs.umd.edu/~elm/projects/gravnav/gravnav.pdf}, year = {2012}, date = {2012-01-01}, booktitle = {Proceedings of the ACM Conference on Advanced Visual Interfaces}, pages = {217--224}, abstract = {We present gravity navigation (GravNav), a family of multi-scale navigation techniques that use a gravity-inspired model for assisting navigation in large visual 2D spaces based on the interest and salience of visual objects in the space. GravNav is an instance of topology-aware navigation, which makes use of the structure of the visual space to aid navigation. We have performed a controlled study comparing GravNav to standard zoom and pan navigation, with and without variable-rate zoom control. Our results show a significant improvement for GravNav over standard navigation, particularly when coupled with variable-rate zoom. We also report findings on user behavior in multi-scale navigation.}, keywords = {} } We present gravity navigation (GravNav), a family of multi-scale navigation techniques that use a gravity-inspired model for assisting navigation in large visual 2D spaces based on the interest and salience of visual objects in the space. GravNav is an instance of topology-aware navigation, which makes use of the structure of the visual space to aid navigation. We have performed a controlled study comparing GravNav to standard zoom and pan navigation, with and without variable-rate zoom control. Our results show a significant improvement for GravNav over standard navigation, particularly when coupled with variable-rate zoom. We also report findings on user behavior in multi-scale navigation. |
29. | Waqas Javed, Sohaib Ghani, Niklas Elmqvist (2012): PolyZoom: Multiscale and Multifocus Exploration in 2D Visual Spaces. Proceedings of the ACM Conference on Human Factors in Computing Systems, pp. 287–296, 2012. (Type: Inproceeding | Abstract | Links | BibTeX) @inproceedings{Javed2012b, title = {PolyZoom: Multiscale and Multifocus Exploration in 2D Visual Spaces}, author = {Waqas Javed and Sohaib Ghani and Niklas Elmqvist}, url = {http://www.umiacs.umd.edu/~elm/projects/polyzoom/polyzoom.pdf}, year = {2012}, date = {2012-01-01}, booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems}, pages = {287--296}, abstract = {The most common techniques for navigating in multiscale visual spaces are pan, zoom, and bird’s eye views. However, these techniques are often tedious and cumbersome to use, especially when objects of interest are located far apart. We present the PolyZoom technique where users progressively build hierarchies of focus regions, stacked on each other such that each subsequent level shows a higher magnification. Correlation graphics show the relation between parent and child viewports in the hierarchy. To validate the new technique, we compare it to standard navigation techniques in two user studies, one on multiscale visual search and the other on multifocus interaction. Results show that PolyZoom performs better than current standard techniques. }, keywords = {} } The most common techniques for navigating in multiscale visual spaces are pan, zoom, and bird’s eye views. However, these techniques are often tedious and cumbersome to use, especially when objects of interest are located far apart. We present the PolyZoom technique where users progressively build hierarchies of focus regions, stacked on each other such that each subsequent level shows a higher magnification. Correlation graphics show the relation between parent and child viewports in the hierarchy. To validate the new technique, we compare it to standard navigation techniques in two user studies, one on multiscale visual search and the other on multifocus interaction. Results show that PolyZoom performs better than current standard techniques. |
28. | Waqas Javed, Niklas Elmqvist (2012): Exploring the Design Space of Composite Visualization. Proceedings of the IEEE Pacific Symposium on Visualization, pp. 1–8, 2012. (Type: Inproceeding | Abstract | Links | BibTeX) @inproceedings{Javed2012a, title = {Exploring the Design Space of Composite Visualization}, author = {Waqas Javed and Niklas Elmqvist}, url = {http://www.umiacs.umd.edu/~elm/projects/compvis/compvis.pdf}, year = {2012}, date = {2012-01-01}, booktitle = {Proceedings of the IEEE Pacific Symposium on Visualization}, pages = {1--8}, abstract = {We propose the notion of composite visualization views (CVVs) as a theoretical model that unifies the existing coordinated multiple views (CMV) paradigm with other strategies for combining visual representations in the same geometrical space. We identify five such strategies--called CVV design patterns--based on an extensive review of the literature in composite visualization. We go on to show how these design patterns can all be expressed in terms of a design space describing the correlation between two visualizations in terms of spatial mapping as well as the data relationships between items in the visualizations. We also discuss how to use this design space to suggest potential directions for future research.}, keywords = {} } We propose the notion of composite visualization views (CVVs) as a theoretical model that unifies the existing coordinated multiple views (CMV) paradigm with other strategies for combining visual representations in the same geometrical space. We identify five such strategies--called CVV design patterns--based on an extensive review of the literature in composite visualization. We go on to show how these design patterns can all be expressed in terms of a design space describing the correlation between two visualizations in terms of spatial mapping as well as the data relationships between items in the visualizations. We also discuss how to use this design space to suggest potential directions for future research. |
27. | Abish Malik, Ross Maciejewski, Yun Jang, Whitney Huang, Niklas Elmqvist, David Ebert (2012): A Correlative Analysis Process in a Visual Analytics Environment. Proceedings of the IEEE Conference on Visual Analytics Science and Technology, pp. 33–42, 2012. (Type: Inproceeding | Abstract | Links | BibTeX) @inproceedings{Malik2012, title = {A Correlative Analysis Process in a Visual Analytics Environment}, author = {Abish Malik and Ross Maciejewski and Yun Jang and Whitney Huang and Niklas Elmqvist and David Ebert}, url = {https://ieeexplore.ieee.org/document/6400491, IEEE Xplore}, year = {2012}, date = {2012-01-01}, booktitle = {Proceedings of the IEEE Conference on Visual Analytics Science and Technology}, pages = {33--42}, abstract = {Finding patterns and trends in spatial and temporal datasets has been a long studied problem in statistics and different domains of science. This paper presents a visual analytics approach for the interactive exploration and analysis of spatiotemporal correlations among multivariate datasets. Our approach enables users to discover correlations and explore potentially causal or predictive links at different spatiotemporal aggregation levels among the datasets, and allows them to understand the underlying statistical foundations that precede the analysis. Our technique utilizes Pearson\'s product-moment correlation coefficient and factors in the lead or lag between different datasets to detect trends and periodic patterns amongst them.}, keywords = {} } Finding patterns and trends in spatial and temporal datasets has been a long studied problem in statistics and different domains of science. This paper presents a visual analytics approach for the interactive exploration and analysis of spatiotemporal correlations among multivariate datasets. Our approach enables users to discover correlations and explore potentially causal or predictive links at different spatiotemporal aggregation levels among the datasets, and allows them to understand the underlying statistical foundations that precede the analysis. Our technique utilizes Pearson's product-moment correlation coefficient and factors in the lead or lag between different datasets to detect trends and periodic patterns amongst them. |
26. | Will McGrath, Brian Bowman, David McCallum, Juan-David Hincapie-Ramos, Niklas Elmqvist, Pourang Irani (2012): Branch-Explore-Merge: Facilitating Real-Time Revision Control in Collaborative Visual Exploration. Proceedings of the ACM Conference on Interactive Tabletops and Surfaces, pp. 235–244, 2012. (Type: Inproceeding | Abstract | Links | BibTeX) @inproceedings{McGrath2012, title = {Branch-Explore-Merge: Facilitating Real-Time Revision Control in Collaborative Visual Exploration}, author = {Will McGrath and Brian Bowman and David McCallum and Juan-David Hincapie-Ramos and Niklas Elmqvist and Pourang Irani}, url = {http://www.umiacs.umd.edu/~elm/projects/bem/bem.pdf}, year = {2012}, date = {2012-01-01}, booktitle = {Proceedings of the ACM Conference on Interactive Tabletops and Surfaces}, pages = {235--244}, abstract = {Collaborative work is characterized by participants seamlessly transitioning from working together (coupled) to working alone (decoupled). Groupware should therefore facilitate smoothly varying coupling throughout the entire collaborative session. Towards achieving such transitions for collaborative exploration and search, we propose a protocol based on managing revisions for each collaborator exploring a dataset. The protocol allows participants to diverge from the shared analysis path (branch), study the data independently (explore), and then contribute back their findings onto the shared display (merge). We apply this concept to collaborative search in multidimensional data, and propose an implementation where the public view is a tabletop display and the private views are embedded in handheld tablets. We then use this implementation to perform a qualitative user study involving a real estate dataset. Results show that participants leverage the BEM protocol, spend significant time using their private views (40% to 80% of total task time), and apply public view changes for consultation with collaborators.}, keywords = {} } Collaborative work is characterized by participants seamlessly transitioning from working together (coupled) to working alone (decoupled). Groupware should therefore facilitate smoothly varying coupling throughout the entire collaborative session. Towards achieving such transitions for collaborative exploration and search, we propose a protocol based on managing revisions for each collaborator exploring a dataset. The protocol allows participants to diverge from the shared analysis path (branch), study the data independently (explore), and then contribute back their findings onto the shared display (merge). We apply this concept to collaborative search in multidimensional data, and propose an implementation where the public view is a tabletop display and the private views are embedded in handheld tablets. We then use this implementation to perform a qualitative user study involving a real estate dataset. Results show that participants leverage the BEM protocol, spend significant time using their private views (40% to 80% of total task time), and apply public view changes for consultation with collaborators. |
25. | Sundar Murugappan, Vinayak, Niklas Elmqvist, Karthik Ramani (2012): Extended Multitouch: Recovering Touch Posture and Differentiating Users using a Depth Camera. Proceedings of the ACM Symposium on User Interface Software and Technology, pp. 487–496, 2012. (Type: Inproceeding | Abstract | Links | BibTeX) @inproceedings{Murugappan2012, title = {Extended Multitouch: Recovering Touch Posture and Differentiating Users using a Depth Camera}, author = {Sundar Murugappan and Vinayak and Niklas Elmqvist and Karthik Ramani}, url = {http://www.umiacs.umd.edu/~elm/projects/emtouch/emtouch.pdf}, year = {2012}, date = {2012-01-01}, booktitle = {Proceedings of the ACM Symposium on User Interface Software and Technology}, pages = {487--496}, abstract = {Multitouch surfaces are becoming prevalent, but most existing technologies are only capable of detecting the user’s actual points of contact on the surface and not the identity, posture, and handedness of the user. In this paper, we define the concept of extended multitouch interaction as a richer input modality that includes all of this information. We further present a practical solution to achieve this on tabletop displays based on mounting a single commodity depth camera above a horizontal surface. This will enable us to not only detect when the surface is being touched, but also recover the user’s exact finger and hand posture, as well as distinguish between different users and their handedness. We validate our approach using two user studies, and deploy the technique in a scratchpad tool and in a pen + touch sketch tool.}, keywords = {} } Multitouch surfaces are becoming prevalent, but most existing technologies are only capable of detecting the user’s actual points of contact on the surface and not the identity, posture, and handedness of the user. In this paper, we define the concept of extended multitouch interaction as a richer input modality that includes all of this information. We further present a practical solution to achieve this on tabletop displays based on mounting a single commodity depth camera above a horizontal surface. This will enable us to not only detect when the surface is being touched, but also recover the user’s exact finger and hand posture, as well as distinguish between different users and their handedness. We validate our approach using two user studies, and deploy the technique in a scratchpad tool and in a pen + touch sketch tool. |
2011 | |
24. | Pierre Dragicevic, Anastasia Bezerianos, Waqas Javed, Niklas Elmqvist, Jean-Daniel Fekete (2011): Temporal Distortion for Animated Transitions. Proceedings of the ACM Conference on Human Factors in Computing Systems, pp. 2009-2018, 2011. (Type: Inproceeding | Abstract | Links | BibTeX) @inproceedings{Dragicevic2011, title = {Temporal Distortion for Animated Transitions}, author = {Pierre Dragicevic and Anastasia Bezerianos and Waqas Javed and Niklas Elmqvist and Jean-Daniel Fekete}, url = {http://www.umiacs.umd.edu/~elm/projects/timedistort/timedistort.pdf}, year = {2011}, date = {2011-01-01}, booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems}, pages = {2009-2018}, abstract = {Animated transitions are popular in many visual applications but they can be difficult to follow, especially when many objects move at the same time. One informal design guideline for creating effective animated transitions has long been the use of slow-in/slow-out pacing, but no empirical data exist to support this practice. We remedy this by studying object tracking performance under different conditions of temporal distortion, i.e., constant speed transitions, slow-in/slow-out, fast-in/fast-out, and an adaptive technique that slows down the visually complex parts of the animation. Slow-in/slow-out outperformed other techniques, but we saw technique differences depending on the type of visual transition.}, keywords = {} } Animated transitions are popular in many visual applications but they can be difficult to follow, especially when many objects move at the same time. One informal design guideline for creating effective animated transitions has long been the use of slow-in/slow-out pacing, but no empirical data exist to support this practice. We remedy this by studying object tracking performance under different conditions of temporal distortion, i.e., constant speed transitions, slow-in/slow-out, fast-in/fast-out, and an adaptive technique that slows down the visually complex parts of the animation. Slow-in/slow-out outperformed other techniques, but we saw technique differences depending on the type of visual transition. |
23. | Sohaib Ghani, Niklas Elmqvist (2011): Improving Revisitation in Graphs through Static Spatial Features. Proceedings of Graphics Interface, pp. 175-182, 2011. (Type: Inproceeding | Abstract | Links | BibTeX) @inproceedings{Ghani2011b, title = {Improving Revisitation in Graphs through Static Spatial Features}, author = {Sohaib Ghani and Niklas Elmqvist}, url = {http://www.umiacs.umd.edu/~elm/projects/ssgf/ssgf.pdf}, year = {2011}, date = {2011-01-01}, booktitle = {Proceedings of Graphics Interface}, pages = {175-182}, abstract = {People generally remember locations in visual spaces with respect to spatial features and landmarks. Geographical maps provide many spatial features and hence are easy to remember. However, graphs are often visualized as node-link diagrams with few spatial features. We evaluate whether adding static spatial features to node-link diagrams will help in graph revisitation. We discuss three strategies for embellishing a graph and evaluate each in a user study. In our first study, we evaluate how to best add background features to a graph. In the second, we encode position using node size and color. In the third and final study, we take the best techniques from the first and second study, as well as shapes added to the graph as virtual landmarks, to find the best combination of spatial features for graph revisitation. We discuss the user study results and give our recommendations for design of graph visualization software.}, keywords = {} } People generally remember locations in visual spaces with respect to spatial features and landmarks. Geographical maps provide many spatial features and hence are easy to remember. However, graphs are often visualized as node-link diagrams with few spatial features. We evaluate whether adding static spatial features to node-link diagrams will help in graph revisitation. We discuss three strategies for embellishing a graph and evaluate each in a user study. In our first study, we evaluate how to best add background features to a graph. In the second, we encode position using node size and color. In the third and final study, we take the best techniques from the first and second study, as well as shapes added to the graph as virtual landmarks, to find the best combination of spatial features for graph revisitation. We discuss the user study results and give our recommendations for design of graph visualization software. |
22. | Waqas Javed, KyungTae Kim, Sohaib Ghani, Niklas Elmqvist (2011): Evaluating Physical/Virtual Occlusion Management Techniques for Horizontal Displays. Proceedings of INTERACT, pp. 391-408, 2011. (Type: Inproceeding | Abstract | Links | BibTeX) @inproceedings{Javed2011, title = {Evaluating Physical/Virtual Occlusion Management Techniques for Horizontal Displays}, author = {Waqas Javed and KyungTae Kim and Sohaib Ghani and Niklas Elmqvist}, url = {http://www.umiacs.umd.edu/~elm/projects/occtable/occtable.pdf}, year = {2011}, date = {2011-01-01}, booktitle = {Proceedings of INTERACT}, pages = {391-408}, abstract = {We evaluate unguided and guided visual search performance for a set of techniques that mitigate occlusion between physical and virtual objects on a tabletop display. The techniques are derived from a general model of hybrid physical/virtual occlusion, and take increasingly drastic measures to make the user aware of, identify, and access hidden objects---but with increasingly space-consuming and disruptive impact on the display. Performance is different depending on the visual display, suggesting a tradeoff between management strength and visual space deformation.}, keywords = {} } We evaluate unguided and guided visual search performance for a set of techniques that mitigate occlusion between physical and virtual objects on a tabletop display. The techniques are derived from a general model of hybrid physical/virtual occlusion, and take increasingly drastic measures to make the user aware of, identify, and access hidden objects---but with increasingly space-consuming and disruptive impact on the display. Performance is different depending on the visual display, suggesting a tradeoff between management strength and visual space deformation. |
21. | KyungTae Kim, Sungahn Ko, Niklas Elmqvist, David Ebert (2011): WordBridge: Using Composite Tag Clouds in Node-Link Diagrams for Visualizing Content and Relations in Text Corpora. Proceedings of the Hawaii International Conference on System Sciences, 2011. (Type: Inproceeding | Abstract | Links | BibTeX) @inproceedings{Kim2011, title = {WordBridge: Using Composite Tag Clouds in Node-Link Diagrams for Visualizing Content and Relations in Text Corpora}, author = {KyungTae Kim and Sungahn Ko and Niklas Elmqvist and David Ebert}, url = {http://www.umiacs.umd.edu/~elm/projects/wordbridge/wordbridge.pdf}, year = {2011}, date = {2011-01-01}, booktitle = {Proceedings of the Hawaii International Conference on System Sciences}, abstract = {We introduce WordBridge, a novel graph-based visualization technique for showing relationships between entities in text corpora. The technique is a node-link visualization where both nodes and links are tag clouds. Using these tag clouds, WordBridge can reveal relationships by representing not only entities and their connections, but also the nature of their relationship using representative keywords for nodes and edges. In this paper, we apply the technique to an interactive web-based visual analytics environment---Apropos---where a user can explore a text corpus using WordBridge. We validate the technique using several case studies based on document collections such as intelligence reports, co-authorship networks, and works of fiction.}, keywords = {} } We introduce WordBridge, a novel graph-based visualization technique for showing relationships between entities in text corpora. The technique is a node-link visualization where both nodes and links are tag clouds. Using these tag clouds, WordBridge can reveal relationships by representing not only entities and their connections, but also the nature of their relationship using representative keywords for nodes and edges. In this paper, we apply the technique to an interactive web-based visual analytics environment---Apropos---where a user can explore a text corpus using WordBridge. We validate the technique using several case studies based on document collections such as intelligence reports, co-authorship networks, and works of fiction. |
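The keyword labeling at the heart of WordBridge can be approximated in a few lines. A minimal sketch in Python; the function name and the plain term-frequency weighting are assumptions for illustration, not the paper's actual scoring:

    from collections import Counter

    def tag_cloud(docs, k=5):
        # Count terms across the documents associated with an entity (node)
        # or an entity pair (edge); keep the k most frequent as its cloud.
        counts = Counter(word.lower() for doc in docs for word in doc.split())
        return counts.most_common(k)  # (word, weight) pairs to render

    # Usage: documents mentioning a pair of entities yield their edge cloud.
    edge_cloud = tag_cloud(["report on arms shipment", "shipment delayed says report"])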
20. | Sungahn Ko, KyungTae Kim, Tejas Kulkarni, Niklas Elmqvist (2011): Applying Mobile Device Soft Keyboards to Collaborative Multitouch Tabletop Displays: Design and Evaluation. Proceedings of the ACM Conference on Interactive Tabletops and Surfaces, pp. 130-139, 2011. (Type: Inproceeding | Abstract | Links | BibTeX) @inproceedings{Ko2011, title = {Applying Mobile Device Soft Keyboards to Collaborative Multitouch Tabletop Displays: Design and Evaluation}, author = {Sungahn Ko and KyungTae Kim and Tejas Kulkarni and Niklas Elmqvist}, url = {http://www.umiacs.umd.edu/~elm/projects/table-text/table-text.pdf}, year = {2011}, date = {2011-01-01}, booktitle = {Proceedings of the ACM Conference on Interactive Tabletops and Surfaces}, pages = {130-139}, abstract = {We present an evaluation of text entry methods for tabletop displays given small display space allocations, an increasingly important design constraint as tabletops become collaborative platforms. Small space is already a requirement of mobile text entry methods, and these can often be easily ported to tabletop settings. The purpose of this work is to determine whether these mobile text entry methods are equally useful for tabletop displays, or whether there are unique aspects of text entry on large, horizontal surfaces that influence design. Our evaluation consists of two studies designed to elicit differences between the mobile and tabletop domains. Results show that standard soft keyboards perform best, even at small space allocations. Furthermore, occlusion-reduction methods like Shift do not yield significant improvements to text entry; we speculate that this is due to the low ratio of resolution per surface units (i.e., DPI) for current tabletops.}, keywords = {} } We present an evaluation of text entry methods for tabletop displays given small display space allocations, an increasingly important design constraint as tabletops become collaborative platforms. Small space is already a requirement of mobile text entry methods, and these can often be easily ported to tabletop settings. The purpose of this work is to determine whether these mobile text entry methods are equally useful for tabletop displays, or whether there are unique aspects of text entry on large, horizontal surfaces that influence design. Our evaluation consists of two studies designed to elicit differences between the mobile and tabletop domains. Results show that standard soft keyboards perform best, even at small space allocations. Furthermore, occlusion-reduction methods like Shift do not yield significant improvements to text entry; we speculate that this is due to the low ratio of resolution per surface units (i.e., DPI) for current tabletops. |
19. | Bumchul Kwon, Waqas Javed, Niklas Elmqvist, Ji-Soo Yi (2011): Direct Manipulation Through Surrogate Objects. Proceedings of the ACM Conference on Human Factors in Computing Systems, pp. 627-636, 2011. (Type: Inproceeding | Abstract | Links | BibTeX) @inproceedings{Kwon2011, title = {Direct Manipulation Through Surrogate Objects}, author = {Bumchul Kwon and Waqas Javed and Niklas Elmqvist and Ji-Soo Yi}, url = {http://www.umiacs.umd.edu/~elm/projects/surrogate/surrogate.pdf}, year = {2011}, date = {2011-01-01}, booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems}, pages = {627-636}, abstract = {Direct manipulation has had a major influence on interface design since it was proposed by Shneiderman in 1982. Although directness generally benefits users, direct manipulation also has weaknesses. In some cases, such as when a user needs to manipulate small, attribute-rich objects or multiple objects simultaneously, indirect manipulation may be more efficient at the cost of directness or intuitiveness of the interaction. Several techniques have been developed over the years to address these issues, but these are all isolated and limited efforts with no coherent underlying principle. We propose the notion of Surrogate Interaction that ties together a large subset of these techniques through the use of a surrogate object that allows users to interact with the surrogate instead of the domain object. We believe that formalizing this family of interaction techniques will provide an additional and powerful interface design alternative for interaction designers, as well as uncover opportunities for future research.}, keywords = {} } Direct manipulation has had a major influence on interface design since it was proposed by Shneiderman in 1982. Although directness generally benefits users, direct manipulation also has weaknesses. In some cases, such as when a user needs to manipulate small, attribute-rich objects or multiple objects simultaneously, indirect manipulation may be more efficient at the cost of directness or intuitiveness of the interaction. Several techniques have been developed over the years to address these issues, but these are all isolated and limited efforts with no coherent underlying principle. We propose the notion of Surrogate Interaction that ties together a large subset of these techniques through the use of a surrogate object that allows users to interact with the surrogate instead of the domain object. We believe that formalizing this family of interaction techniques will provide an additional and powerful interface design alternative for interaction designers, as well as uncover opportunities for future research. |
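The surrogate pattern the abstract describes is easy to sketch. In this hedged Python illustration (the Shape and Surrogate classes are hypothetical stand-ins, not the paper's system), edits made to the surrogate fan out to every selected domain object, which is where the efficiency gain for multi-object manipulation comes from:

    class Shape:
        pass  # stand-in for an attribute-rich domain object

    class Surrogate:
        def __init__(self, targets):
            self.targets = targets  # the currently selected domain objects

        def set_attr(self, name, value):
            for t in self.targets:  # one direct manipulation edits them all
                setattr(t, name, value)

    shapes = [Shape(), Shape(), Shape()]
    Surrogate(shapes).set_attr("color", "red")
    assert all(s.color == "red" for s in shapes)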
2010 | |
18. | Waqas Javed, Niklas Elmqvist (2010): Stack Zooming for Multi-Focus Interaction in Time-Series Data Visualization. Proceedings of the IEEE Pacific Symposium on Visualization, pp. 33–40, 2010. (Type: Inproceeding | Abstract | Links | BibTeX) @inproceedings{Javed2010, title = {Stack Zooming for Multi-Focus Interaction in Time-Series Data Visualization}, author = {Waqas Javed and Niklas Elmqvist}, url = {http://www.umiacs.umd.edu/~elm/projects/stackzoom/stackzoom.pdf, Paper https://www.youtube.com/watch?v=dK0De4XPm5Y, Youtube video http://www.slideshare.net/NickElm/stack-zooming-for-multifocus-interaction-in-timeseries-data-visualization, Slides}, year = {2010}, date = {2010-01-01}, booktitle = {Proceedings of the IEEE Pacific Symposium on Visualization}, pages = {33--40}, abstract = {Information visualization shows tremendous potential for helping both expert and casual users alike make sense of temporal data, but current time series visualization tools provide poor support for comparing several foci in a temporal dataset while retaining context and distance awareness. We introduce a method for supporting this kind of multi-focus interaction that we call stack zooming. The approach is based on the user interactively building hierarchies of 1D strips stacked on top of each other, where each subsequent stack represents a higher zoom level, and sibling strips represent branches in the visual exploration. Correlation graphics show the relation between stacks and strips of different levels, providing context and distance awareness among the focus points. The zoom hierarchies can also be used as graphical histories and for communicating insights to stakeholders. We also discuss how visual spaces that support stack zooming can be extended with annotation and local statistics computations that fit the hierarchical stacking metaphor.}, keywords = {} } Information visualization shows tremendous potential for helping both expert and casual users alike make sense of temporal data, but current time series visualization tools provide poor support for comparing several foci in a temporal dataset while retaining context and distance awareness. We introduce a method for supporting this kind of multi-focus interaction that we call stack zooming. The approach is based on the user interactively building hierarchies of 1D strips stacked on top of each other, where each subsequent stack represents a higher zoom level, and sibling strips represent branches in the visual exploration. Correlation graphics show the relation between stacks and strips of different levels, providing context and distance awareness among the focus points. The zoom hierarchies can also be used as graphical histories and for communicating insights to stakeholders. We also discuss how visual spaces that support stack zooming can be extended with annotation and local statistics computations that fit the hierarchical stacking metaphor. |
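The stacking hierarchy reads naturally as a tree of 1D intervals. A minimal sketch, assuming illustrative class and method names rather than the authors' implementation:

    class Strip:
        def __init__(self, start, end, parent=None):
            self.start, self.end = start, end  # time interval covered by this strip
            self.parent = parent
            self.children = []                 # sibling strips = branches of the exploration

        def zoom(self, start, end):
            # Create a child strip focused on [start, end] within this strip;
            # each level of the tree is one zoom level in the stack.
            assert self.start <= start < end <= self.end
            child = Strip(start, end, parent=self)
            self.children.append(child)
            return child

    # Usage: two foci branching off the same overview strip can be compared
    # side by side while the overview provides context and distance awareness.
    overview = Strip(0, 1000)
    focus_a = overview.zoom(100, 200)
    focus_b = overview.zoom(700, 800)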
17. | KyungTae Kim, Waqas Javed, Cary Williams, Niklas Elmqvist, Pourang Irani (2010): Hugin: A Framework for Awareness and Coordination in Mixed-Presence Collaborative Information Visualization. Proceedings of the ACM Conference on Interactive Tabletops and Surfaces, pp. 231–240, 2010. (Type: Inproceeding | Abstract | Links | BibTeX) @inproceedings{Kim2010, title = {Hugin: A Framework for Awareness and Coordination in Mixed-Presence Collaborative Information Visualization}, author = {KyungTae Kim and Waqas Javed and Cary Williams and Niklas Elmqvist and Pourang Irani}, url = {http://www.umiacs.umd.edu/~elm/projects/hugin/hugin.pdf}, year = {2010}, date = {2010-01-01}, booktitle = {Proceedings of the ACM Conference on Interactive Tabletops and Surfaces}, pages = {231--240}, abstract = {Analysts are increasingly encountering datasets that are larger and more complex than ever before. Effectively exploring such datasets requires collaboration between multiple analysts, who more often than not are distributed in time or in space. Mixed-presence groupware provides a shared workspace medium that supports this combination of co-located and distributed collaboration. However, collaborative visualization systems for such distributed settings have their own cost and are still uncommon in the visualization community. We present Hugin, a novel layer-based graphical framework for this kind of mixed-presence synchronous collaborative visualization over digital tabletop displays. The design of the framework focuses on issues like awareness and access control, while using information visualization for the collaborative data exploration on network-connected tabletops. To validate the usefulness of the framework, we also present examples of how Hugin can be used to implement new visualizations supporting these collaborative mechanisms.}, keywords = {} } Analysts are increasingly encountering datasets that are larger and more complex than ever before. Effectively exploring such datasets requires collaboration between multiple analysts, who more often than not are distributed in time or in space. Mixed-presence groupware provides a shared workspace medium that supports this combination of co-located and distributed collaboration. However, collaborative visualization systems for such distributed settings have their own cost and are still uncommon in the visualization community. We present Hugin, a novel layer-based graphical framework for this kind of mixed-presence synchronous collaborative visualization over digital tabletop displays. The design of the framework focuses on issues like awareness and access control, while using information visualization for the collaborative data exploration on network-connected tabletops. To validate the usefulness of the framework, we also present examples of how Hugin can be used to implement new visualizations supporting these collaborative mechanisms. |
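As a rough illustration of the layer-based access-control idea, here is a hedged sketch; all names are hypothetical and the actual framework's policy model is richer:

    class Layer:
        def __init__(self, owner, readers=None, writers=None):
            self.owner = owner
            self.readers = set(readers or [owner])  # who may see this layer
            self.writers = set(writers or [owner])  # who may edit it
            self.items = []

        def add(self, user, item):
            if user not in self.writers:
                raise PermissionError(f"{user} cannot edit {self.owner}'s layer")
            self.items.append(item)

    # Usage: a shared layer lets a remote collaborator annotate the visualization.
    shared = Layer("alice", readers=["alice", "bob"], writers=["alice", "bob"])
    shared.add("bob", "brushed selection")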
2009 | |
16. | Jean-Daniel Fekete, Niklas Elmqvist, Yves Guiard (2009): Motion-Pointing: Target Selection using Elliptical Motions. Proceedings of the ACM Conference on Human Factors in Computing Systems, pp. 289–298, 2009. (Type: Inproceeding | Abstract | Links | BibTeX) @inproceedings{Elmqvist2009a, title = {Motion-Pointing: Target Selection using Elliptical Motions}, author = {Jean-Daniel Fekete and Niklas Elmqvist and Yves Guiard}, url = {http://www.umiacs.umd.edu/~elm/projects/motionpointing/motionpointing.pdf, Paper}, year = {2009}, date = {2009-01-01}, booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems}, pages = {289--298}, abstract = {We present a novel method called motion-pointing for selecting a set of visual items, such as push-buttons or radio-buttons, without actually pointing to them. Instead, each potential target displays an animated point we call the driver. To select a specific item, the user only has to imitate the motion of its driver using the input device. Once the motion has been recognized by the system, the user can confirm the selection to trigger the action. We consider cyclic motions on an elliptic trajectory with a specific period, and study the most effective methods for real-time matching such a trajectory, as well as the range of parameters a human can reliably reproduce. We then show how to implement motion-pointing in real applications using an interaction technique we call move-and-stroke. Finally, we measure the input throughput and error rate of move-and-stroke in a controlled experiment. We show that the selection time is linearly proportional to the number of input bits conveyed up to 6 bits, confirming that motion-pointing is a practical input method.}, keywords = {} } We present a novel method called motion-pointing for selecting a set of visual items, such as push-buttons or radio-buttons, without actually pointing to them. Instead, each potential target displays an animated point we call the driver. To select a specific item, the user only has to imitate the motion of its driver using the input device. Once the motion has been recognized by the system, the user can confirm the selection to trigger the action. We consider cyclic motions on an elliptic trajectory with a specific period, and study the most effective methods for real-time matching such a trajectory, as well as the range of parameters a human can reliably reproduce. We then show how to implement motion-pointing in real applications using an interaction technique we call move-and-stroke. Finally, we measure the input throughput and error rate of move-and-stroke in a controlled experiment. We show that the selection time is linearly proportional to the number of input bits conveyed up to 6 bits, confirming that motion-pointing is a practical input method. |
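The matching step can be sketched as scoring how well a recorded cursor trace imitates each target's animated driver. A minimal Python illustration; the mean-squared-error criterion here is an assumption, not the paper's actual recognizer:

    import math

    def driver_point(t, cx, cy, rx, ry, period):
        # Position of a target's driver on its ellipse at time t.
        phase = 2 * math.pi * t / period
        return (cx + rx * math.cos(phase), cy + ry * math.sin(phase))

    def match_error(trace, cx, cy, rx, ry, period):
        # Mean squared distance between the user's trace (a list of (t, x, y)
        # samples) and the driver's trajectory over the same timestamps; the
        # driver with the lowest error is taken as the intended target.
        err = sum((x - dx) ** 2 + (y - dy) ** 2
                  for (t, x, y) in trace
                  for (dx, dy) in [driver_point(t, cx, cy, rx, ry, period)])
        return err / len(trace)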
2008 | |
15. | Niklas Elmqvist, Jean-Daniel Fekete (2008): Semantic Pointing for Object Picking in Complex 3D Environments. Proceedings of Graphics Interface, pp. 243–250, 2008. (Type: Inproceeding | Abstract | Links | BibTeX) @inproceedings{Elmqvist2008f, title = {Semantic Pointing for Object Picking in Complex 3D Environments}, author = {Niklas Elmqvist and Jean-Daniel Fekete}, url = {http://www.umiacs.umd.edu/~elm/projects/sempoint3d/sempoint3d.pdf, Paper https://www.youtube.com/watch?v=Ebv7QG0Z6lM, Youtube video}, year = {2008}, date = {2008-01-01}, booktitle = {Proceedings of Graphics Interface}, pages = {243--250}, abstract = {Today\'s large and high-resolution displays coupled with powerful graphics hardware offer the potential for highly realistic 3D virtual environments, but also cause increased target acquisition difficulty for users interacting with these environments. We present an adaptation of semantic pointing to object picking in 3D environments. Essentially, semantic picking shrinks empty space and expands potential targets on the screen by dynamically adjusting the ratio between movement in visual space and motor space for relative input devices such as the mouse. Our implementation operates in the image-space using a hierarchical representation of the standard stencil buffer to allow for real-time calculation of the closest targets for all positions on the screen. An informal user study indicates that subjects perform more accurate pointing with semantic 3D pointing than without.}, keywords = {} } Today's large and high-resolution displays coupled with powerful graphics hardware offer the potential for highly realistic 3D virtual environments, but also cause increased target acquisition difficulty for users interacting with these environments. We present an adaptation of semantic pointing to object picking in 3D environments. Essentially, semantic picking shrinks empty space and expands potential targets on the screen by dynamically adjusting the ratio between movement in visual space and motor space for relative input devices such as the mouse. Our implementation operates in the image-space using a hierarchical representation of the standard stencil buffer to allow for real-time calculation of the closest targets for all positions on the screen. An informal user study indicates that subjects perform more accurate pointing with semantic 3D pointing than without. |
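The core of semantic pointing is a dynamic control-display gain. A hedged sketch with illustrative thresholds; the paper's nearest-target computation via a hierarchical stencil buffer is elided here:

    def cd_gain(dist_to_target, target_radius, slow=0.5, fast=2.0):
        # Low gain over targets makes them larger in motor space; high gain
        # over empty space shrinks it. The two constants are assumptions.
        return slow if dist_to_target <= target_radius else fast

    def move_cursor(cursor, motor_delta, dist_to_target, target_radius):
        # Scale the physical mouse displacement by the current gain.
        g = cd_gain(dist_to_target, target_radius)
        return (cursor[0] + g * motor_delta[0], cursor[1] + g * motor_delta[1])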
14. | Niklas Elmqvist, Nathalie Henry, Yann Riche, Jean-Daniel Fekete (2008): Mélange: Space Folding for Multi-Focus Interaction. Proceedings of the ACM Conference on Human Factors in Computing Systems, pp. 1333–1342, 2008. (Type: Inproceeding | Abstract | Links | BibTeX) @inproceedings{Elmqvist2008d, title = {Mélange: Space Folding for Multi-Focus Interaction}, author = {Niklas Elmqvist and Nathalie Henry and Yann Riche and Jean-Daniel Fekete}, url = {http://www.umiacs.umd.edu/~elm/projects/melange/melange.pdf, Paper https://www.youtube.com/watch?v=I1KiO1iZ1DI, Youtube video http://www.slideshare.net/NickElm/melange-space-folding-for-multifocus-interaction, Slides}, year = {2008}, date = {2008-01-01}, booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems}, pages = {1333--1342}, abstract = {Interaction and navigation in large geometric spaces typically require a sequence of pan and zoom actions. This strategy is often ineffective and cumbersome, especially when trying to study several distant objects. We propose a new distortion technique that folds the intervening space to guarantee visibility of multiple focus regions. The folds themselves show contextual information and support unfolding and paging interactions. Compared to previous work, our method provides more context and distance awareness. We conducted a study comparing the space-folding technique to existing approaches, and found that participants performed significantly better with the new technique.}, keywords = {} } Interaction and navigation in large geometric spaces typically require a sequence of pan and zoom actions. This strategy is often ineffective and cumbersome, especially when trying to study several distant objects. We propose a new distortion technique that folds the intervening space to guarantee visibility of multiple focus regions. The folds themselves show contextual information and support unfolding and paging interactions. Compared to previous work, our method provides more context and distance awareness. We conducted a study comparing the space-folding technique to existing approaches, and found that participants performed significantly better with the new technique. |
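The essential mapping can be illustrated in one dimension. A minimal sketch assuming a linear compression of the folded span; the actual technique folds 2D space in 3D with perspective foreshortening on the fold:

    def fold_map(x, f1, f2, margin, fold_width):
        # Map document coordinate x to screen coordinate: regions within
        # `margin` of the two foci f1 and f2 keep their scale, while the
        # intervening span is compressed into a fixed-width fold.
        a, b = f1 + margin, f2 - margin  # the span to be folded away
        if x <= a:
            return x
        if x >= b:
            return x - (b - a) + fold_width  # shift everything past the fold
        # inside the fold: compress linearly into fold_width pixels
        return a + (x - a) / (b - a) * fold_width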
13. | Niklas Elmqvist, Mihail Eduard Tudoreanu, Philippas Tsigas (2008): Evaluating Motion Constraints for 3D Wayfinding in Immersive and Desktop Virtual Environments. Proceedings of the ACM Conference on Human Factors in Computing Systems, pp. 1769–1778, 2008. (Type: Inproceeding | Abstract | Links | BibTeX) @inproceedings{Elmqvist2008c, title = {Evaluating Motion Constraints for 3D Wayfinding in Immersive and Desktop Virtual Environments}, author = {Niklas Elmqvist and Mihail Eduard Tudoreanu and Philippas Tsigas}, url = {http://www.umiacs.umd.edu/~elm/projects/motcon/motcon.pdf, Paper https://www.youtube.com/watch?v=LRVTyoeuhpo, Youtube video http://www.slideshare.net/NickElm/evaluating-motion-constraints-for-3d-wayfinding-in-immersive-and-desktop-virtual-environments, Slides}, year = {2008}, date = {2008-01-01}, booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems}, pages = {1769--1778}, abstract = {Motion constraints providing guidance for 3D navigation have recently been suggested as a way of offloading some of the cognitive effort of traversing complex 3D environments on a computer. We present findings from an evaluation of the benefits of this practice where users achieved significantly better results in memory recall and performance when given access to such a guidance method. The study was conducted on both standard desktop computers with mouse and keyboard, as well as on an immersive CAVE system. Interestingly, our results also show that the improvements were more dramatic for desktop users than for CAVE users, even outperforming the latter. Furthermore, the study indicates that allowing the users to retain local control over the navigation on the desktop platform helps them in familiarizing themselves with the 3D world.}, keywords = {} } Motion constraints providing guidance for 3D navigation have recently been suggested as a way of offloading some of the cognitive effort of traversing complex 3D environments on a computer. We present findings from an evaluation of the benefits of this practice where users achieved significantly better results in memory recall and performance when given access to such a guidance method. The study was conducted on both standard desktop computers with mouse and keyboard, as well as on an immersive CAVE system. Interestingly, our results also show that the improvements were more dramatic for desktop users than for CAVE users, even outperforming the latter. Furthermore, the study indicates that allowing the users to retain local control over the navigation on the desktop platform helps them in familiarizing themselves with the 3D world. |
12. | Niklas Elmqvist, Thanh-Nghi Do, Howard Goodell, Nathalie Henry, Jean-Daniel Fekete (2008): ZAME: Interactive Large-Scale Graph Visualization. Proceedings of the IEEE Pacific Symposium on Visualization, pp. 215–222, 2008. (Type: Inproceeding | Abstract | Links | BibTeX) @inproceedings{Elmqvist2008b, title = {ZAME: Interactive Large-Scale Graph Visualization}, author = {Niklas Elmqvist and Thanh-Nghi Do and Howard Goodell and Nathalie Henry and Jean-Daniel Fekete}, url = {http://www.umiacs.umd.edu/~elm/projects/zame/zame.pdf, Paper https://www.youtube.com/watch?v=Zr25Lt_pmfw, Youtube video}, year = {2008}, date = {2008-01-01}, booktitle = {Proceedings of the IEEE Pacific Symposium on Visualization}, pages = {215--222}, abstract = {We present the Zoomable Adjacency Matrix Explorer (ZAME), a visualization tool for exploring graphs at a scale of millions of nodes and edges. ZAME is based on an adjacency matrix graph representation aggregated at multiple scales. It allows analysts to explore a graph at many levels, zooming and panning with interactive performance from an overview to the most detailed views. Several components work together in the ZAME tool to make this possible. Efficient matrix ordering algorithms group related elements. Individual data cases are aggregated into higher-order meta representations. Aggregates are arranged into a pyramid hierarchy that allows for on-demand paging to GPU shader programs to support smooth multiscale browsing. Using ZAME, we are able to explore the entire French Wikipedia---over 500,000 articles and 6,000,000 links---with interactive performance on standard consumer-level computer hardware.}, keywords = {} } We present the Zoomable Adjacency Matrix Explorer (ZAME), a visualization tool for exploring graphs at a scale of millions of nodes and edges. ZAME is based on an adjacency matrix graph representation aggregated at multiple scales. It allows analysts to explore a graph at many levels, zooming and panning with interactive performance from an overview to the most detailed views. Several components work together in the ZAME tool to make this possible. Efficient matrix ordering algorithms group related elements. Individual data cases are aggregated into higher-order meta representations. Aggregates are arranged into a pyramid hierarchy that allows for on-demand paging to GPU shader programs to support smooth multiscale browsing. Using ZAME, we are able to explore the entire French Wikipedia---over 500,000 articles and 6,000,000 links---with interactive performance on standard consumer-level computer hardware. |
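The multiscale aggregation behind ZAME can be sketched as a pyramid of pooled adjacency matrices; the GPU tile paging is elided, and the 2x2 sum pooling is an assumption about the aggregation function:

    import numpy as np

    def build_pyramid(adj):
        # Each level aggregates 2x2 blocks of the level below, so zooming
        # out one step shows summed edge weights over node groups.
        levels = [adj]
        m = adj
        while m.shape[0] > 1:
            n = m.shape[0] // 2 * 2  # drop an odd trailing row/column
            m = m[:n, :n].reshape(n // 2, 2, n // 2, 2).sum(axis=(1, 3))
            levels.append(m)
        return levels

    # Usage: an 8x8 adjacency matrix yields levels of size 8, 4, 2, and 1.
    pyr = build_pyramid(np.random.rand(8, 8))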
2007 | |
11. | Niklas Elmqvist, Mihail Eduard Tudoreanu, Philippas Tsigas (2007): Tour Generation for Exploration of 3D Virtual Environments. Proceedings of the ACM Symposium on Virtual Reality Software and Technology, pp. 207–210, 2007. (Type: Inproceeding | Abstract | Links | BibTeX) @inproceedings{Elmqvist2007i, title = {Tour Generation for Exploration of 3D Virtual Environments}, author = {Niklas Elmqvist and Mihail Eduard Tudoreanu and Philippas Tsigas}, url = {http://www.umiacs.umd.edu/~elm/projects/tourgen/tourgen.pdf, Paper https://www.youtube.com/watch?v=LRVTyoeuhpo, Youtube video}, year = {2007}, date = {2007-01-01}, booktitle = {Proceedings of the ACM Symposium on Virtual Reality Software and Technology}, pages = {207--210}, abstract = {Navigation in complex and large-scale 3D virtual environments has been shown to be a difficult task, imposing a high cognitive load on the user. In this paper, we present a comprehensive method for assisting users in exploring and understanding such 3D worlds. The method consists of two distinct phases: an off-line computation step deriving a grand tour using the world geometry and any semantic target information as input, and an on-line interactive navigation step providing guided exploration and improved spatial perception for the user. The former phase is based on a voxelized version of the geometrical dataset that is used to compute a connectivity graph for use in a TSP-like formulation of the problem. The latter phase takes the output tour from the off-line step as input for guiding 3D navigation through the environment.}, keywords = {} } Navigation in complex and large-scale 3D virtual environments has been shown to be a difficult task, imposing a high cognitive load on the user. In this paper, we present a comprehensive method for assisting users in exploring and understanding such 3D worlds. The method consists of two distinct phases: an off-line computation step deriving a grand tour using the world geometry and any semantic target information as input, and an on-line interactive navigation step providing guided exploration and improved spatial perception for the user. The former phase is based on a voxelized version of the geometrical dataset that is used to compute a connectivity graph for use in a TSP-like formulation of the problem. The latter phase takes the output tour from the off-line step as input for guiding 3D navigation through the environment. |
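The off-line tour step can be approximated with a greedy TSP heuristic. In this hedged sketch, straight-line distance stands in for the paper's voxel connectivity graph, and nearest-neighbor chaining stands in for its actual tour optimization:

    import math

    def greedy_tour(landmarks, start):
        # landmarks: list of (x, y, z) targets; returns a visiting order by
        # repeatedly walking to the nearest unvisited landmark.
        remaining = list(landmarks)
        tour, pos = [], start
        while remaining:
            nxt = min(remaining, key=lambda p: math.dist(pos, p))
            tour.append(nxt)
            remaining.remove(nxt)
            pos = nxt
        return tour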
10. | Niklas Elmqvist, John Stasko, Philippas Tsigas (2007): DataMeadow: A Visual Canvas for Analysis of Large-Scale Multivariate Data. Proceedings of the IEEE Symposium on Visual Analytics Science and Technology, pp. 187–194, 2007. (Type: Inproceeding | Abstract | Links | BibTeX) @inproceedings{Elmqvist2007h, title = {DataMeadow: A Visual Canvas for Analysis of Large-Scale Multivariate Data}, author = {Niklas Elmqvist and John Stasko and Philippas Tsigas}, url = {http://www.umiacs.umd.edu/~elm/projects/datameadow/datameadow.pdf, Paper https://www.youtube.com/watch?v=FO2MsmtWX_4, Youtube video http://www.slideshare.net/NickElm/datameadow-a-visual-canvas-for-analysis-of-largescale-multivariate-data, Slides}, year = {2007}, date = {2007-01-01}, booktitle = {Proceedings of the IEEE Symposium on Visual Analytics Science and Technology}, pages = {187--194}, abstract = {Supporting visual analytics of multiple large-scale multidimensional datasets requires a high degree of interactivity and user control beyond the conventional challenges of visualizing such datasets. We present the DataMeadow, a visual canvas providing rich interaction for constructing visual queries using graphical set representations called DataRoses. A DataRose is essentially a starplot of selected columns in a dataset displayed as multivariate visualizations with dynamic query sliders integrated into each axis. The purpose of the DataMeadow is to allow users to create advanced visual queries by iteratively selecting and filtering into the multidimensional data. Furthermore, the canvas provides a clear history of the analysis that can be annotated to facilitate dissemination of analytical results to outsiders. Towards this end, the DataMeadow has a direct manipulation interface for selection, filtering, and creation of sets, subsets, and data dependencies using both simple and complex mouse gestures. We have evaluated our system using a qualitative expert review involving two researchers working in the area. Results from this review are favorable for our new method.}, keywords = {} } Supporting visual analytics of multiple large-scale multidimensional datasets requires a high degree of interactivity and user control beyond the conventional challenges of visualizing such datasets. We present the DataMeadow, a visual canvas providing rich interaction for constructing visual queries using graphical set representations called DataRoses. A DataRose is essentially a starplot of selected columns in a dataset displayed as multivariate visualizations with dynamic query sliders integrated into each axis. The purpose of the DataMeadow is to allow users to create advanced visual queries by iteratively selecting and filtering into the multidimensional data. Furthermore, the canvas provides a clear history of the analysis that can be annotated to facilitate dissemination of analytical results to outsiders. Towards this end, the DataMeadow has a direct manipulation interface for selection, filtering, and creation of sets, subsets, and data dependencies using both simple and complex mouse gestures. We have evaluated our system using a qualitative expert review involving two researchers working in the area. Results from this review are favorable for our new method. |
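The dynamic query sliders reduce to per-axis range predicates. A minimal sketch under the assumption that each data row is a dict of column values; composing calls models the iterative filtering the abstract describes:

    def dynamic_query(rows, ranges):
        # Keep rows whose value on every constrained axis lies within that
        # axis's slider range.
        return [r for r in rows
                if all(lo <= r[dim] <= hi for dim, (lo, hi) in ranges.items())]

    cars = [{"mpg": 31, "hp": 65}, {"mpg": 14, "hp": 220}]
    economical = dynamic_query(cars, {"mpg": (25, 50)})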
9. | Niklas Elmqvist, Ulf Assarsson, Philippas Tsigas (2007): Employing Dynamic Transparency for 3D Occlusion Management: Design Issues and Evaluation. Proceedings of INTERACT, pp. 532–545, 2007. (Type: Inproceeding | Abstract | Links | BibTeX) @inproceedings{Elmqvist2007b, title = {Employing Dynamic Transparency for 3D Occlusion Management: Design Issues and Evaluation}, author = {Niklas Elmqvist and Ulf Assarsson and Philippas Tsigas}, url = {http://www.umiacs.umd.edu/~elm/projects/dyntrans/dyntrans.pdf, Paper https://www.youtube.com/watch?v=77N5KVbbEmQ, Youtube video http://www.slideshare.net/NickElm/employing-dynamic-transparency-for-3d-occlusion-management-design-issues-and-evaluation, Slides}, year = {2007}, date = {2007-01-01}, booktitle = {Proceedings of INTERACT}, pages = {532--545}, abstract = {Recent developments in occlusion management for 3D environments often involve the use of dynamic transparency, or virtual \"X-ray vision\", to promote target discovery and access in complex 3D worlds. However, there are many different approaches to achieving this effect and their actual utility for the user has yet to be evaluated. Furthermore, the introduction of semi-transparent surfaces adds additional visual complexity that may actually have a negative impact on task performance. In this paper, we report on an empirical user study comparing dynamic transparency to standard viewpoint controls. Our implementation of the technique is an image-space algorithm built using modern programmable shaders to achieve real-time performance and visually pleasing results. Results from the user study indicate that dynamic transparency is superior for perceptual tasks in terms of both efficiency and correctness.}, keywords = {} } Recent developments in occlusion management for 3D environments often involve the use of dynamic transparency, or virtual "X-ray vision", to promote target discovery and access in complex 3D worlds. However, there are many different approaches to achieving this effect and their actual utility for the user has yet to be evaluated. Furthermore, the introduction of semi-transparent surfaces adds additional visual complexity that may actually have a negative impact on task performance. In this paper, we report on an empirical user study comparing dynamic transparency to standard viewpoint controls. Our implementation of the technique is an image-space algorithm built using modern programmable shaders to achieve real-time performance and visually pleasing results. Results from the user study indicate that dynamic transparency is superior for perceptual tasks in terms of both efficiency and correctness. |
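The per-pixel rule of dynamic transparency is simple to state. Real implementations run as fragment shaders; this NumPy version is purely illustrative, and computing the mask of hidden targets is elided:

    import numpy as np

    def composite(occluder_rgb, target_rgb, target_mask, alpha=0.3):
        # Where target_mask marks a pixel whose target is hidden behind the
        # occluder, blend the occluder semi-transparently over the target
        # instead of drawing it opaque ("X-ray vision").
        out = occluder_rgb.copy()
        m = target_mask[..., None]  # broadcast the mask over RGB channels
        return np.where(m, alpha * occluder_rgb + (1 - alpha) * target_rgb, out)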
8. | Niklas Elmqvist, Philippas Tsigas (2007): TrustNeighborhoods: Visualizing Trust in Distributed File Systems. Proceedings of the Eurographics/IEEE VGTC Symposium on Visualization, pp. 107–114, 2007. (Type: Inproceeding | Abstract | Links | BibTeX) @inproceedings{Elmqvist2007e, title = {TrustNeighborhoods: Visualizing Trust in Distributed File Systems}, author = {Niklas Elmqvist and Philippas Tsigas}, url = {http://www.umiacs.umd.edu/~elm/projects/trustvis/trustvis.pdf, Paper}, year = {2007}, date = {2007-01-01}, booktitle = {Proceedings of the Eurographics/IEEE VGTC Symposium on Visualization}, pages = {107--114}, abstract = {We present TrustNeighborhoods, a security trust visualization for situational awareness on the Internet aimed at novice and intermediate users of a distributed file sharing system. The TrustNeighborhoods technique uses the metaphor of a multi-layered city or fortress to intuitively represent trust as a simple geographic relation. The visualization uses a radial space-filling layout; there is a 2D mode for editing and configuration, as well as a 3D mode for exploration and overview. In addition, the 3D mode supports a simple animated \"fly-to\" command that is intended to show the user the context and trust of a particular document by zooming in on the document and its immediate neighborhood in the 3D city. The visualization is intended for integration into an existing desktop environment, connecting to the distributed file sharing mechanisms of the environment and non-obtrusively displaying a 3D orientation animation in the background for any file being accessed over the network. A formal user study shows that the technique supports significantly higher trust assignment accuracy than manual trust assignment at the cost of only a minor time investment.}, keywords = {} } We present TrustNeighborhoods, a security trust visualization for situational awareness on the Internet aimed at novice and intermediate users of a distributed file sharing system. The TrustNeighborhoods technique uses the metaphor of a multi-layered city or fortress to intuitively represent trust as a simple geographic relation. The visualization uses a radial space-filling layout; there is a 2D mode for editing and configuration, as well as a 3D mode for exploration and overview. In addition, the 3D mode supports a simple animated "fly-to" command that is intended to show the user the context and trust of a particular document by zooming in on the document and its immediate neighborhood in the 3D city. The visualization is intended for integration into an existing desktop environment, connecting to the distributed file sharing mechanisms of the environment and non-obtrusively displaying a 3D orientation animation in the background for any file being accessed over the network. A formal user study shows that the technique supports significantly higher trust assignment accuracy than manual trust assignment at the cost of only a minor time investment. |
7. | Niklas Elmqvist, Philippas Tsigas (2007): A Taxonomy of 3D Occlusion Management Techniques. Proceedings of the IEEE Conference on Virtual Reality, pp. 51–58, 2007. (Type: Inproceeding | Abstract | Links | BibTeX) @inproceedings{Elmqvist2007f, title = {A Taxonomy of 3D Occlusion Management Techniques}, author = {Niklas Elmqvist and Philippas Tsigas}, url = {http://www.umiacs.umd.edu/~elm/projects/occmgt/occmgt.pdf, Paper}, year = {2007}, date = {2007-01-01}, booktitle = {Proceedings of the IEEE Conference on Virtual Reality}, pages = {51--58}, abstract = {While an important factor in depth perception, the occlusion effect in 3D environments also has a detrimental impact on tasks involving discovery, access, and spatial relation of objects in a 3D visualization. A number of interactive techniques have been developed in recent years to directly or indirectly deal with this problem using a wide range of different approaches. In this paper, we build on previous work on mapping out the problem space of 3D occlusion by defining a taxonomy of the design space of occlusion management techniques in an effort to formalize a common terminology and theoretical framework for this class of interactions. We classify a total of 25 different techniques for occlusion management using our taxonomy and then go on to analyze the results, deriving a set of five orthogonal design patterns for effective reduction of 3D occlusion. We also discuss the \"gaps\" in the design space, areas of the taxonomy not yet populated with existing techniques, and use these to suggest future research directions into occlusion management.}, keywords = {} } While an important factor in depth perception, the occlusion effect in 3D environments also has a detrimental impact on tasks involving discovery, access, and spatial relation of objects in a 3D visualization. A number of interactive techniques have been developed in recent years to directly or indirectly deal with this problem using a wide range of different approaches. In this paper, we build on previous work on mapping out the problem space of 3D occlusion by defining a taxonomy of the design space of occlusion management techniques in an effort to formalize a common terminology and theoretical framework for this class of interactions. We classify a total of 25 different techniques for occlusion management using our taxonomy and then go on to analyze the results, deriving a set of five orthogonal design patterns for effective reduction of 3D occlusion. We also discuss the "gaps" in the design space, areas of the taxonomy not yet populated with existing techniques, and use these to suggest future research directions into occlusion management. |
2006 | |
6. | Niklas Elmqvist, Mihail Eduard Tudoreanu (2006): Evaluating the Effectiveness of Occlusion Reduction Techniques for 3D Virtual Environments. Proceedings of the ACM Symposium on Virtual Reality Software and Technology, pp. 9-18, 2006. (Type: Inproceeding | Abstract | Links | BibTeX) @inproceedings{Elmqvist2006a, title = {Evaluating the Effectiveness of Occlusion Reduction Techniques for 3D Virtual Environments}, author = {Niklas Elmqvist and Mihail Eduard Tudoreanu}, url = {http://www.umiacs.umd.edu/~elm/projects/balloonprobe/balloonprobe-full.pdf, Paper https://www.youtube.com/watch?v=ynqG3JE6744, Youtube video}, year = {2006}, date = {2006-01-01}, booktitle = {Proceedings of the ACM Symposium on Virtual Reality Software and Technology}, pages = {9-18}, abstract = {We present an empirical usability experiment studying the relative strengths and weaknesses of three different occlusion reduction techniques for discovering and accessing objects in information-rich 3D virtual environments. More specifically, the study compares standard 3D navigation, generalized fisheye techniques using object scaling and transparency, and the BalloonProbe interactive 3D space distortion technique. Subjects are asked to complete a number of different tasks, including counting, pattern recognition, and object relation, in different kinds of environments with various properties. The environments include a free-space abstract 3D environment and a virtual 3D walkthrough application for a simple building floor. The study involved 16 subjects and was conducted in a three-sided CAVE environment. Our results confirm the general guideline that each task calls for a specialized interaction---no single technique performed best across all tasks and worlds. The results also indicate a clear trade-off between speed and accuracy; simple navigation was the fastest but also most error-prone technique, whereas spherical BalloonProbe proved the most accurate but required longer completion time, making it suitable for applications where mistakes incur a high cost.}, keywords = {} } We present an empirical usability experiment studying the relative strengths and weaknesses of three different occlusion reduction techniques for discovering and accessing objects in information-rich 3D virtual environments. More specifically, the study compares standard 3D navigation, generalized fisheye techniques using object scaling and transparency, and the BalloonProbe interactive 3D space distortion technique. Subjects are asked to complete a number of different tasks, including counting, pattern recognition, and object relation, in different kinds of environments with various properties. The environments include a free-space abstract 3D environment and a virtual 3D walkthrough application for a simple building floor. The study involved 16 subjects and was conducted in a three-sided CAVE environment. Our results confirm the general guideline that each task calls for a specialized interaction---no single technique performed best across all tasks and worlds. The results also indicate a clear trade-off between speed and accuracy; simple navigation was the fastest but also most error-prone technique, whereas spherical BalloonProbe proved the most accurate but required longer completion time, making it suitable for applications where mistakes incur a high cost. |
5. | Niklas Elmqvist, Philippas Tsigas (2006): View Projection Animation for Occlusion Reduction. Proceedings of the ACM Conference on Advanced Visual Interfaces, pp. 471–475, 2006. (Type: Inproceeding | Abstract | Links | BibTeX) @inproceedings{Elmqvist2006b, title = {View Projection Animation for Occlusion Reduction}, author = {Niklas Elmqvist and Philippas Tsigas}, url = {http://www.umiacs.umd.edu/~elm/projects/pmorph/pmorph.pdf, Paper}, year = {2006}, date = {2006-01-01}, booktitle = {Proceedings of the ACM Conference on Advanced Visual Interfaces}, pages = {471--475}, abstract = {Inter-object occlusion is inherent to 3D environments and is one of the challenges of using 3D instead of 2D computer graphics for information visualization. In this paper, we examine this occlusion problem by building a theoretical framework of its causes and components. As a result of this analysis, we present an interaction technique for view projection animation that reduces inter-object occlusion in 3D environments without modifying the geometrical properties of the objects themselves. The technique provides smooth on-demand animation between parallel and perspective projection modes as well as online manipulation of view parameters, allowing the user to quickly and easily adapt the view to avoid occlusion. A user study indicates that the technique significantly improves object discovery over normal perspective views. We have also implemented a prototype of the technique in the Blender 3D modeller.}, keywords = {} } Inter-object occlusion is inherent to 3D environments and is one of the challenges of using 3D instead of 2D computer graphics for information visualization. In this paper, we examine this occlusion problem by building a theoretical framework of its causes and components. As a result of this analysis, we present an interaction technique for view projection animation that reduces inter-object occlusion in 3D environments without modifying the geometrical properties of the objects themselves. The technique provides smooth on-demand animation between parallel and perspective projection modes as well as online manipulation of view parameters, allowing the user to quickly and easily adapt the view to avoid occlusion. A user study indicates that the technique significantly improves object discovery over normal perspective views. We have also implemented a prototype of the technique in the Blender 3D modeller. |
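One common way to realize such a projection animation is to shrink the field of view toward zero while dollying the camera back so the focal plane keeps a constant on-screen size; the paper's exact parameterization may differ, so treat this as a hedged sketch:

    import math

    def camera_for_blend(t, fov_deg=60.0, focal_dist=10.0, min_fov=0.5):
        # t in [0, 1]: 0 = full perspective, 1 = near-parallel projection.
        fov = math.radians(fov_deg + t * (min_fov - fov_deg))
        # Keep the focal plane's projected height constant across the blend:
        height = 2 * focal_dist * math.tan(math.radians(fov_deg) / 2)
        dist = height / (2 * math.tan(fov / 2))
        return fov, dist  # feed into any standard perspective matrix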
4. | Samuel Sandberg, Calle Håkansson, Niklas Elmqvist, Philippas Tsigas, Fang Chen (2006): Using 3D Audio Guidance to Locate Indoor Static Objects. Proceedings of the Human Factors and Ergonomics Society Annual Meeting, pp. 1581–1584, 2006. (Type: Inproceeding | Abstract | Links | BibTeX) @inproceedings{Sandberg2006a, title = {Using 3D Audio Guidance to Locate Indoor Static Objects}, author = {Samuel Sandberg and Calle Håkansson and Niklas Elmqvist and Philippas Tsigas and Fang Chen}, url = {http://www.umiacs.umd.edu/~elm/projects/3daudio/3daudio.pdf, Paper}, year = {2006}, date = {2006-01-01}, booktitle = {Proceedings of the Human Factors and Ergonomics Society Annual Meeting}, pages = {1581--1584}, abstract = {Is 3D audio an interesting technology for displaying navigational information in an indoor environment? This study found no significant differences between map- and 3D audio navigation. The user tasks tested involved finding objects in a real office environment. In order to conduct the study, a custom-made 3D audio system was built based on a public-domain HRTF library to play back 3D sound beacons through a pair of earphones. Our results indicate that 3D audio is indeed a qualified candidate for navigation systems, and may be especially suitable for environments or individuals where vision is obstructed, insufficient, or unavailable. The study also suggests that special cues should be added to the pure spatial information to emphasize important information.}, keywords = {} } Is 3D audio an interesting technology for displaying navigational information in an indoor environment? This study found no significant differences between map- and 3D audio navigation. The user tasks tested involved finding objects in a real office environment. In order to conduct the study, a custom-made 3D audio system was built based on a public-domain HRTF library to play back 3D sound beacons through a pair of earphones. Our results indicate that 3D audio is indeed a qualified candidate for navigation systems, and may be especially suitable for environments or individuals where vision is obstructed, insufficient, or unavailable. The study also suggests that special cues should be added to the pure spatial information to emphasize important information. |
2005 | |
3. | Niklas Elmqvist (2005): BalloonProbe: Reducing Occlusion in 3D using Interactive Space Distortion. Proceedings of the ACM Symposium on Virtual Reality Software and Technology, pp. 134–137, 2005. (Type: Inproceeding | Abstract | Links | BibTeX) @inproceedings{Elmqvist2005a, title = {BalloonProbe: Reducing Occlusion in 3D using Interactive Space Distortion}, author = {Niklas Elmqvist}, url = {http://www.umiacs.umd.edu/~elm/projects/balloonprobe/balloonprobe.pdf, Paper https://www.youtube.com/watch?v=ynqG3JE6744, Youtube video}, year = {2005}, date = {2005-01-01}, booktitle = {Proceedings of the ACM Symposium on Virtual Reality Software and Technology}, pages = {134--137}, abstract = {Using a 3D virtual environment for information visualization is a promising approach, but can in many cases be plagued by a phenomenon of literally not being able to see the forest for the trees. Some parts of the 3D visualization will inevitably occlude other parts, leading both to loss of efficiency and, more seriously, correctness; users may have to change their viewpoint in a non-trivial way to be able to access hidden objects, and, worse, they may not even discover some of the objects in the visualization due to this inter-object occlusion. In this paper, we present a space distortion interaction technique called the BalloonProbe which, on the user’s command, inflates a spherical force field that repels objects around the 3D cursor to the surface of the sphere, separating occluding objects from each other. Inflating and deflating the sphere is performed through smooth animation, ghosted traces showing the displacement of each repelled object. Our prototype implementation uses a 3D cursor for positioning as well as for inflating and deflating the force field \"balloon\". Informal testing suggests that the BalloonProbe is a powerful way of giving users interactive control over occlusion in 3D visualizations.}, keywords = {} } Using a 3D virtual environment for information visualization is a promising approach, but can in many cases be plagued by a phenomenon of literally not being able to see the forest for the trees. Some parts of the 3D visualization will inevitably occlude other parts, leading both to loss of efficiency and, more seriously, correctness; users may have to change their viewpoint in a non-trivial way to be able to access hidden objects, and, worse, they may not even discover some of the objects in the visualization due to this inter-object occlusion. In this paper, we present a space distortion interaction technique called the BalloonProbe which, on the user’s command, inflates a spherical force field that repels objects around the 3D cursor to the surface of the sphere, separating occluding objects from each other. Inflating and deflating the sphere is performed through smooth animation, ghosted traces showing the displacement of each repelled object. Our prototype implementation uses a 3D cursor for positioning as well as for inflating and deflating the force field "balloon". Informal testing suggests that the BalloonProbe is a powerful way of giving users interactive control over occlusion in 3D visualizations. |
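The displacement rule at the core of BalloonProbe is a radial projection. A minimal sketch; the smooth inflation animation and ghosted traces are omitted, and the function name is illustrative:

    import math

    def repel(obj_pos, center, radius):
        # Objects caught inside the inflated balloon are pushed radially
        # out to its surface; objects outside it are left untouched.
        d = math.dist(obj_pos, center)
        if d == 0 or d >= radius:
            return obj_pos  # at the exact center, or already outside
        s = radius / d
        return tuple(c + (p - c) * s for p, c in zip(obj_pos, center))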
2003 | |
2. | Niklas Elmqvist, Philippas Tsigas (2003): Causality Visualization Using Animated Growing Polygons. Proceedings of the IEEE Symposium on Information Visualization, pp. 189–196, 2003. (Type: Inproceeding | Abstract | Links | BibTeX) @inproceedings{Elmqvist2003b, title = {Causality Visualization Using Animated Growing Polygons}, author = {Niklas Elmqvist and Philippas Tsigas}, url = {http://www.umiacs.umd.edu/~elm/projects/causality/growing-polys.pdf, Paper}, year = {2003}, date = {2003-01-01}, booktitle = {Proceedings of the IEEE Symposium on Information Visualization}, pages = {189--196}, abstract = {We present Growing Polygons, a novel visualization technique for the graphical representation of causal relations and information flow in a system of interacting processes. Using this method, individual processes are displayed as partitioned polygons with color-coded segments showing dependencies to other processes. The entire visualization is also animated to communicate the dynamic execution of the system to the user. The results from a comparative user study of the method show that the Growing Polygons technique is significantly more efficient than the traditional Hasse diagram visualization for analysis tasks related to deducing information flow in a system for both small and large executions. Furthermore, our findings indicate that the correctness when solving causality tasks is significantly improved using our method. In addition, the subjective ratings of the users rank the method as superior in all regards, including usability, efficiency, and enjoyability.}, keywords = {} } We present Growing Polygons, a novel visualization technique for the graphical representation of causal relations and information flow in a system of interacting processes. Using this method, individual processes are displayed as partitioned polygons with color-coded segments showing dependencies to other processes. The entire visualization is also animated to communicate the dynamic execution of the system to the user. The results from a comparative user study of the method show that the Growing Polygons technique is significantly more efficient than the traditional Hasse diagram visualization for analysis tasks related to deducing information flow in a system for both small and large executions. Furthermore, our findings indicate that the correctness when solving causality tasks is significantly improved using our method. In addition, the subjective ratings of the users rank the method as superior in all regards, including usability, efficiency, and enjoyability. |
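The bookkeeping underneath the visualization can be read as transitive influence sets: each process accumulates the set of processes that have influenced it, and a message merges the sender's set into the receiver's. Growing Polygons renders these sets as color-coded polygon segments. A hedged sketch, assuming three processes named A, B, and C:

    influences = {p: {p} for p in ("A", "B", "C")}  # each process starts with itself

    def send(sender, receiver):
        influences[receiver] |= influences[sender]  # causal influence propagates

    send("A", "B")
    send("B", "C")
    assert influences["C"] == {"A", "B", "C"}  # C is transitively influenced by A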
1. | Niklas Elmqvist, Philippas Tsigas (2003): Growing Squares: Animated Visualization of Causal Relations. Proceedings of the ACM Symposium on Software Visualization, pp. 17–26, 2003. (Type: Inproceeding | Abstract | Links | BibTeX) @inproceedings{Elmqvist2003a, title = {Growing Squares: Animated Visualization of Causal Relations}, author = {Niklas Elmqvist and Philippas Tsigas}, url = {http://www.umiacs.umd.edu/~elm/projects/causality/causalviz.pdf, Paper}, year = {2003}, date = {2003-01-01}, booktitle = {Proceedings of the ACM Symposium on Software Visualization}, pages = {17--26}, abstract = {We present a novel information visualization technique for the graphical representation of causal relations, that is based on the metaphor of color pools spreading over time on a piece of paper. Messages between processes in the system affect the colors of their respective pool, making it possible to quickly see the influences each process has received. This technique, called Growing Squares, has been evaluated in a comparative user study and shown to be significantly faster and more efficient for sparse data sets than the traditional Hasse diagram visualization. Growing Squares were also more efficient for large data sets, but not significantly so. Test subjects clearly favored Growing Squares over old methods, naming the new technique easier, more efficient, and much more enjoyable to use.}, keywords = {} } We present a novel information visualization technique for the graphical representation of causal relations, that is based on the metaphor of color pools spreading over time on a piece of paper. Messages between processes in the system affect the colors of their respective pool, making it possible to quickly see the influences each process has received. This technique, called Growing Squares, has been evaluated in a comparative user study and shown to be significantly faster and more efficient for sparse data sets than the traditional Hasse diagram visualization. Growing Squares were also more efficient for large data sets, but not significantly so. Test subjects clearly favored Growing Squares over old methods, naming the new technique easier, more efficient, and much more enjoyable to use. |