Sorry, no publications matched your criteria.
Ahrens, James; Brislawn, Kristi; Martin, Ken; Geveci, Berk; Law, Charles; Papka, Michael
Large-scale data visualization using parallel data streaming Journal Article
In: Computer Graphics and Applications, IEEE, vol. 21, no. 4, pp. 34–41, 2001, (LA-UR-01-0970).
@article{ahrens2001large,
  title     = {Large-scale data visualization using parallel data streaming},
  author    = {Ahrens, James and Brislawn, Kristi and Martin, Ken and Geveci, Berk and Law, Charles and Papka, Michael},
  url       = {http://datascience.dsscale.org/wp-content/uploads/2016/06/LargeScaleDataVisualizationUsingParallelDataStreaming.pdf},
  year      = {2001},
  date      = {2001-01-01},
  journal   = {{IEEE} Computer Graphics and Applications},
  volume    = {21},
  number    = {4},
  pages     = {34--41},
  publisher = {IEEE},
  abstract  = {Effective large-scale data visualization remains a significant and important challenge with analysis codes already producing terabyte results on clusters with thousands of processors. Frequently the analysis codes produce distributed data and consume a significant portion of the available memory per node. This paper presents an architectural approach to handling these visualization problems based on mixed dataset topology parallel data streaming. This enables visualizations on a parallel cluster that would normally require more storage/memory than is available while at the same time achieving high code reuse. Results from a variety of hardware and visualization configurations are discussed with data sizes ranging near to a petabyte.},
  note      = {LA-UR-01-0970},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
Keahey, Alan; McCormick, Patrick; Ahrens, James; Keahey, Katarzyna
Qviz: a framework for querying and visualizing data Proceedings Article
In: Photonics West 2001-Electronic Imaging, pp. 259–267, International Society for Optics and Photonics 2001, (LA-UR-00-6116).
@inproceedings{keahey2001qviz,
  title        = {{Qviz}: a framework for querying and visualizing data},
  author       = {Keahey, Alan and McCormick, Patrick and Ahrens, James and Keahey, Katarzyna},
  url          = {http://datascience.dsscale.org/wp-content/uploads/2016/06/QvizAFrameworkForQueryingAndVisualizaingData.pdf},
  year         = {2001},
  date         = {2001-01-01},
  booktitle    = {Photonics West 2001-Electronic Imaging},
  pages        = {259--267},
  organization = {International Society for Optics and Photonics},
  abstract     = {Qviz is a lightweight, modular, and easy to use parallel system for interactive analytical query processing and visual presentation of large datasets. Qviz allows queries of arbitrary complexity to be easily constructed using a specialized scripting language. Visual presentation of the results is also easily achieved via simple scripted and interactive commands to our query-specific visualization tools. This paper describes our initial experiences with the Qviz system for querying and visualizing scientific datasets, showing how Qviz has been used in two different applications: ocean modeling and linear accelerator simulations.},
  note         = {LA-UR-00-6116},
  keywords     = {},
  pubstate     = {published},
  tppubtype    = {inproceedings}
}
Keahey, Katarzyna; Beckman, Peter; Ahrens, James
Ligature: Component architecture for high performance applications Journal Article
In: International Journal of High Performance Computing Applications, vol. 14, no. 4, pp. 347–356, 2000, (LA-UR-00-1519).
@article{keahey2000ligature,
  title     = {{Ligature}: Component architecture for high performance applications},
  author    = {Keahey, Katarzyna and Beckman, Peter and Ahrens, James},
  url       = {http://datascience.dsscale.org/wp-content/uploads/2016/06/LigatureComponentArchitectureForHigh-PerformanceApplications.pdf},
  year      = {2000},
  date      = {2000-01-01},
  journal   = {International Journal of High Performance Computing Applications},
  volume    = {14},
  number    = {4},
  pages     = {347--356},
  publisher = {SAGE Publications},
  abstract  = {The increasing feasibility of developing applications spanning nationwide supercomputing resources makes possible the creation of simulations composed of multiple interdisciplinary components and capable of modeling natural and social phenomena of national importance with unprecedented speed and accuracy. However, the potential offered by hardware technology often fails to be fully realized due to the lack of software environments supporting such efforts. Furthermore, the complexity of combining within one application components with different performance characteristics often prevents such applications from achieving required performance levels. The Ligature project at LANL addresses the issue of designing a software infrastructure enabling fast and efficient development of multi-component applications, and that of providing performance guidance to the programmer using this infrastructure. Ligature allows the programmer to define component interfaces specifying how heterogeneous, distributed components can interact within a larger system and provides a reusable infrastructure capable of connecting these components. These interfaces, as well as information about component performance are accessible through a database. Within this framework we are trying to understand how information about the performance of individual components, and information about performance of the framework can be combined to develop a performance-aware multi-component application.},
  note      = {LA-UR-00-1519},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
Ahrens, James; Law, Charles; Schroeder, Will; Martin, Ken; Papka, Michael
A Parallel Approach for Efficiently Visualizing Extremely Large, Time-Varying Datasets. Technical Report
2000, (LA-UR-00-1620).
@techreport{info:lanl-repo/lareport/LA-UR-00-1620,
  title        = {A Parallel Approach for Efficiently Visualizing Extremely Large, Time-Varying Datasets},
  author       = {Ahrens, James and Law, Charles and Schroeder, Will and Martin, Ken and Papka, Michael},
  url          = {http://datascience.dsscale.org/wp-content/uploads/2017/09/LA-UR-00-1620.pdf},
  year         = {2000},
  date         = {2000-01-01},
  institution  = {Los Alamos National Laboratory},
  number       = {LA-UR-00-1620},
  abstract     = {A significant unsolved problem in scientific visualization is how to efficiently visualize extremely large time-varying datasets. Using parallelism provides a promising solution. One drawback of this approach is the high overhead and specialized knowledge often required to create parallel visualization programs. In this paper, we present a parallel visualization system that is scalable, portable and encapsulates parallel programming details for its users. Our approach was to augment an existing visualization system, the visualization toolkit (VTK). Process and communication abstractions were added in order to support task, pipeline and data parallelism. The resulting system allows users to quickly write parallel visualization programs and avoid rewriting these programs when porting to new platforms. The performance of a collection of parallel visualization programs written using this system and run on both a cluster of SGI Origin 2000s and a Linux-based PC cluster is presented. In addition to showing the utility of our approach, the results offer a comparison of the performance of commodity-based computing clusters.},
  howpublished = {IEEE/VISUALIZATION CONF. ; 200010 ; SALT LAKE CITY},
  note         = {LA-UR-00-1620},
  keywords     = {},
  pubstate     = {published},
  tppubtype    = {techreport}
}
Ahrens, James; Painter, James
Efficient sort-last rendering using compression-based image compositing Proceedings Article
In: Proceedings of the 2nd Eurographics Workshop on Parallel Graphics and Visualization, pp. 145–151, Citeseer 1998, (LA-UR-98-2968).
@inproceedings{ahrens1998efficient,
  author       = {James Ahrens and James Painter},
  title        = {Efficient sort-last rendering using compression-based image compositing},
  booktitle    = {Proceedings of the 2nd Eurographics Workshop on Parallel Graphics and Visualization},
  pages        = {145--151},
  organization = {Citeseer},
  year         = {1998},
  date         = {1998-01-01},
  url          = {http://datascience.dsscale.org/wp-content/uploads/2016/06/EfficientSort-LastRenderingUsingCompression-BasedImageCompositing.pdf},
  abstract     = {State of the art scientific simulations are currently working with data set sizes on the order of a billion cells. Parallel rendering is a promising approach for interactively visualizing multiple isosurface variables from data sets of this magnitude. In sort-last rendering, each processor creates a depth buffered image of its assigned objects. All processors’ images are composited together to create a final result. Improving the efficiency of this compositing step is key to interactive parallel rendering. This paper presents a compression-based image compositing algorithm which can provide significant savings in both communication and compositing costs.},
  note         = {LA-UR-98-2968},
  keywords     = {},
  pubstate     = {published},
  tppubtype    = {inproceedings}
}
McCormick, Patrick; Ahrens, James
Visualization of wildfire simulations Journal Article
In: Computer Graphics and Applications, IEEE, vol. 18, no. 2, pp. 17–19, 1998, (LA-UR-98-0646).
@article{mccormick1998visualization,
  title     = {Visualization of wildfire simulations},
  author    = {McCormick, Patrick and Ahrens, James},
  url       = {http://datascience.dsscale.org/wp-content/uploads/2016/06/VisualizationOfWildfireSimulations.pdf},
  year      = {1998},
  date      = {1998-01-01},
  journal   = {{IEEE} Computer Graphics and Applications},
  volume    = {18},
  number    = {2},
  pages     = {17--19},
  publisher = {IEEE},
  abstract  = {Newspaper headlines constantly remind us of the human and property losses we suffer from wildfires, severe storms, earthquakes, and other natural disasters. These disasters cost the United States hundreds of lives and billions of dollars annually. Scientists at Los Alamos National Laboratory are developing computer models to predict the evolution of such disasters. Predicting the course of these events in faster than real time permits developing management strategies to minimize their adverse consequences. Presently, the complexity of models that forecast crises requires the advanced computing systems available at Los Alamos. In the near future, these models will be adapted for use in planning, training, and operational situations, but will still require advanced computing systems to run.},
  note      = {LA-UR-98-0646},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
Ahrens, James; McCormick, Patrick; Bossert, James; Reisner, Jon; Winterkamp, Judith
Case study: Wildfire visualization Proceedings Article
In: Visualization'97., Proceedings, pp. 451–454, IEEE 1997, (LA-UR-97-2761).
@inproceedings{ahrens1997case,
  title        = {Case study: Wildfire visualization},
  author       = {Ahrens, James and McCormick, Patrick and Bossert, James and Reisner, Jon and Winterkamp, Judith},
  url          = {http://datascience.dsscale.org/wp-content/uploads/2016/06/CaseStudyWildfireVisualization.pdf},
  year         = {1997},
  date         = {1997-01-01},
  booktitle    = {Proceedings of Visualization '97},
  pages        = {451--454},
  organization = {IEEE},
  abstract     = {The ability to forecast the progress of crisis events would significantly reduce human suffering and loss of life, the destruction of property, and expenditures for assessment and recovery. Los Alamos National Laboratory has established a scientific thrust in crisis forecasting to address this national challenge. In the initial phase of this project, scientists at Los Alamos are developing computer models to predict the spread of a wildfire. Visualization of the results of the wildfire simulation will be used by scientists to assess the quality of the simulation and eventually by fire personnel as a visual forecast of the wildfire’s evolution. The fire personnel and scientists want the visualization to look as realistic as possible without compromising scientific accuracy. This paper describes how the visualization was created, analyzes the tools and approach that was used, and suggests directions for future work and research.},
  note         = {LA-UR-97-2761},
  keywords     = {},
  pubstate     = {published},
  tppubtype    = {inproceedings}
}
Jakobovits, Rex; Lewis, Lara; Ahrens, James; Shapiro, Linda; Tanimoto, Steven; Brinkley, James
A visual database environment for scientific research Journal Article
In: Journal of Visual Languages & Computing, vol. 7, no. 4, pp. 361–375, 1996, (LA-UR-pending).
@article{jakobovits1996visual,
  title     = {A visual database environment for scientific research},
  author    = {Jakobovits, Rex and Lewis, Lara and Ahrens, James and Shapiro, Linda and Tanimoto, Steven and Brinkley, James},
  url       = {http://datascience.dsscale.org/wp-content/uploads/2016/06/AVisualDatabaseEnvironmentForScientificResearch.pdf},
  year      = {1996},
  date      = {1996-01-01},
  journal   = {Journal of Visual Languages \& Computing},
  volume    = {7},
  number    = {4},
  pages     = {361--375},
  publisher = {Elsevier},
  abstract  = {This paper describes a visual database environment designed to be used for scientific research in the imaging sciences. It provides hierarchical relational structures that allow the user to model data as entities possessing properties, parts and relationships, and it supports multi-level queries on these structures. A schema constructor interface allows users to define for each structure, not only its components, but also its visualization, which is built from its components using graphical primitives. Finally, an experiment management subsystem allows users to construct and run computational experiments that apply imaging operators to data from the database. The experiment management system keeps track of the experimental procedures developed by the user and the results generated by executing these procedures.},
  note      = {LA-UR-pending},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
Ahrens, James; Hansen, Charles
Cost-effective data-parallel load balancing Proceedings Article
In: ICPP (2), pp. 218–221, 1995, (LA-UR-95-1462).
@inproceedings{ahrens1995cost,
  title     = {Cost-effective data-parallel load balancing},
  author    = {Ahrens, James and Hansen, Charles},
  url       = {http://datascience.dsscale.org/wp-content/uploads/2016/06/Cost-EffectiveData-ParallelLoadBalancing.pdf},
  year      = {1995},
  date      = {1995-01-01},
  booktitle = {ICPP (2)},
  pages     = {218--221},
  abstract  = {Load balancing algorithms improve a program’s performance on unbalanced datasets, but can degrade performance on balanced datasets, because unnecessary load redistributions occur. This paper presents a cost-effective data-parallel load balancing algorithm which performs load redistributions only when the possible savings outweigh the redistribution costs. Experiments with a data-parallel polygon renderer show a performance improvement of up to a factor of 33 on unbalanced datasets and a maximum performance loss of only 27 percent on balanced datasets when using this algorithm.},
  note      = {LA-UR-95-1462},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Shapiro, Linda; Tanimoto, Steven; Brinkley, James; Ahrens, James; Jakobovits, Rex; Lewis, Lara
A visual database system for data and experiment management in model-based computer vision Proceedings Article
In: CAD-Based Vision Workshop, 1994., Proceedings of the 1994 Second, pp. 64–72, IEEE 1994, (LA-UR-pending).
@inproceedings{shapiro1994visual,
  title        = {A visual database system for data and experiment management in model-based computer vision},
  author       = {Shapiro, Linda and Tanimoto, Steven and Brinkley, James and Ahrens, James and Jakobovits, Rex and Lewis, Lara},
  url          = {http://datascience.dsscale.org/wp-content/uploads/2016/06/AVisualDatabaseSystemForDataAndExperimentManagementInModel-BasedComputerVision.pdf},
  year         = {1994},
  date         = {1994-01-01},
  booktitle    = {Proceedings of the 1994 Second CAD-Based Vision Workshop},
  pages        = {64--72},
  organization = {IEEE},
  abstract     = {Computer vision researchers work with many different forms of data. Model-based vision systems work with geometric models of 3D objects, intensity or range images, and many different kinds of features that are extracted from these images. The recognition/pose estimation process involves a number of different steps and different operations all of which take in and generate various forms of data. Figure 1 illustrates the operations and data types required for a sample recognition process (Shapiro, Neal, and Ponder; 1992). The process starts with a gray-scale image and produces an edge image, a line segment structure, and a triple chain structure (described in Section 2). Each object in the model database is represented by a set of its major views, and each major view is represented by a triple chain structure. The triple chain structure that was extracted from the image and the set of triple chain structures representing the major views (view classes) are input to the matching algorithm which tries to identify the view class or classes that most closely match the view in the image. This process illustrates the kind of experiments that model-based vision researchers perform; some are simpler than the one shown, and some are much more complex.},
  note         = {LA-UR-pending},
  keywords     = {},
  pubstate     = {published},
  tppubtype    = {inproceedings}
}
Ortega, Frank; Hansen, Charles; Ahrens, James
Fast Data Parallel Polygon Rendering Proceedings Article
In: Proceedings of the 1993 ACM/IEEE Conference on Supercomputing, pp. 709–718, ACM, Portland, Oregon, USA, 1993, ISBN: 0-8186-4340-4, (LA-UR-93-3173).
@inproceedings{Ortega:1993:FDP:169627.169820,
  title     = {Fast Data Parallel Polygon Rendering},
  author    = {Ortega, Frank and Hansen, Charles and Ahrens, James},
  url       = {http://datascience.dsscale.org/wp-content/uploads/2016/06/FastDataParellelPolygonRendering.pdf},
  doi       = {10.1145/169627.169820},
  isbn      = {0-8186-4340-4},
  year      = {1993},
  date      = {1993-01-01},
  booktitle = {Proceedings of the 1993 ACM/IEEE Conference on Supercomputing},
  pages     = {709--718},
  publisher = {ACM},
  address   = {Portland, Oregon, USA},
  series    = {Supercomputing '93},
  abstract  = {This paper describes a parallel method for polygonal rendering on a massively parallel SIMD machine. This method, based on a simple shading model, is targeted for applications which require very fast polygon rendering for extremely large sets of polygons such as is found in many scientific visualization applications. The algorithms described in this paper are incorporated into a library of 3D graphics routines written for the Connection Machine. The routines are implemented on both the CM-200 and the CM-5. This library enables a scientist to display 3D shaded polygons directly from a parallel machine without the need to transmit huge amounts of data to a post-processing rendering system.},
  note      = {LA-UR-93-3173},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Sane, Sudhanshu; Yenpure, Abhishek; Bujack, Roxana; Larsen, Matthew; Moreland, Ken; Garth, Christoph; Johnson, Chris; Childs, Hank
Scalable In Situ Computation of Lagrangian Representations via Local Flow Maps Journal Article
In: Eurographics Symposium on Parallel Graphics and Visualization (EGPGV), 2021.
@article{osti_1808167,
  title         = {Scalable In Situ Computation of Lagrangian Representations via Local Flow Maps},
  author        = {Sane, Sudhanshu and Yenpure, Abhishek and Bujack, Roxana and Larsen, Matthew and Moreland, Ken and Garth, Christoph and Johnson, Chris and Childs, Hank},
  url           = {http://www.informatik.uni-leipzig.de/~bujack/2021EGPGV.pdf},
  year          = {2021},
  date          = {2021-01-01},
  abstract      = {In situ computation of Lagrangian flow maps to enable post hoc time-varying vector field analysis has recently become an active area of research. However, the current literature is largely limited to theoretical settings and lacks a solution to address scalability of the technique in distributed memory. To improve scalability, we propose and evaluate the benefits and limitations of a simple, yet novel, performance optimization. Our proposed optimization is a communication-free model resulting in local Lagrangian flow maps, requiring no message passing or synchronization between processes, intrinsically improving scalability, and thereby reducing overall execution time and alleviating the encumbrance placed on simulation codes from communication overheads. To evaluate our approach, we computed Lagrangian flow maps for four time-varying simulation vector fields and investigated how execution time and reconstruction accuracy are impacted by the number of GPUs per compute node, the total number of compute nodes, particles per rank, and storage intervals. Our study consisted of experiments computing Lagrangian flow maps with up to 67M particle trajectories over 500 cycles and used as many as 2048 GPUs across 512 compute nodes. In all, our study contributes an evaluation of a communication-free model as well as a scalability study of computing distributed Lagrangian flow maps at scale using in situ infrastructure on a modern supercomputer.},
  internal-note = {NOTE(review): year inferred from the 2021EGPGV filename in the URL; venue (journal/booktitle) is missing from this entry -- verify against OSTI record 1808167 before relying on it},
  keywords      = {},
  pubstate      = {published},
  tppubtype     = {article}
}