[
{
"URL": "https://archive.org/details/Status_Line_The_Vol._VIII_No._1_1984-01_Infocom_US",
"author": [
{
"literal": "Infocom"
}
],
"id": "1989infocomStatusLine",
"issued": {
"date-parts": [
[
1989
]
]
},
"keyword": "ejr-CV,mention",
"note": "Solved Puzzle #18.",
"title": "The Status Line",
"type": "article-magazine"
},
{
"DOI": "10.1117/12.279618",
"abstract": "SIMD parallel systems have been employed for image processing and computer vision applications since their inception. This paper describes a system in which parallel programs are implemented using a machine-independent, retargetable object library that provides SIMD execution on the Lockheed Martin PAL-I SIMD parallel processor. Programs’ performance on this machine is improved through on-the-fly execution analysis and scheduling. We describe the relevant elements of the system structure, the general scheme for execution analysis, and the current cost model for scheduling.",
"author": [
{
"family": "Wilson",
"given": "Joseph N."
},
{
"family": "Riedy",
"given": "E. Jason"
}
],
"container-title": "Parallel and distributed methods for image processing",
"editor": [
{
"family": "Shi",
"given": "Hongchi"
},
{
"family": "Coffield",
"given": "Patrick C."
}
],
"id": "1997wilsonEfficientSIMDEvaluation",
"issued": {
"date-parts": [
[
1997,
7
]
]
},
"keyword": "ejr-CV,image algebra,parallel algorithm,proceedings",
"page": "199-210",
"publisher": "SPIE",
"publisher-place": "San Diego, CA",
"title": "Efficient SIMD evaluation of image processing programs",
"type": "paper-conference",
"volume": "3166"
},
{
"abstract": "The Tera Multithreaded Architecture, or MTA, addresses scalable shared memory system design with a different approach; it tolerates latency through providing fast access to multiple threads of execution. The MTA employs a number of radical design ideas: creation of hardware threads (streams) with frequent context switching; full-empty bits for each memory word; a flat memory hierarchy; and deep pipelines. Recent evaluations of the MTA have taken a top-down approach: port applications and application benchmarks, and compare the absolute performance with conventional systems. While useful, these studies do not reveal the effect of the Tera MTA’s unique hardware features on an application. We present a bottom-up approach to the evaluation of the MTA via a suite of microbenchmarks to examine in detail the underlying hardware mechanisms and the cost of runtime system support for multithreading. In particular, we measure memory, network, and instruction latencies; memory bandwidth; the cost of low-level synchronization via full-empty bits; overhead for stream management; and the effects of software pipelining. These data should provide a foundation for performance modeling on the MTA. We also present results for list ranking on the MTA, an application which has traditionally been difficult to scale on conventional parallel systems.",
"author": [
{
"family": "Riedy",
"given": "E. Jason"
},
{
"family": "Vuduc",
"given": "Rich"
}
],
"id": "1999riedyMicrobenchmarkingTeraMTA",
"issued": {
"date-parts": [
[
1999,
5
]
]
},
"keyword": "ejr-CV,memory-centric,novel architecture,parallel algorithm,unpublished",
"note": "Cited",
"title": "Microbenchmarking the Tera MTA",
"type": "manuscript"
},
{
"ISBN": "0-8247-1928-X",
"abstract": "SIMD parallel computers have been employed for image related applications since their inception. They have been leading the way in improving processing speed for those applications. However, current parallel programming technologies have not kept pace with the performance growth and cost decline of parallel hardware. A highly usable parallel software development environment is needed. This chapter presents a computing environment that integrates a SIMD mesh architecture with image algebra for high-performance image processing applications. The environment describes parallel programs through a machine-independent, retargetable image algebra object library that supports SIMD execution on the Lockheed Martin PAL-I parallel computer. Program performance on this machine is improved through on-the-fly execution analysis and scheduling. We describe the relevant elements of the system structure, outline the scheme for execution analysis, and provide examples of the current cost model and scheduling system.",
"author": [
{
"family": "Wilson",
"given": "Joseph N."
},
{
"family": "Riedy",
"given": "E. Jason"
},
{
"family": "Ritter",
"given": "Gerhard X."
},
{
"family": "Shi",
"given": "Hongchi"
}
],
"container-title": "Visual information representation, communication, and image processing",
"editor": [
{
"family": "Chen",
"given": "C. W."
},
{
"family": "Zhang",
"given": "Y. Q."
}
],
"id": "1999wilsonImageAlgebraBased",
"issued": {
"date-parts": [
[
1999
]
]
},
"keyword": "book-chapter,ejr-CV,image algebra,parallel algorithm",
"page": "523-542",
"publisher": "Marcel Dekker",
"publisher-place": "New York",
"title": "An Image Algebra based SIMD image processing environment",
"type": "chapter"
},
{
"abstract": "The fundamental constraint on a networked sensor is its energy consumption, since it may be either impossible or not feasible to replace its energy source. We analyze the power dissipation implications of implementing the network sensor with either a central processor switching between I/O devices or a family of processors, each dedicated to a single device. We present the energy measurements of the current generations of networked sensors, and develop an abstract description of tradeoffs between both designs.",
"author": [
{
"family": "Riedy",
"given": "E. Jason"
},
{
"family": "Szewczyk",
"given": "Robert"
}
],
"id": "2000riedyPowerControlNetworked",
"issued": {
"date-parts": [
[
2000,
5
]
]
},
"keyword": "ejr-CV,embedded,IoT,novel architecture,sensor,unpublished",
"note": "Cited",
"title": "Power and control in networked sensors",
"type": "manuscript"
},
{
"URL": "http://grouper.ieee.org/groups/754/meeting-materials/2002-08-22-pres.pdf",
"author": [
{
"family": "Bindel",
"given": "David"
},
{
"family": "Riedy",
"given": "E. Jason"
}
],
"id": "2002bindelExceptionHandlingInterfaces",
"issued": {
"date-parts": [
[
2002,
8
]
]
},
"keyword": "ejr-CV,floating point,ieee754,presentation",
"title": "Exception handling interfaces, implementations, and evaluation",
"type": "manuscript"
},
{
"author": [
{
"family": "Riedy",
"given": "E. Jason"
}
],
"event-place": "Livermore, CA",
"id": "2002riedyParallelBipartiteMatching",
"issued": {
"date-parts": [
[
2002,
3
]
]
},
"keyword": "ejr-CV,graph analysis,parallel algorithm,presentation,sparse matrix",
"title": "Parallel bipartite matching for sparse matrix computation",
"type": "speech"
},
{
"URL": "http://purl.oclc.org/NET/jason-riedy/resume/material/siam-cse03-poster.pdf",
"abstract": "Practical and efficient methods exist for parallelizing the numerical work in sparse matrix calculations. The initial symbolic analysis is now becoming a sequential bottleneck, limiting problems’ sizes. One such analysis is the weighted bipartite matching used to achieve scalable, unsymmetric LU factorization in Superlu. Applying a mathematical optimization algorithm produces a distributed-memory implementation with explicit trade-offs between speed and matching quality. We present accuracy and performance results for this phase alone and in the context of Superlu.",
"author": [
{
"family": "Riedy",
"given": "E. Jason"
}
],
"id": "2003riedyParallelBipartiteMatching",
"issued": {
"date-parts": [
[
2003,
2
]
]
},
"keyword": "ejr-CV,graph analysis,linear algebra,parallel algorithm,presentation,sparse matrix",
"title": "Parallel bipartite matching for sparse matrix computations",
"type": "manuscript"
},
{
"URL": "http://purl.oclc.org/NET/jason-riedy/resume/material/siam-am03.pdf",
"abstract": "Traditional pivoting during parallel, unsymmetric LU factorization introduces heavy communication and restructuring costs. Possible alternatives include pre-pivoting to place heavy elements along the diagonal and limited pivoting that maintains the factors’ structures. Each alternative comes with trade-offs that affect accuracy and performance.",
"author": [
{
"family": "Riedy",
"given": "E. Jason"
}
],
"id": "2003riedyPracticalAlternativesParallel",
"issued": {
"date-parts": [
[
2003,
6
]
]
},
"keyword": "ejr-CV,graph analysis,linear algebra,parallel algorithm,presentation,sparse matrix",
"title": "Practical alternatives for parallel pivoting",
"type": "manuscript"
},
{
"URL": "http://purl.oclc.org/NET/jason-riedy/resume/material/pp04.pdf",
"abstract": "Bipartite matching is one of graph theory’s workhorses, occurring in the solution or approximation of many problems. Increasingly, applications’ data spans multiple memory spaces, but there is little recent experience with distributed matching algorithms. We present a distributed, parallel implementation for weighted bipartite matching based on Bertsekas’s auction algorithm. The bidding process finds local matchings while summarizing updates for occasional communication, leading to superlinear speed-ups on some sparse problems and modest performance on others.",
"author": [
{
"family": "Riedy",
"given": "E. Jason"
}
],
"id": "2004riedyParallelWeightedBipartite",
"issued": {
"date-parts": [
[
2004,
2
]
]
},
"keyword": "ejr-CV,graph analysis,parallel algorithm,presentation,sparse matrix",
"title": "Parallel weighted bipartite matching and applications",
"type": "manuscript"
},
{
"URL": "http://purl.oclc.org/NET/jason-riedy/resume/material/csc04.pdf",
"author": [
{
"family": "Riedy",
"given": "E. Jason"
}
],
"id": "2004riedySparseDataStructures",
"issued": {
"date-parts": [
[
2004,
2
]
]
},
"keyword": "ejr-CV,graph analysis,presentation,sparse matrix",
"title": "Sparse data structures for weighted bipartite matching",
"type": "manuscript"
},
{
"URL": "http://www.netlib.org/lapack/lawnspdf/lawn165.pdf",
"abstract": "We present the design and testing of an algorithm for iterative refinement of the solution of linear equations, where the residual is computed with extra precision. This algorithm was originally proposed in the 1960s [6, 22] as a means to compute very accurate solutions to all but the most ill-conditioned linear systems of equations. However two obstacles have until now prevented its adoption in standard subroutine libraries like LAPACK: (1) There was no standard way to access the higher precision arithmetic needed to compute residuals, and (2) it was unclear how to compute a reliable error bound for the computed solution. The completion of the new BLAS Technical Forum Standard [5] has recently removed the first obstacle. To overcome the second obstacle, we show how a single application of iterative refinement can be used to compute an error bound in any norm at small cost, and use this to compute both an error bound in the usual infinity norm, and a componentwise relative error bound. We report extensive test results on over 6.2 million matrices of dimension 5, 10, 100, and 1000. As long as a normwise (resp. componentwise) condition number computed by the algorithm is less than 1/max{10,√n}εw , the computed normwise (resp. componentwise) error bound is at most 2max{10,√n}⋅εw , and indeed bounds the true error. Here, n is the matrix dimension and εw is single precision roundoff error. For worse conditioned problems, we get similarly small correct error bounds in over 89.4% of cases.",
"author": [
{
"family": "Demmel",
"given": "James W."
},
{
"family": "Hida",
"given": "Yozo"
},
{
"family": "Kahan",
"given": "W."
},
{
"family": "Li",
"given": "Xiaoye S."
},
{
"family": "Mukherjee",
"given": "Sonil"
},
{
"family": "Riedy",
"given": "E. Jason"
}
],
"genre": "LAPACK Working Note",
"id": "2005demmelErrorBoundsExtraprecise",
"issued": {
"date-parts": [
[
2005,
2
]
]
},
"keyword": "ejr-CV,technical-report",
"note": "Also issued as UCB//CSD-05-1414, UT-CS-05-547, and LBNL-56965; expanded from TOMS version",
"number": "165",
"publisher": "Netlib",
"title": "Error bounds from extra-precise iterative refinement",
"type": "report"
},
{
"DOI": "10.1109/ARITH.2005.10",
"ISBN": "0-7695-2366-8",
"abstract": "The entire process of creating and executing applications that solve interesting problems with acceptable cost and accuracy involves a complex interaction among hardware, system software, programming environments, mathematical software libraries, and applications software, all mediated by standards for arithmetic, operating systems, and programming environments. This panel will discuss various issues arising among these various contending points of view, sometimes from the point of view of issues raised during the current IEEE 754R standards revision effort.",
"author": [
{
"family": "Hough",
"given": "David"
},
{
"family": "Hay",
"given": "Bill"
},
{
"family": "Kidder",
"given": "Jeff"
},
{
"family": "Riedy",
"given": "E. Jason"
},
{
"family": "Steele",
"given": "Guy L.",
"suffix": "Jr."
},
{
"family": "Thomas",
"given": "Jim"
}
],
"container-title": "17th IEEE symposium on computer arithmetic (ARITH’05)",
"id": "2005houghArithmeticInteractionsHardware",
"issued": {
"date-parts": [
[
2005,
6
]
]
},
"keyword": "ejr-CV,floating point,ieee754,proceedings",
"note": "See http://purl.oclc.org/NET/jason-riedy/resume/material/arith17-slides.pdf for the related presentation",
"title": "Arithmetic interactions: From hardware to applications",
"title-short": "Arithmetic interactions",
"type": "paper-conference"
},
{
"URL": "http://www.netlib.org/lapack/lawnspdf/lawn172.pdf",
"abstract": "Bisection is one of the most common methods used to compute the eigenvalues of symmetric tridiagonal matrices. Bisection relies on the Sturm count: For a given shift sigma, the number of negative pivots in the factorization T-σI=LDLT equals the number of eigenvalues of T that are smaller than sigma. In IEEE-754 arithmetic, the value ∞ permits the computation to continue past a zero pivot, producing a correct Sturm count when T is unreduced. Demmel and Li showed [IEEE Trans. Comput., 43 (1994), pp. 983–992] that using ∞ rather than testing for zero pivots within the loop could significantly improve performance on certain architectures. When eigenvalues are to be computed to high relative accuracy, it is often preferable to work with LDLT factorizations instead of the original tridiagonal T. One important example is the MRRR algorithm. When bisection is applied to the factored matrix, the Sturm count is computed from LDLT which makes differential stationary and progressive qds algorithms the methods of choice. While it seems trivial to replace T by LDLT, in reality these algorithms are more complicated: In IEEE-754 arithmetic, a zero pivot produces an overflow followed by an invalid exception (NaN, or “Not a Number”) that renders the Sturm count incorrect. We present alternative, safe formulations that are guaranteed to produce the correct result. Benchmarking these algorithms on a variety of platforms shows that the original formulation without tests is always faster provided that no exception occurs. The transforms see speed-ups of up to 2.6x over the careful formulations. Tests on industrial matrices show that encountering exceptions in practice is rare. This leads to the following design: First, compute the Sturm count by the fast but unsafe algorithm. Then, if an exception occurs, recompute the count by a safe, slower alternative. The new Sturm count algorithms improve the speed of bisection by up to 2x on our test matrices.\nFurthermore, unlike the traditional tiny-pivot substitution, proper use of IEEE-754 features provides a careful formulation that imposes no input range restrictions.",
"author": [
{
"family": "Marques",
"given": "Osni A."
},
{
"family": "Riedy",
"given": "E. Jason"
},
{
"family": "Vömel",
"given": "Christof"
}
],
"genre": "LAPACK Working Note",
"id": "2005marquesBenefitsIEEE754Features",
"issued": {
"date-parts": [
[
2005,
9
]
]
},
"keyword": "ejr-CV,technical-report",
"note": "Also issued as UCB//CSD-05-1414; expanded from SISC version",
"number": "172",
"publisher": "Netlib",
"title": "Benefits of IEEE-754 features in modern symmetric tridiagonal eigensolvers",
"type": "report"
},
{
"URL": "http://purl.oclc.org/NET/jason-riedy/resume/material/future-of-scalapack.pdf",
"abstract": "We are planning new releases of the widely used LAPACK and ScaLAPACK numerical linear algebra libraries. Based on an on-going user survey (http://www.netlib.org/lapack-dev) and research by many people, we are proposing the following improvements: Faster algorithms (including better numerical methods, memory hierarchy optimizations, parallelism, and automatic performance tuning to accomodate new architectures), more accurate algorithms (including better numerical methods, and use of extra precision), expanded functionality (including updating and downdating, new eigenproblems, etc. and putting more of LAPACK into ScaLAPACK), and improved ease of use (friendlier interfaces in multiple languages). To accomplish these goals we are also relying on better software engineering techniques and contributions from collaborators at many institutions. This is joint work with Jack Dongarra.",
"author": [
{
"family": "Riedy",
"given": "E. Jason"
},
{
"family": "Hida",
"given": "Yozo"
},
{
"family": "Demmel",
"given": "James W."
}
],
"id": "2005riedyFutureLAPACKScaLAPACK",
"issued": {
"date-parts": [
[
2005,
11
]
]
},
"keyword": "ejr-CV,floating point,lapack,linear algebra,presentation",
"title": "The future of LAPACK and ScaLAPACK",
"type": "manuscript"
},
{
"URL": "http://purl.oclc.org/NET/jason-riedy/resume/material/arith17-slides.pdf",
"author": [
{
"family": "Riedy",
"given": "E. Jason"
}
],
"id": "2005riedyModernLanguageTools",
"issued": {
"date-parts": [
[
2005,
6
]
]
},
"keyword": "ejr-CV,floating point,invited-presentation,lapack,linear algebra,sparse matrix",
"note": "Invited presentation and panelist",
"title": "Modern language tools and 754R",
"type": "manuscript"
},
{
"URL": "http://purl.oclc.org/NET/jason-riedy/resume/material/cse05.pdf",
"abstract": "Increasingly, sparse matrix applications produce matrices too large for a single computer’s memory. Distributed, parallel computers provide an avenue around memory limitations, but distributing combinatorial algorithms is historically difficult. We use insights from combinatorial optimization to design loosely coupled algorithms for sparse matrix matching, ordering, and symbolic factorization. These algorithms’ performance depends on both problem instance and computer architecture. We investigate these aspects of performance and demonstrate issues that affect distributed combinatorial computing.",
"author": [
{
"family": "Riedy",
"given": "E. Jason"
}
],
"id": "2005riedyParallelCombinatorialComputing",
"issued": {
"date-parts": [
[
2005,
2
]
]
},
"keyword": "ejr-CV,graph analysis,parallel algorithm,presentation,sparse matrix",
"title": "Parallel combinatorial computing and sparse matrices",
"type": "manuscript"
},
{
"DOI": "10.1145/1141885.1141894",
"ISSN": "0098-3500",
"abstract": "We present the design and testing of an algorithm for iterative refinement of the solution of linear equations where the residual is computed with extra precision. This algorithm was originally proposed in 1948 and analyzed in the 1960s as a means to compute very accurate solutions to all but the most ill-conditioned linear systems. However, two obstacles have until now prevented its adoption in standard subroutine libraries like LAPACK: (1) There was no standard way to access the higher precision arithmetic needed to compute residuals, and (2) it was unclear how to compute a reliable error bound for the computed solution. The completion of the new BLAS Technical Forum Standard has essentially removed the first obstacle. To overcome the second obstacle, we show how the application of iterative refinement can be used to compute an error bound in any norm at small cost and use this to compute both an error bound in the usual infinity norm, and a componentwise relative error bound.",
"author": [
{
"family": "Demmel",
"given": "James W."
},
{
"family": "Hida",
"given": "Yozo"
},
{
"family": "Kahan",
"given": "W."
},
{
"family": "Li",
"given": "Xiaoye S."
},
{
"family": "Mukherjee",
"given": "Sonil"
},
{
"family": "Riedy",
"given": "E. Jason"
}
],
"container-title": "ACM Transactions on Mathematical Software",
"id": "2006demmelErrorBoundsExtraprecise",
"issue": "2",
"issued": {
"date-parts": [
[
2006,
6
]
]
},
"keyword": "ejr-CV,floating point,ieee754,lapack,linear algebra,refereed",
"page": "325-351",
"title": "Error bounds from extra-precise iterative refinement",
"type": "article-journal",
"volume": "32"
},
{
"DOI": "10.1007/978-3-540-75755-9_2",
"URL": "http://www.netlib.org/utk/people/JackDongarra/PAPERS/para06-lapack.pdf",
"abstract": "LAPACK and ScaLAPACK are widely used software libraries for numerical linear algebra. There have been over 68M web hits at www.netlib.org for the associated libraries LAPACK, ScaLAPACK, CLAPACK and LAPACK95. LAPACK and ScaLAPACK are used to solve leading edge science problems and they have been adopted by many vendors and software providers as the basis for their own libraries, including AMD, Apple (under Mac OS X), Cray, Fujitsu, HP, IBM, Intel, NEC, SGI, several Linux distributions (such as Debian), NAG, IMSL, the MathWorks (producers of MATLAB), Interactive Supercomputing, and PGI. Future improvements in these libraries will therefore have a large impact on users.",
"author": [
{
"family": "Demmel",
"given": "James W."
},
{
"family": "Dongarra",
"given": "Jack"
},
{
"family": "Parlett",
"given": "Beresford"
},
{
"family": "Kahan",
"given": "W."
},
{
"family": "Gu",
"given": "Ming"
},
{
"family": "Bindel",
"given": "David"
},
{
"family": "Hida",
"given": "Yozo"
},
{
"family": "Li",
"given": "Xiaoye S."
},
{
"family": "Marques",
"given": "Osni A."
},
{
"family": "Riedy",
"given": "E. Jason"
},
{
"family": "Vömel",
"given": "Christof"
},
{
"family": "Langou",
"given": "Julien"
},
{
"family": "Luszczek",
"given": "Piotr"
},
{
"family": "Kurzak",
"given": "Jakub"
},
{
"family": "Buttari",
"given": "Alfredo"
},
{
"family": "Langou",
"given": "Julie"
},
{
"family": "Tomov",
"given": "Stanimire"
}
],
"container-title": "PARA’06: State-of-the-art in scientific and parallel computing",
"id": "2006demmelProspectusNextLAPACK",
"issued": {
"date-parts": [
[
2006,
6
]
]
},
"keyword": "ejr-CV,floating point,lapack,linear algebra,proceedings",
"publisher": "Springer / High Performance Computing Center North (HPC2N) and the Department of Computing Science, Umeå University",
"publisher-place": "Umeå, Sweden",
"title": "Prospectus for the next LAPACK and ScaLAPACK libraries",
"type": "paper-conference"
},
{
"URL": "http://www.netlib.org/lapack-dev/lapack-coding/program-style.html",
"abstract": "The purpose of this document is to facilitate contributions to LAPACK and ScaLAPACK by documenting their design and implementation guidelines. The long-term goal is to provide guidelines for both LAPACK and ScaLAPACK. However, the parallel ScaLAPACK code has more open issues, so this document primarily concerns LAPACK.",
"author": [
{
"family": "Dongarra",
"given": "Jack"
},
{
"family": "Langou",
"given": "Julien"
},
{
"family": "Riedy",
"given": "E. Jason"
}
],
"id": "2006dongarraScaLAPACKProgram",
"issued": {
"date-parts": [
[
2006,
8
]
]
},
"keyword": "blas,ejr-CV,lapack,linear algebra,unpublished",
"title": "Sca/LAPACK program style",
"type": "webpage"
},
{
"DOI": "10.1137/050641624",
"ISSN": "1064-8275",
"abstract": "Bisection is one of the most common methods used to compute the eigenvalues of symmetric tridiagonal matrices. Bisection relies on the Sturm count: For a given shift σ, the number of negative pivots in the factorization T - σI = LDL^T equals the number of eigenvalues of T that are smaller than σ. In IEEE-754 arithmetic, the value ∞ permits the computation to continue past a zero pivot, producing a correct Sturm count when T is unreduced. Demmel and Li showed that using ∞ rather than testing for zero pivots within the loop could significantly improve performance on certain architectures. When eigenvalues are to be computed to high relative accuracy, it is often preferable to work with LDL^T factorizations instead of the original tridiagonal T. One important example is the MRRR algorithm. When bisection is applied to the factored matrix, the Sturm count is computed from LDL^T which makes differential stationary and progressive qds algorithms the methods of choice. While it seems trivial to replace T by LDL^T, in reality these algorithms are more complicated: In IEEE-754 arithmetic, a zero pivot produces an overflow followed by an invalid exception (NaN, or ’Not a Number’) that renders the Sturm count incorrect. We present alternative, safe formulations that are guaranteed to produce the correct result. Benchmarking these algorithms on a variety of platforms shows that the original formulation without tests is always faster provided that no exception occurs. The transforms see speed-ups of up to 2.6x over the careful formulations. Tests on industrial matrices show that encountering exceptions in practice is rare. This leads to the following design: First, compute the Sturm count by the fast but unsafe algorithm. Then, if an exception occurs, recompute the count by a safe, slower alternative. The new Sturm count algorithms improve the speed of bisection by up to 2x on our test matrices.\nFurthermore, unlike the traditional tiny-pivot substitution, proper use of IEEE-754 features provides a careful formulation that imposes no input range restrictions.",
"author": [
{
"family": "Marques",
"given": "Osni A."
},
{
"family": "Riedy",
"given": "E. Jason"
},
{
"family": "Vömel",
"given": "Christof"
}
],
"container-title": "SIAM journal on scientific computing",
"id": "2006marquesBenefitsIEEE754Features",
"issue": "5",
"issued": {
"date-parts": [
[
2006
]
]
},
"keyword": "Algorithms,Design,Eigenvalues,ejr-CV,floating point,ieee754,lapack,linear algebra,Matrices,refereed,Testing",
"language": "en-US",
"page": "1613-1633",
"publisher-place": "United States",
"title": "Benefits of IEEE-754 features in modern symmetric tridiagonal eigensolvers",
"type": "article-journal",
"volume": "28"
},
{
"URL": "http://purl.oclc.org/NET/jason-riedy/resume/material/bascd2006-poster.pdf",
"abstract": "For sparse LU factorization, dynamic pivoting tightly couples symbolic and numerical computation. Dynamic structural changes limit parallel scalability. Demmel and Li use static pivoting in distributed SuperLU for performance, but intentionally perturbing the input may lead silently to erroneous results. Are there experimentally stable static pivoting heuristics that lead to a dependable direct solver? The answer is currently a qualified yes. Current heuristics fail on a few systems, but all failures are detectable.",
"author": [
{
"family": "Riedy",
"given": "E. Jason"
}
],
"event-place": "Livermore, CA",
"id": "2006riedyMakingStaticPivoting",
"issued": {
"date-parts": [
[
2006,
3
]
]
},
"keyword": "ejr-CV,floating point,graph analysis,linear algebra,presentation,sparse matrix",
"title": "Making static pivoting dependable",
"type": "speech"
},
{
"URL": "http://www.netlib.org/lapack/lawnspdf/lawn188.pdf",
"abstract": "We present the algorithm, error bounds, and numerical results for extra-precise iterative refinement applied to overdetermined linear least squares (LLS) problems. We apply our linear system refinement algorithm to Björck’s augmented linear system formulation of an LLS problem. Our algorithm reduces the forward normwise and componentwise errors to O(ε) unless the system is too ill conditioned. In contrast to linear systems, we provide two separate error bounds for the solution x and the residual r. The refinement algorithm requires only limited use of extra precision and adds only O(mn) work to the O(mn²) cost of QR factorization for problems of size m-by-n. The extra precision calculation is facilitated by the new extended-precision BLAS standard in a portable way, and the refinement algorithm will be included in a future release of LAPACK and can be extended to the other types of least squares problems.",
"author": [
{
"family": "Demmel",
"given": "James W."
},
{
"family": "Hida",
"given": "Yozo"
},
{
"family": "Li",
"given": "Xiaoye S."
},
{
"family": "Riedy",
"given": "E. Jason"
}
],
"genre": "LAPACK Working Note",
"id": "2007demmelExtrapreciseIterativeRefinement",
"issued": {
"date-parts": [
[
2007,
5
]
]
},
"keyword": "ejr-CV,technical-report",
"note": "Also issued as UCB/EECS-2007-77; version accepted for TOMS.",
"number": "188",
"publisher": "Netlib",
"title": "Extra-precise iterative refinement for overdetermined least squares problems",
"type": "report"
},
{
"URL": "http://purl.oclc.org/NET/jason-riedy/resume/material/bascd2007-poster.pdf",
"abstract": "Linear least squares (LLS) fitting is the most widely used data modeling technique and is included in almost every data analysis system (e.g. spreadsheets). These software systems often give no feedback on the conditioning of the LLS problem or the floating-point calculation errors present in the solution. With limited use of extra precision, we can eliminate these concerns for all but the most ill-conditioned LLS problems. Our algorithm provides either a solution and residual with relatively tiny error or a notice that the LLS problem is too ill-conditioned.",
"author": [
{
"family": "Demmel",
"given": "James W."
},
{
"family": "Hida",
"given": "Yozo"
},
{
"family": "Li",
"given": "Xiaoye S."
},
{
"family": "Riedy",
"given": "E. Jason"
},
{
"family": "Vishvanath",
"given": "Meghana"
},
{
"family": "Vu",
"given": "David"
}
],
"event-place": "Stanford, CA",
"id": "2007demmelPreciseSolutionsOverdetermined",
"issued": {
"date-parts": [
[
2007,
3
]
]
},
"keyword": "blas,ejr-CV,floating point,lapack,least squares,linear algebra,presentation",
"title": "Precise solutions for overdetermined least squares problems",
"type": "speech"
},
{
"URL": "http://www.netlib.org/lapack/lawnspdf/lawn181.pdf",
"author": [
{
"family": "Demmel",
"given": "James W."
},
{
"family": "Dongarra",
"given": "Jack"
},
{
"family": "Parlett",
"given": "Beresford"
},
{
"family": "Kahan",
"given": "W."
},
{
"family": "Gu",
"given": "Ming"
},
{
"family": "Bindel",
"given": "David"
},
{
"family": "Hida",
"given": "Yozo"
},
{
"family": "Li",
"given": "Xiaoye S."
},
{
"family": "Marques",
"given": "Osni A."
},
{
"family": "Riedy",
"given": "E. Jason"
},
{
"family": "Vömel",
"given": "Christof"
},
{
"family": "Langou",
"given": "Julien"
},
{
"family": "Luszczek",
"given": "Piotr"
},
{
"family": "Kurzak",
"given": "Jakub"
},
{
"family": "Buttari",
"given": "Alfredo"
},
{
"family": "Langou",
"given": "Julie"
},
{
"family": "Tomov",
"given": "Stanimire"
}
],
"genre": "LAPACK Working Note",
"id": "2007demmelProspectusNextLAPACK",
"issued": {
"date-parts": [
[
2007,
2
]
]
},
"keyword": "ejr-CV,technical-report",
"note": "Also issued as UT-CS-07-592",
"number": "181",
"publisher": "Netlib",
"title": "Prospectus for the next LAPACK and ScaLAPACK libraries",
"type": "report"
},
{
"URL": "http://www.netlib.org/lapack/lawnspdf/lawn203.pdf",
"abstract": "The Householder reflections used in LAPACK’s QR factorization leave positive and negative real entries along R’s diagonal. This is sufficient for most applications of QR factorizations, but a few require that R have a nonnegative diagonal. This note describes a new Householder generation routine to produce a nonnegative diagonal. Additionally, we find that scanning for trailing zeros in the generated reflections leads to large performance improvements when applying reflections with many trailing zeros. Factoring low-profile matrices, those with nonzero entries mostly near the diagonal (e.g., band matrices), now require far fewer operations. For example, QR factorization of matrices with profile width b that are stored densely in an n×n matrix improves from O(n³) to O(n²+nb²). These routines are in LAPACK 3.2.",
"author": [
{
"family": "Demmel",
"given": "James W."
},
{
"family": "Hoemmen",
"given": "Mark Frederick"
},
{
"family": "Hida",
"given": "Yozo"
},
{
"family": "Riedy",
"given": "E. Jason"
}
],
"genre": "LAPACK Working Note",
"id": "2008demmelNonnegativeDiagonalsHigh",
"issued": {
"date-parts": [
[
2008,
5
]
]
},
"keyword": "ejr-CV,technical-report",
"note": "Also issued as UCB/EECS-2008-76; modified from SISC version.",
"number": "203",
"publisher": "Netlib",
"title": "Non-negative diagonals and high performance on low-profile matrices from Householder QR",
"type": "report"
},
{
"DOI": "10.1109/IEEESTD.2008.4610935",
"ISBN": "978-0-7381-5753-5",
"abstract": "This standard specifies interchange and arithmetic formats and methods for binary and decimal floating-point arithmetic in computer programming environments. This standard specifies exception conditions and their default handling. An implementation of a floating-point system conforming to this standard may be realized entirely in software, entirely in hardware, or in any combination of software and hardware. For operations specified in the normative part of this standard, numerical results and exceptions are uniquely determined by the values of the input data, sequence of operations, and destination formats, all under user control.",
"author": [
{
"literal": "IEEE 754 Committee"
}
],
"genre": "IEEE Std",
"id": "2008ieee754committeeIEEEStandardFloatingpoint",
"issued": {
"date-parts": [
[
2008,
8
]
]
},
"keyword": "754-2008,arithmetic,arithmetic formats,binary,computer,computer programming,decimal,decimal floating-point arithmetic,ejr-CV,exponent,floating point arithmetic,floating-point,format,IEEE standard,IEEE standards,interchange,NaN,number,programming,rounding,significand,subnormal,technical-report",
"note": "(committee member and contributor)",
"number": "754-2008",
"publisher": "Microprocessor Standards Committee of the IEEE Computer Society",
"publisher-place": "New York, NY",
"title": "IEEE standard for floating-point arithmetic",
"type": "legislation"
},
{
"URL": "http://www.cerfacs.fr/",
"author": [
{
"family": "Riedy",
"given": "E. Jason"
}
],
"id": "2008riedyAuctionsDistributedPossibly",
"issued": {
"date-parts": [
[
2008,
12
]
]
},
"keyword": "ejr-CV,floating point,invited-presentation,lapack,linear algebra,sparse matrix",
"note": "Invited presentation",
"title": "Auctions for distributed (and possibly parallel) matchings",
"type": "manuscript"
},
{
"DOI": "10.1145/1462173.1462177",
"ISSN": "0098-3500",
"abstract": "We present the algorithm, error bounds, and numerical results for extra-precise iterative refinement applied to overdetermined linear least squares (LLS) problems. We apply our linear system refinement algorithm to Björck’s augmented linear system formulation of an LLS problem. Our algorithm reduces the forward normwise and componentwise errors to O(ε) unless the system is too ill conditioned. In contrast to linear systems, we provide two separate error bounds for the solution x and the residual r. The refinement algorithm requires only limited use of extra precision and adds only O(mn) work to the O(mn²) cost of QR factorization for problems of size m-by-n. The extra precision calculation is facilitated by the new extended-precision BLAS standard in a portable way, and the refinement algorithm will be included in a future release of LAPACK and can be extended to the other types of least squares problems.",
"author": [
{
"family": "Demmel",
"given": "James W."
},
{
"family": "Hida",
"given": "Yozo"
},
{
"family": "Li",
"given": "Xiaoye S."
},
{
"family": "Riedy",
"given": "E. Jason"
}
],
"container-title": "ACM Transactions on Mathematical Software",
"id": "2009demmelExtrapreciseIterativeRefinement",
"issue": "4",
"issued": {
"date-parts": [
[
2009,
2
]
]
},
"keyword": "ejr-CV,floating point,ieee754,lapack,linear algebra,refereed",
"page": "1-32",
"title": "Extra-precise iterative refinement for overdetermined least squares problems",
"type": "article-journal",
"volume": "35"
},
{
"DOI": "10.1137/080725763",
"ISSN": "1064-8275",
"abstract": "The Householder reflections used in LAPACK’s QR factorization leave positive and negative real entries along R’s diagonal. This is sufficient for most applications of QR factorizations, but a few require that R have a nonnegative diagonal. This note describes a new Householder generation routine to produce a nonnegative diagonal. Additionally, we find that scanning for trailing zeros in the generated reflections leads to large performance improvements when applying reflections with many trailing zeros. Factoring low-profile matrices, those with nonzero entries mostly near the diagonal (e.g., band matrices), now require far fewer operations. For example, QR factorization of matrices with profile width b that are stored densely in an n×n matrix improves from O(n³) to O(n²+nb²). These routines are in LAPACK 3.2.",
"author": [
{
"family": "Demmel",
"given": "James W."
},
{
"family": "Hoemmen",
"given": "Mark Frederick"
},
{
"family": "Hida",
"given": "Yozo"
},
{
"family": "Riedy",
"given": "E. Jason"
}
],
"container-title": "SIAM Journal on Scientific Computing",
"id": "2009demmelNonnegativeDiagonalsHigh",
"issue": "4",
"issued": {
"date-parts": [
[
2009,
7
]
]
},
"keyword": "ejr-CV,floating-point,Householder reflection,lapack,LAPACK,linear algebra,QR factorization,refereed",
"page": "2832-2841",
"publisher": "SIAM",
"title": "Non-negative diagonals and high performance on low-profile matrices from Householder QR",
"type": "article-journal",
"volume": "31"
},
{
"author": [
{
"literal": "E. Jason Riedy (coPI)"
},
{
"literal": "David A. Bader (PI)"
}
],
"id": "2009e.jasonriedycopiDynamicGraphData",
"issued": {
"date-parts": [
[
2009,
12
]
]
},
"keyword": "ejr-CV,grants",
"note": "$20 000",
"publisher": "Georgia Institute of Technology",
"title": "Dynamic graph data structures in X10",
"type": "report"
},
{
"URL": "http://hdl.handle.net/1853/29795",
"abstract": "Solving a square linear system Ax=b often is considered a black box. It’s supposed to \"just work,\" and failures often are blamed on the original data or subtleties of floating-point. Now that we have an abundance of cheap computations, however, we can do much better. A little extra precision in just the right places produces accurate solutions cheaply or demonstrates when problems are too hard to solve without significant cost. This talk will outline the method, iterative refinement with a new twist; the benefits, small backward and forward errors; and the trade-offs and unexpected benefits.",
"author": [
{
"family": "Riedy",
"given": "E. Jason"
}
],
"id": "2009riedyDependableDirectSolutions",
"issued": {
"date-parts": [
[
2009,
8
]
]
},
"keyword": "ejr-CV,floating point,lapack,linear algebra,presentation,sparse matrix",
"note": "Invited presentation",
"title": "Dependable direct solutions for linear systems using a little extra precision",
"type": "manuscript"
},
{
"URL": "http://www.graph500.org/Specifications.html",
"author": [
{
"family": "Bader",
"given": "David A."
},
{
"family": "Berry",
"given": "Jonathan"
},
{
"family": "Kahan",
"given": "Simon"
},
{
"family": "Murphy",
"given": "Richard"
},
{
"family": "Riedy",
"given": "E. Jason"
},
{
"family": "Willcock",
"given": "Jeremiah"
}
],
"id": "2010baderGraph500Benchmark",
"issued": {
"date-parts": [
[
2010,
10
]
]
},
"keyword": "ejr-CV,graph analysis,mistake,parallel algorithm,unpublished",
"note": "Version 1.1",
"title": "Graph 500 benchmark 1 (“search”)",
"type": "manuscript"
},
{
"author": [
{
"literal": "E. Jason Riedy (PI)"
},
{
"literal": "David A. Bader"
}
],
"id": "2010e.jasonriedypiSTINGSpatiotemporalInteraction",
"issued": {
"date-parts": [
[
2010,
4
]
]
},
"keyword": "ejr-CV,grants",
"note": "$375 000",
"publisher": "Georgia Institute of Technology",
"title": "STING: Spatio-temporal interaction networks and graphs; an open-source dynamic graph package for intel platforms",
"title-short": "STING",
"type": "report"
},
{
"DOI": "10.1109/ICPP.2010.66",
"abstract": "Social networks produce an enormous quantity of data. Facebook consists of over 400 million active users sharing over 5 billion pieces of information each month. Analyzing this vast quantity of unstructured data presents challenges for software and hardware. We present GraphCT, a Graph Characterization Tooklit for massive graphs representing social network data. On a 128-processor Cray XMT, GraphCT estimates the betweenness centrality of an artificially generated (R-MAT) 537 million vertex, 8.6 billion edge graph in 55 minutes. We use GraphCT to analyze public data from Twitter, a microblogging network. Twitter’s message connections appear primarily tree-structured as a news dissemination system. Within the public data, however, are clusters of conversations. Using GraphCT, we can rank actors within these conversations and help analysts focus attention on a much smaller data subset.",
"author": [
{
"family": "Ediger",
"given": "David"
},
{
"family": "Jiang",
"given": "Karl"
},
{
"family": "Riedy",
"given": "E. Jason"
},
{
"family": "Bader",
"given": "David A."
},
{
"family": "Corley",
"given": "Courtney"
},
{
"family": "Farber",
"given": "Rob"
},
{
"family": "Reynolds",
"given": "William N."
}
],
"container-title": "39th international conference on parallel processing (ICPP)",
"id": "2010edigerMassiveSocialNetwork",
"issued": {
"date-parts": [
[
2010,
9
]
]
},
"keyword": "ejr-CV,graph analysis,hpda,parallel algorithm,proceedings,streaming data",
"publisher-place": "San Diego, CA",
"title": "Massive social network analysis: Mining twitter for social good",
"title-short": "Massive social network analysis",
"type": "paper-conference"
},
{
"DOI": "10.1109/IPDPSW.2010.5470687",
"abstract": "We present a new approach for parallel massive graph analysis of streaming, temporal data with a dynamic and extensible representation. Handling the constant stream of new data from health care, security, business, and social network applications requires new algorithms and data structures. We examine data structure and algorithm trade-offs that extract the parallelism necessary for high-performance updating analysis of massive graphs. Static analysis kernels often rely on storing input data in a specific structure. Maintaining these structures for each possible kernel with high data rates incurs a significant performance cost. A case study computing clustering coefficients on a general-purpose data structure demonstrates incremental updates can be more efficient than global recomputation. Within this kernel, we compare three methods for dynamically updating local clustering coefficients: a brute-force local recalculation, a sorting algorithm, and our new approximation method using a Bloom filter. On 32 processors of a Cray XMT with a synthetic scale-free graph of 2²⁴≈16 million vertices and 2²⁹≈537 million edges, the brute-force method processes a mean of over 50 000 updates per second and our Bloom filter approaches 200 000 updates per second.",
"author": [
{
"family": "Ediger",
"given": "David"
},
{
"family": "Jiang",
"given": "Karl"
},
{
"family": "Riedy",
"given": "E. Jason"
},
{
"family": "Bader",
"given": "David A."
}
],
"container-title": "4th workshop on multithreaded architectures and applications (MTAAP)",
"id": "2010edigerMassiveStreamingData",
"issued": {
"date-parts": [
[
2010,
4
]
]
},
"keyword": "ejr-CV,graph analysis,hpda,parallel algorithm,proceedings,streaming data",
"publisher-place": "Atlanta, GA",
"title": "Massive streaming data analytics: A case study with clustering coefficients",
"title-short": "Massive streaming data analytics",
"type": "paper-conference"
},
{
"author": [
{
"literal": "Participants"
}
],
"id": "2010participantsReportNSFWorkshop",
"issued": {
"date-parts": [
[
2010,
10
]
]
},
"keyword": "accelerator,ejr-CV,high performance data analysis,parallel algorithm,unpublished",
"note": "This workshop is supported by NSF Grant Number 1051537, in response to the Call for Exploratory Workshop Proposals for Scientific Software Innovation Institutes (S2I2).",
"title": "Report on NSF workshop on center scale activities related to accelerators for data intensive applications",
"type": "manuscript"
},
{
"URL": "http://purl.oclc.org/NET/jason-riedy/resume/material/nsf-workshop-socnet.pdf",
"author": [
{
"family": "Riedy",
"given": "Jason"
},
{
"family": "Bader",
"given": "David"
},
{
"family": "Ediger",
"given": "David"
}
],
"container-title": "NSF workshop on accelerators for data-intensive applications",
"id": "2010riedyApplicationsSocialNetworks",
"issued": {
"date-parts": [
[
2010,
10
]
]
},
"keyword": "ejr-CV,graph analysis,hpda,parallel algorithm,presentation,streaming data",
"title": "Applications in social networks",
"type": "manuscript"
},
{
"URL": "http://issuu.com/readwritepoem/docs/read_write_poem_napowrimo_anthology",
"author": [
{
"family": "Riedy",
"given": "E. Jason"
}
],
"container-title": "Read write poem NaPoWriMo anthology",
"editor": [
{
"family": "Guthrie",
"given": "Dana Martin"
}
],
"id": "2010riedyHereFarthestPoint",
"issued": {
"date-parts": [
[
2010,
9
]
]
},
"keyword": "ejr-CV,non-technical,poetry",
"page": "86",
"publisher": "issuu.com",
"title": "Here, on the farthest point of the peninsula",
"type": "chapter"
},
{
"URL": "http://www.eecs.berkeley.edu/Pubs/TechRpts/2010/EECS-2010-172.html",
"abstract": "Solving square linear systems of equations Ax=b is one of the primary workhorses in scientific computing. With asymptotically and practically small amounts of extra calculation and higher precision, we can render solution techniques dependable. We produce a solution with tiny error for almost all systems where we should expect a tiny error, and we correctly flag potential failures. Our method uses a proven technique: iterative refinement. We extend prior work by applying extra precision not only in calculating the residual b-Ayi of an intermediate solution yi but also in carrying that intermediate solution yi. Analysis shows that extra precision in the intermediate solutions lowers the limiting backward error (measuring perturbations in the initial problem) to levels that produce a forward error (measuring perturbations in the solution) not much larger than the precision used to store the result. We also demonstrate that condition estimation is not necessary for determining success, reducing the computation in refinement substantially. This basic, dependable solver applies to typical dense LU factorization methods using partial pivoting as well as methods that risk greater failure by choosing pivots for non-numerical reasons. Sparse factorization methods may choose pivots to promote structural sparsity or even choose pivots before factorization to decouple the phases. We show through experiments that solutions using these restrictive pivoting methods still have small error so long as an estimate of factorization quality, the growth factor, does not grow too large. Our refinement algorithm dependably flags such failures. Additionally, we find a better choice of heuristic for sparse static pivoting than the defaults in Li and Demmel’s SuperLU package. Static pivoting in a distributed-memory setting needs an algorithm for choosing pivots that does not rely on fitting the entire matrix into one memory space.\nWe investigate a set of algorithms, Bertsekas’s auction algorithms, for choosing a static pivoting via maximum weight perfect bipartite matching. Auction algorithms have a natural mapping to distributed memory computation through their bidding mechanism. We provide an analysis of the auction algorithm fitting it comfortably in linear optimization theory and characterizing approximately maximum weight perfect bipartite matches. These approximately maximum weight perfect matches work well as static pivot choices and can be computed much more quickly than the exact maximum weight matching. Finally, we consider the performance of auction algorithm implementations on a suite of real-world sparse problems. Sequential performance is roughly equivalent to existing implementations like Duff and Koster’s MC64, but varies widely with different parameter and input settings. The parallel performance is even more wildly unpredictable. Computing approximately maximum weight matchings helps performance somewhat, but we still conclude that the performance is too variable for a black-box solution method.",
"author": [
{
"family": "Riedy",
"given": "E. Jason"
}
],
"genre": "PhD thesis",
"id": "2010riedyMakingStaticPivoting",
"issued": {
"date-parts": [
[
2010,
12
]
]
},
"keyword": "ejr-CV,floating point,graph analysis,ieee754,linear algebra,parallel algorithm,phd-thesis",
"publisher": "EECS Department, University of California, Berkeley",
"title": "Making static pivoting scalable and dependable",
"type": "thesis"
},
{
"author": [
{
"family": "Bader",
"given": "David A."
},
{
"family": "Ediger",
"given": "David"
},
{
"family": "Riedy",
"given": "E. Jason"
}
],
"container-title": "full day tutorial",
"event-place": "Columbia, MD",
"id": "2011baderParallelProgrammingGraph",
"issued": {
"date-parts": [
[
2011,
9
]
]
},
"keyword": "ejr-CV,graph analysis,high performance data analysis,streaming data,tutorial",
"title": "Parallel programming for graph analysis",
"type": "speech"
},
{
"URL": "http://www.cc.gatech.edu/~bader/papers/GraphAnalysisTutorial-PPoPP2011.html",
"abstract": "An increasingly fast-paced, digital world has produced an ever-growing volume of petabyte-sized datasets. At the same time, terabytes of new, unstructured data arrive daily. As the desire to ask more detailed questions about these massive streams has grown, parallel software and hardware have only recently begun to enable complex analytics in this non-scientific space. In this tutorial, we will discuss the open problems facing us with analyzing this \"data deluge\". We will present algorithms and data structures capable of analyzing spatio-temporal data at massive scale on parallel systems. We will try to understand the difficulties and bottlenecks in parallel graph algorithm design on current systems and will show how multithreaded and hybrid systems can overcome these challenges. We will demonstrate how parallel graph algorithms can be implemented on a variety of architectures using different programming models. The goal of this tutorial is to provide a comprehensive introduction to the field of parallel graph analysis to an audience with computing background, interested in participating in research and/or commercial applications of this field. Moreover, we will cover leading-edge technical and algorithmic developments in the field and discuss open problems and potential solutions.",
"author": [
{
"family": "Bader",
"given": "David A."
},
{
"family": "Ediger",
"given": "David"
},
{
"family": "Riedy",
"given": "E. Jason"
}
],
"container-title": "16th ACM SIGPLAN annual symposium on principles and practice of parallel programming (PPoPP)",
"event-place": "San Antonio, TX",
"id": "2011baderParallelProgrammingGrapha",
"issued": {
"date-parts": [
[
2011,
2
]
]
},
"keyword": "ejr-CV,graph analysis,high performance data analysis,streaming data,tutorial",
"title": "Parallel programming for graph analysis",
"type": "speech"
},
{
"author": [
{
"literal": "E. Jason Riedy (coPI)"
},
{
"literal": "David A. Bader (PI)"
}
],
"id": "2011e.jasonriedycopiBenchmarkingIBMPERCS",
"issued": {
"date-parts": [
[
2011,
6
]
]
},
"keyword": "ejr-CV,grants",
"note": "$287 994",
"publisher": "Georgia Institute of Technology",
"title": "Benchmarking the IBM PERCS and Cray CASCADE architectures",
"type": "report"
},
{
"author": [
{
"literal": "E. Jason Riedy (PI)"
}
],
"id": "2011e.jasonriedypiEvaluatingPGASScientific",
"issued": {
"date-parts": [
[
2011,
6
]
]
},
"keyword": "ejr-CV,grants",
"note": "250 000 hours of DoE processing time",
"publisher": "Georgia Institute of Technology",
"title": "Evaluating PGAS scientific graph analysis codes on the Gemini interconnect",
"type": "report"
},
{
"author": [
{
"literal": "E. Jason Riedy (PI)"
},
{
"literal": "Logan Moon"
}
],
"id": "2011e.jasonriedypiTeachingMassiveData",
"issued": {
"date-parts": [
[
2011,
7
]
]
},
"keyword": "ejr-CV,grants",
"note": "$223 800",
"publisher": "Georgia Institute of Technology",
"title": "Teaching massive data analysis and manycore computing",
"type": "report"
},
{
"DOI": "10.1109/IPDPS.2011.326",
"abstract": "Current online social networks are massive and still growing. For example, Facebook has over 500 million active users sharing over 30 billion items per month. The scale within these data streams has outstripped traditional graph analysis methods. Monitoring requires dynamic analysis rather than repeated static analysis. The massive state behind multiple persistent queries requires shared data structures and not problem-specific representations. We present a framework based on the STINGER data structure that can monitor a global property, connected components, on a graph of 16 million vertices at rates of up to 240 000 updates per second on a 32 processor Cray XMT. For very large scale-free graphs, our implementation uses novel batching techniques that exploit the scale-free nature of the data and run over three times faster than prior methods. Our framework handles, for the first time, real-world data rates, opening the door to higher-level analytics such as community and anomaly detection.",
"author": [
{
"family": "Ediger",
"given": "David"
},
{
"family": "Riedy",
"given": "E. Jason"
},
{
"family": "Bader",
"given": "David A."
},
{
"family": "Meyerhenke",
"given": "Henning"
}
],
"container-title": "5th workshop on multithreaded architectures and applications (MTAAP)",
"id": "2011edigerTrackingStructureStreaming",
"issued": {
"date-parts": [
[
2011,
5
]
]
},
"keyword": "ejr-CV,graph analysis,hpda,parallel algorithm,proceedings,streaming data",
"title": "Tracking structure of streaming social networks",
"type": "paper-conference"
},
{
"URL": "http://hdl.handle.net/1853/36980",
"abstract": "Analyzing massive social networks challenges both high-performance computers and human understanding. These massive networks cannot be visualized easily, and their scale makes applying complex analysis methods computationally expensive. We present a region-growing method for finding a smaller, more tractable subgraph, a community, given a few example seed vertices. Unlike existing work, we focus on a small number of seed vertices, from two to a few dozen. We also present the first comparison between five algorithms for expanding a small seed set into a community. Our comparison applies these algorithms to an R-MAT generated graph component with 240 thousand vertices and 32 million edges and evaluates the community size, modularity, Kullback-Leibler divergence, conductance, and clustering coefficient. We find that our new algorithm with a local modularity maximizing heuristic based on Clauset, Newman, and Moore performs very well when the output is limited to 100 or 1000 vertices. When run without a vertex size limit, a heuristic from McCloskey and Bader generates communities containing around 60% of the graph’s vertices and having a small conductance and modularity appropriate to the result size. A personalized PageRank algorithm based on Andersen, Lang, and Chung also performs well with respect to our metrics.",
"author": [
{
"family": "Riedy",
"given": "Jason"
},
{
"family": "Bader",
"given": "David A."
},
{
"family": "Jiang",
"given": "Karl"
},
{
"family": "Pande",
"given": "Pushkar"
},
{
"family": "Sharma",
"given": "Richa"
}
],
"id": "2011riedyDetectingCommunitiesGiven",
"issued": {
"date-parts": [
[
2011,
2
]
]
},
"keyword": "ejr-CV,technical-report",
"number": "GT-CSE-11-01",
"publisher": "Georgia Institute of Technology",
"title": "Detecting communities from given seeds in social networks",
"type": "report"
},
{
"DOI": "10.1007/978-3-642-31464-3_29",
"abstract": "Tackling the current volume of graph-structured data requires parallel tools. We extend our work on analyzing such massive graph data with the first massively parallel algorithm for community detection that scales to current data sizes, scaling to graphs of over 122 million vertices and nearly 2 billion edges in under 7300 seconds on a massively multithreaded Cray XMT. Our algorithm achieves moderate parallel scalability without sacrificing sequential operational complexity. Community detection partitions a graph into subgraphs more densely connected within the subgraph than to the rest of the graph. We take an agglomerative approach similar to Clauset, Newman, and Moore’s sequential algorithm, merging pairs of connected intermediate subgraphs to optimize different graph properties. Working in parallel opens new approaches to high performance. On smaller data sets, we find the output’s modularity compares well with the standard sequential algorithms.",
"author": [
{
"family": "Riedy",
"given": "E. Jason"
},
{
"family": "Meyerhenke",
"given": "Henning"
},
{
"family": "Ediger",
"given": "David"
},
{
"family": "Bader",
"given": "David A."
}
],
"container-title": "9th international conference on parallel processing and applied mathematics (PPAM11)",
"id": "2011riedyParallelCommunityDetection",
"issued": {
"date-parts": [
[
2011,
9
]
]
},
"keyword": "ejr-CV,graph analysis,hpda,parallel algorithm,proceedings",
"publisher": "Springer",
"title": "Parallel community detection for massive graphs",
"type": "paper-conference"
},
{
"URL": "http://purl.oclc.org/NET/jason-riedy/resume/material/GT-STING-for-Intel-beamer.pdf",
"author": [
{
"family": "Riedy",
"given": "Jason"
},
{
"family": "Bader",
"given": "David A."
},
{
"family": "Meyerhenke",
"given": "Henning"
},
{
"family": "Ediger",
"given": "David"
},
{
"family": "Mattson",
"given": "Timothy"
}
],
"id": "2011riedySTINGSpatiotemporalInteraction",
"issued": {
"date-parts": [
[
2011,
8
]
]
},
"keyword": "ejr-CV,graph analysis,hpda,parallel algorithm,presentation,streaming data",
"title": "STING: Spatio-temporal interaction networks and graphs for Intel platforms",
"title-short": "STING",
"type": "manuscript"
},
{
"URL": "https://www.lulu.com/en/us/shop/kaspalita-and-fiona-robyn/pay-attention-a-river-of-stones/ebook/product-1vkzw9kr.html",
"author": [
{
"family": "Riedy",
"given": "Jason"
}
],
"container-title": "Pay attention: A river of stones",
"editor": [
{
"family": "Robyn",
"given": "Fiona"
},
{
"literal": "Kaspalita"
}
],
"id": "2011riedyStormsComingWhen",
"issued": {
"date-parts": [
[
2011,
3
]
]
},
"keyword": "ejr-CV,non-technical,poetry",
"page": "77",
"publisher": "lulu.com",
"title": "The storm’s coming when the chickens spread out",
"type": "chapter"
},
{
"URL": "http://purl.oclc.org/NET/jason-riedy/resume/material/GraphEx-2011.pdf",
"author": [
{
"family": "Riedy",
"given": "Jason"
},
{
"family": "Ediger",
"given": "David"
},
{
"family": "Bader",
"given": "David A."
},
{
"family": "Meyerhenke",
"given": "Henning"
}
],
"id": "2011riedyTrackingStructureStreaming",
"issued": {
"date-parts": [
[
2011,
8
]
]
},
"keyword": "ejr-CV,graph analysis,hpda,presentation,streaming data",
"note": "Invited presentation.",
"title": "Tracking structure of streaming social networks",
"type": "manuscript"
},
{
"URL": "http://www.slideshare.net/jasonriedy/streaming-graph-analytics-for-massive-graphs",
"abstract": "Emerging real-world graph problems include detecting community structure in large social networks, improving the resilience of the electric power grid, and detecting and preventing disease in human populations. The volume and richness of data combined with its rate of change renders monitoring properties at scale by static recomputation infeasible. We approach these problems with massive, fine-grained parallelism across different shared memory architectures both to compute solutions and to explore the sensitivity of these solutions to natural bias and omissions within the data.",
"author": [
{
"family": "Bader",
"given": "David A."
},
{
"family": "Ediger",
"given": "David"
},
{
"family": "Riedy",
"given": "Jason"
}
],
"event-place": "Minneapolis, MN",
"id": "2012baderStreamingGraphAnalytics",
"issued": {
"date-parts": [
[
2012,
7
]
]
},
"keyword": "ejr-CV,graph analysis,hpda,parallel algorithm,presentation,streaming data",
"title": "Streaming graph analytics for massive graphs",
"type": "speech"
},
{
"author": [
{
"literal": "E. Jason Riedy (coPI)"
},
{
"literal": "David A. Bader (PI)"
}
],
"id": "2012e.jasonriedycopiGRATEFULGRaphAnalysis",
"issued": {
"date-parts": [
[
2012,
8
]
]
},
"keyword": "ejr-CV,grants",
"note": "$2 929 819",
"number": "HR0011-13-2-0001",
"publisher": "Georgia Institute of Technology",
"title": "GRATEFUL: GRaph Analysis Tackling power EFficiency, Uncertainty, and Locality",
"title-short": "GRATEFUL",
"type": "report"
},
{
"author": [
{
"literal": "E. Jason Riedy (PI)"
},
{
"literal": "David A. Bader"
}
],
"id": "2012e.jasonriedypiOracleMultithreadedAlgorithms",
"issued": {
"date-parts": [
[
2012,
4
]
]
},
"keyword": "ejr-CV,grants",
"note": "$118 000",
"publisher": "Georgia Institute of Technology",
"title": "Oracle: Multithreaded algorithms",
"title-short": "Oracle",
"type": "report"
},
{
"author": [
{
"family": "Ediger",
"given": "David"
},
{
"family": "Riedy",
"given": "E. Jason"
},
{
"family": "Meyerhenke",
"given": "Henning"
},
{
"family": "Bader",
"given": "David A."
}
],
"event-place": "Savannah, GA",
"id": "2012edigerAnalyzingMassiveNetworks",
"issued": {
"date-parts": [
[
2012,
2
]
]
},
"keyword": "ejr-CV,graph analysis,hpda,parallel algorithm,presentation,streaming data",
"title": "Analyzing massive networks with GraphCT",
"type": "speech"
},
{
"URL": "http://www.cc.gatech.edu/~bader/papers/GraphAnalysisTutorial-PPoPP2012.html",
"abstract": "An increasingly fast-paced, digital world has produced an ever-growing volume of petabyte-sized datasets. At the same time, terabytes of new, unstructured data arrive daily. As the desire to ask more detailed questions about these massive streams has grown, parallel software and hardware have only recently begun to enable complex analytics in this non-scientific space. In this tutorial, we will discuss the open problems facing us with analyzing this \"data deluge\". We will present algorithms and data structures capable of analyzing spatio-temporal data at massive scale on parallel systems. We will try to understand the difficulties and bottlenecks in parallel graph algorithm design on current systems and will show how multithreaded and hybrid systems can overcome these challenges. We will demonstrate how parallel graph algorithms can be implemented on a variety of architectures using different programming models. The goal of this tutorial is to provide a comprehensive introduction to the field of parallel graph analysis to an audience with computing background, interested in participating in research and/or commercial applications of this field. Moreover, we will cover leading-edge technical and algorithmic developments in the field and discuss open problems and potential solutions.",
"author": [
{
"family": "Ediger",
"given": "David"
},
{
"family": "Riedy",
"given": "Jason"
},
{
"family": "McColl",
"given": "Rob"
},
{
"family": "Bader",
"given": "David A."
}
],
"container-title": "17th ACM SIGPLAN annual symposium on principles and practice of parallel programming (PPoPP)",
"event-place": "New Orleans, LA",
"id": "2012edigerParallelProgrammingGraph",
"issued": {
"date-parts": [
[
2012,
2
]
]
},
"keyword": "ejr-CV,graph analysis,high performance data analysis,streaming data,tutorial",
"title": "Parallel programming for graph analysis",
"type": "speech"
},
{
"DOI": "10.1109/HPEC.2012.6408680",
"abstract": "The current research focus on “big data” problems highlights the scale and complexity of analytics required and the high rate at which data may be changing. In this paper, we present our high performance, scalable and portable software, Spatio-Temporal Interaction Networks and Graphs Extensible Representation (STINGER), that includes a graph data structure that enables these applications. Key attributes of STINGER are fast insertions, deletions, and updates on semantic graphs with skewed degree distributions. We demonstrate a process of algorithmic and architectural optimizations that enable high performance on the Cray XMT family and Intel multicore servers. Our implementation of STINGER on the Cray XMT processes over 3 million updates per second on a scale-free graph with 537 million edges.",
"author": [
{
"family": "Ediger",
"given": "David"
},
{
"family": "McColl",
"given": "Robert"
},
{
"family": "Riedy",
"given": "Jason"
},
{
"family": "Bader",
"given": "David A."
}
],
"container-title": "The IEEE high performance extreme computing conference (HPEC)",
"id": "2012edigerSTINGERHighPerformance",
"issued": {
"date-parts": [
[
2012,
9
]
]
},
"keyword": "ejr-CV,graph analysis,hpda,parallel algorithm,proceedings,streaming data",
"note": "Best paper award",
"publisher-place": "Waltham, MA",
"title": "STINGER: High performance data structure for streaming graphs",
"title-short": "STINGER",
"type": "paper-conference"
},
{
"author": [
{
"family": "Meyerhenke",
"given": "Henning"
},
{
"family": "Riedy",
"given": "E. Jason"
},
{
"family": "Bader",
"given": "David A."
}
],
"event-place": "Savannah, GA",
"id": "2012meyerhenkeParallelCommunityDetection",
"issued": {
"date-parts": [
[
2012,
2
]
]
},
"keyword": "ejr-CV,graph analysis,hpda,parallel algorithm,presentation,streaming data",
"title": "Parallel community detection in streaming graphs",
"type": "speech"
},
{
"DOI": "10.1109/ICASSP.2012.6289126",
"URL": "http://www.slideshare.net/jasonriedy/icassp-2012-analysis-of-streaming-social-networks-and-graphs-on-multicore-architectures",
"abstract": "Analyzing static snapshots of massive, graph-structured data cannot keep pace with the growth of social networks, financial transactions, and other valuable data sources. We introduce a framework, STING (Spatio-Temporal Interaction Networks and Graphs), and evaluate its performance on multicore, multisocket Intel(R)-based platforms. STING achieves rates of around 100 000 edge updates per second on large, dynamic graphs with a single, general data structure. We achieve speed-ups of up to 1000× over parallel static computation, improve monitoring a dynamic graph’s connected components, and show an exact algorithm for maintaining local clustering coefficients performs better on Intel-based platforms than our earlier approximate algorithm.",
"author": [
{
"family": "Riedy",
"given": "Jason"
},
{
"family": "Meyerhenke",
"given": "Henning"
},
{
"family": "Bader",
"given": "David A."
},
{
"family": "Ediger",
"given": "David"
},
{
"family": "Mattson",
"given": "Timothy G."
}
],
"container-title": "IEEE international conference on acoustics, speech and signal processing (ICASSP)",
"id": "2012riedyAnalysisStreamingSocial",
"issued": {
"date-parts": [
[
2012,
3
]
]
},
"keyword": "ejr-CV,graph analysis,hpda,parallel algorithm,proceedings,streaming data",
"publisher-place": "Kyoto, Japan",
"title": "Analysis of streaming social networks and graphs on multicore architectures",
"type": "chapter"
},
{
"URL": "http://www.cc.gatech.edu/dimacs10/papers/[15]-dimacs10-community-detection.pdf",
"abstract": "Tackling the current volume of graph-structured data requires parallel tools. We extend our work on analyzing such massive graph data with a massively parallel algorithm for community detection that scales to current data sizes, clustering a real-world graph of over 100 million vertices and over 3 billion edges in under 500 seconds on a four- processor Intel E7-8870-based server. Our algorithm achieves moderate parallel scalability without sacrificing sequential operational complexity. Community detection partitions a graph into subgraphs more densely connected within the subgraph than to the rest of the graph. We take an agglomerative approach similar to Clauset, Newman, and Moore’s sequential algorithm, merging pairs of connected intermediate subgraphs to optimize different graph properties. Working in parallel opens new approaches to high performance. We improve performance of our parallel community detection algorithm on both the Cray XMT2 and OpenMP platforms and adapt our algorithm to the DIMACS Implementation Challenge data set.",
"author": [
{
"family": "Riedy",
"given": "E. Jason"
},
{
"family": "Meyerhenke",
"given": "Henning"
},
{
"family": "Ediger",
"given": "David"
},
{
"family": "Bader",
"given": "David A."
}
],
"container-title": "10th DIMACS implementation challenge workshop - graph partitioning and graph clustering",
"id": "2012riedyParallelCommunityDetection",
"issued": {
"date-parts": [
[
2012,
2
]
]
},
"keyword": "ejr-CV,graph analysis,hpda,parallel algorithm,proceedings",
"note": "Won first place in the Mix Challenge and Mix Pareto Challenge",
"publisher-place": "Atlanta, Georgia",
"title": "Parallel community detection for massive graphs",
"type": "paper-conference"
},
{
"DOI": "10.1090/conm/588/11703",
"ISBN": "978-0-8218-9038-7",
"abstract": "Tackling the current volume of graph-structured data requires parallel tools. We extend our work on analyzing such massive graph data with a massively parallel algorithm for community detection that scales to current data sizes, clustering a real-world graph of over 100 million vertices and over 3 billion edges in under 500 seconds on a four-processor Intel E7-8870-based server. Our algorithm achieves moderate parallel scalability without sacrificing sequential operational complexity. Community detection partitions a graph into subgraphs more densely connected within the subgraph than to the rest of the graph. We take an agglomerative approach similar to Clauset, Newman, and Moore’s sequential algorithm, merging pairs of connected intermediate subgraphs to optimize different graph properties. Working in parallel opens new approaches to high performance. We improve performance of our parallel community detection algorithm on both the Cray XMT2 and OpenMP platforms and adapt our algorithm to the DIMACS Implementation Challenge data set.",
"author": [
{
"family": "Riedy",
"given": "E. Jason"
},
{
"family": "Meyerhenke",
"given": "Henning"
},
{
"family": "Ediger",
"given": "David"
},
{
"family": "Bader",
"given": "David A."
}
],
"collection-title": "Contemporary mathematics",
"container-title": "Graph partitioning and graph clustering",
"editor": [
{
"family": "Bader",
"given": "David A."
},
{
"family": "Meyerhenke",
"given": "Henning"
},
{
"family": "Sanders",
"given": "Peter"
},
{
"family": "Wagner",
"given": "Dorothea"
}
],
"id": "2012riedyParallelCommunityDetectiona",
"issued": {
"date-parts": [
[
2012
]
]
},
"keyword": "book-chapter,community detection,ejr-CV,graph analysis,hpda,parallel algorithm",
"page": "207-222",
"publisher": "American Mathematical Society",
"title": "Parallel community detection for massive graphs",
"type": "chapter",
"volume": "588"
},
{
"URL": "http://www.slideshare.net/jasonriedy/siam-pp-2012-scalable-algorithms-for-analysis-of-massive-streaming-graphs",
"abstract": "Graph-structured data in social networks, finance, network security, and others not only are massive but also under continual change. These changes often are scattered across the graph. Repeating complex global analyses on massive snapshots to capture only what has changed is inefficient. We discuss analysis algorithms for streaming graph data that maintain both local and global metrics. We extract parallelism from both analysis kernel and graph data to scale performance to real-world sizes.",
"author": [
{
"family": "Riedy",
"given": "E. Jason"
},
{
"family": "Meyerhenke",
"given": "Henning"
}
],
"event-place": "Savannah, GA",
"id": "2012riedyScalableAlgorithmsAnalysis",
"issued": {
"date-parts": [
[
2012,
2
]
]
},
"keyword": "ejr-CV,graph analysis,hpda,parallel algorithm,presentation,streaming data",
"note": "Minisymposium organizer with Henning Meyerhenke.",
"title": "Scalable algorithms for analysis of massive, streaming graphs",
"type": "speech"
},
{
"DOI": "10.1109/IPDPSW.2012.203",
"abstract": "The volume of existing graph-structured data requires improved parallel tools and algorithms. Finding communities, smaller subgraphs densely connected within the subgraph than to the rest of the graph, plays a role both in developing new parallel algorithms as well as opening smaller portions of the data to current analysis tools. We improve performance of our parallel community detection algorithm by 20% on the massively multithreaded Cray XMT, evaluate its performance on the next-generation Cray XMT2, and extend its reach to Intel-based platforms with OpenMP. To our knowledge, not only is this the first massively parallel community detection algorithm but also the only such algorithm that achieves excellent performance and good parallel scalability across all these platforms. Our implementation analyzes a moderate sized graph with 105 million vertices and 3.3 billion edges in around 500 seconds on a four processor, 80-logical-core Intel-based system and 1100 seconds on a 64-processor Cray XMT2.",
"author": [
{
"family": "Riedy",
"given": "E. Jason"
},
{
"family": "Bader",
"given": "David A."
},
{
"family": "Meyerhenke",
"given": "Henning"
}
],
"container-title": "6th workshop on multithreaded architectures and applications (MTAAP)",
"id": "2012riedyScalableMultithreadedCommunity",
"issued": {
"date-parts": [
[
2012,
5
]
]
},
"keyword": "ejr-CV,graph analysis,hpda,parallel algorithm,proceedings",
"title": "Scalable multi-threaded community detection in social networks",
"type": "paper-conference"
},
{
"abstract": "Current tools for analyzing graph-structured data and semantic networks focus on static graphs. Our STING package tackles analysis of streaming graphs like today’s social networks and communication tools. STING maintains a massive graph under changes while coordinating analysis kernels to achieve analysis at real-world data rates. We show examples of local metrics like clustering coefficients and global metrics like connected components and agglomerative clustering. STING supports parallel Intel architectures as well as the Cray XMT.",
"author": [
{
"family": "Riedy",
"given": "E. Jason"
},
{
"family": "Ediger",
"given": "David"
},
{
"family": "Meyerhenke",
"given": "Henning"
},
{
"family": "Bader",
"given": "David A."
}
],
"event-place": "Savannah, GA",
"id": "2012riedySTINGSoftwareAnalysis",
"issued": {
"date-parts": [
[
2012,
2
]
]
},
"keyword": "ejr-CV,graph analysis,hpda,parallel algorithm,presentation,streaming data",
"title": "STING: Software for analysis of spatio-temporal interaction networks and graphs",
"title-short": "STING",
"type": "speech"
},
{
"URL": "http://www.slideshare.net/jasonriedy/gt-stingintelslides",
"author": [
{
"family": "Riedy",
"given": "Jason"
},
{
"family": "Bader",
"given": "David A."
},
{
"family": "Ediger",
"given": "David"
},
{
"family": "McColl",
"given": "Rob"
},
{
"family": "Mattson",
"given": "Timothy G."
}
],
"id": "2012riedySTINGSpatiotemporalInteraction",
"issued": {
"date-parts": [
[
2012,
7
]
]
},
"keyword": "ejr-CV,graph analysis,hpda,parallel algorithm,presentation,streaming data",
"title": "STING: Spatio-temporal interaction networks and graphs for Intel platforms",
"title-short": "STING",
"type": "manuscript"
},
{
"abstract": "The DARPA High Productivity Computing Systems (HPCS) program has been focused on providing a new generation of economically viable high productivity computing systems for national security, scientific, industrial and commercial applications. This program was unique because it focused on system productivity that was defined to include enhancing performance, programmability, portability, usability, manageability and robustness of systems as opposed to just being focused on one execution time performance metric. The BOF is for anyone interested in learning about the two HPCS systems and how productivity in High Performance Computing has been enhanced.",
"author": [
{
"family": "Smith",
"given": "Lauren L."
},
{
"family": "Shaffer",
"given": "Dolores A."
}
],
"id": "2012smithDARPAsHighProductivity",
"issued": {
"date-parts": [
[
2012,
11
]
]
},
"keyword": "ejr-CV,graph analysis,hpda,invited-presentation,novel architectures,streaming data",
"note": "Invited panel speaker",
"title": "DARPA’s High Productivity Computing Systems Program: A Final Report",
"title-short": "DARPA’s High Productivity Computing Systems Program",
"type": "manuscript"
},
{
"author": [
{
"literal": "Viktor Prasanna (PI USC)"
},
{
"literal": "Manish Parashar (PI Rutgers)"
},
{
"literal": "Jason Riedy (coPI GT)"
},
{
"literal": "Rich Vuduc (coPI GT)"
},
{
"literal": "Yogesh Simmhan (coPI USC)"
},
{
"literal": "Shantenu Jha (coPI Rutgers)"
},
{
"literal": "David A. Bader (PI GT)"
}
],
"id": "2012viktorprasannapiuscCollaborativeResearchSoftware",
"issued": {
"date-parts": [
[
2012,
10
]
]
},
"keyword": "ejr-CV,grants",
"note": "$104 386",
"number": "NSF 1216504",
"publisher": "Georgia Institute of Technology",
"title": "Collaborative research: Software infrastructure for accelerating grand challenge science with future computing platforms",
"title-short": "Collaborative research",
"type": "report"
},
{
"URL": "http://www.graphanalysis.org/SIAM-CSE13/01_Bader.pdf",
"abstract": "Emerging real-world graph problems include detecting community structure in large social networks, improving the resilience of the electric power grid, and detecting and preventing disease in human populations. We discuss the opportunities and challenges in massive data-intensive computing for applications in social network analysis, genomics, and security. The explosion of real-world graph data poses substantial challenges for software, hardware, algorithms, and application experts.",
"author": [
{
"family": "Bader",
"given": "David A."
},
{
"family": "Meyerhenke",
"given": "Henning"
},
{
"family": "Riedy",
"given": "Jason"
}
],
"event-place": "Boston, MA",
"id": "2013baderApplicationsChallengesLargescale",
"issued": {
"date-parts": [
[
2013,
2
]
]
},
"keyword": "ejr-CV,graph analysis,hpda,presentation,streaming data",
"title": "Applications and challenges in large-scale graph analysis",
"type": "speech"
},
{
"DOI": "10.1002/9781118640708.ch25",
"ISBN": "978-0-470-93688-7",
"abstract": "Handling the constant stream of data from health care, security, business, and social network applications requires new algorithms and data structures. We present a new approach for parallel massive analysis of streaming, temporal, graph-structured data. For this purpose we examine data structure and algorithm trade-offs that extract the parallelism necessary for high-performance updating analysis of massive graphs. As a result of this study, we propose the extensible and flexible data structure for massive graphs called STINGER (Spatio-Temporal Interaction Networks and Graphs Extensible Representation). Two case studies demonstrate our new approach’s effectiveness. The first one computes a dynamic graph’s vertices’ clustering coefficients. We show that incremental updates are far more efficient than global recomputation. Within this kernel, we compare three methods for dynamically updating local clustering coefficients: a brute-force local recalculation, a sorting algorithm, and our new approximation method using a Bloom filter. On 32 processors of a with a synthetic scale-free graph of 2²⁴≈16 million vertices and 2²⁹≈537 million edges, the brute-force method processes a mean of over 50 000 updates per second, while our Bloom filter approaches 200 000 updates per second. The second case study monitors a global feature, a dynamic graph’s connected components. We use similar algorithmic ideas as before to exploit the parallelism in the problem and provided by the hardware architecture. On a 16 million vertex graph, we obtain rates of up to 240 000 updates per second on 32 processors of a . For the large scale-free graphs typical in our applications, our implementation uses novel batching techniques that exploit the scale-free nature of the data and run over three times faster than prior methods. Our new framework is the first to handle real-world data rates, opening the door to higher-level analytics such as community and anomaly detection.",
"author": [
{
"family": "Ediger",
"given": "David"
},
{
"family": "Riedy",
"given": "Jason"
},
{
"family": "Bader",
"given": "David A."
},
{
"family": "Meyerhenke",
"given": "Henning"
}
],
"collection-title": "Parallel and distributed computing",
"container-title": "Large scale network-centric computing systems",
"editor": [
{
"family": "Sarbazi-azad",
"given": "Hamid"
},
{
"family": "Zomaya",
"given": "Albert"
}
],
"id": "2013edigerComputationalGraphAnalytics",
"issued": {
"date-parts": [
[
2013,
7
]
]
},
"keyword": "book-chapter,ejr-CV,graph analysis,hpda,parallel algorithm,streaming data",
"publisher": "Wiley",
"title": "Computational graph analytics for massive streaming data",
"type": "chapter"
},
{
"DOI": "10.1109/TPDS.2012.323",
"ISSN": "1045-9219",
"URL": "http://dx.doi.org/10.1109/TPDS.2012.323",
"abstract": "The digital world has given rise to massive quantities of data that include rich semantic and complex networks. A social graph, for example, containing hundreds of millions of actors and tens of billions of relationships is not uncommon. Analyzing these large data sets, even to answer simple analytic queries, often pushes the limits of algorithms and machine architectures. We present GraphCT, a scalable framework for graph analysis using parallel and multithreaded algorithms on shared memory platforms. Utilizing the unique characteristics of the Cray XMT, GraphCT enables fast network analysis at unprecedented scales on a variety of input data sets. On a synthetic power law graph with 2 billion vertices and 17 billion edges, we can find the connected components in 2 minutes. We can estimate the betweenness centrality of a similar graph with 537 million vertices and over 8 billion edges in under 1 hour. GraphCT is built for portability and performance.",
"author": [
{
"family": "Ediger",
"given": "David"
},
{
"family": "Jiang",
"given": "Karl"
},
{
"family": "Riedy",
"given": "Jason"
},
{
"family": "Bader",
"given": "David A."
}
],
"container-title": "IEEE Transactions in Parallel and Distributed Systems",
"id": "2013edigerGraphCTMultithreadedAlgorithms",
"issued": {
"date-parts": [
[
2013,
9
]
]
},
"keyword": "ejr-CV,refereed",
"page": "2220-2229",
"title": "GraphCT: Multithreaded algorithms for massive graph analysis",
"title-short": "GraphCT",
"type": "article-journal"
},
{
"abstract": "Analyzing static snapshots of massive, graph-structured data cannot keep pace with the growth of social networks, financial transactions, and other valuable data sources. Our software framework, STING (Spatio-Temporal Interaction Networks and Graphs), uses a scalable, high-performance graph data structure to enable these applications. STING supports fast insertions, deletions, and updates on graphs with semantic information and skewed degree distributions. STING achieves large speed-ups over parallel, static recomputation on both common multicore and specialized multithreaded platforms.",
"author": [
{
"family": "McColl",
"given": "Robert C."
},
{
"family": "Ediger",
"given": "David"
},
{
"family": "Bader",
"given": "David A."
},
{
"family": "Riedy",
"given": "Jason"
}
],
"event-place": "Boston, MA",
"id": "2013mccollAnalyzingGraphStructure",
"issued": {
"date-parts": [
[
2013,
2
]
]
},
"keyword": "ejr-CV,graph analysis,hpda,parallel algorithm,presentation,streaming data",
"title": "Analyzing graph structure in streaming data with STINGER",
"type": "speech"
},
{
"DOI": "10.1145/2425676.2425689",
"ISSN": "1528-4972",
"abstract": "Analyzing massive streaming graphs efficiently requires new algorithms, data structures, and computing platforms.",
"author": [
{
"family": "Riedy",
"given": "Jason"
},
{
"family": "Bader",
"given": "David A."
}
],
"container-title": "XRDS: Crossroads, The ACM Magazine for Students — Scientific Computing",
"id": "2013riedyMassiveStreamingData",
"issue": "3",
"issued": {
"date-parts": [
[
2013,
3
]
]
},
"keyword": "ejr-CV,graph analysis,high performance data analysis,streaming data,trade-pub",
"page": "37-43",
"publisher": "ACM",
"publisher-place": "New York, NY, USA",
"title": "Massive streaming data analytics: A graph-based approach",
"title-short": "Massive streaming data analytics",
"type": "article-magazine",
"volume": "19"
},
{
"DOI": "10.1109/IPDPSW.2013.229",
"abstract": "Analyzing static snapshots of massive, graph-structured data cannot keep pace with the growth of social networks, financial transactions, and other valuable data sources. Current state-of-the-art industrial methods analyze these streaming sources using only simple, aggregate metrics. There are few existing scalable algorithms for monitoring complex global quantities like decomposition into community structure. Using our framework STING, we present the first known parallel algorithm specifically for monitoring communities in this massive, streaming, graph-structured data. Our algorithm performs incremental re-agglomeration rather than starting from scratch after each batch of changes, reducing the problem’s size to that of the change rather than the entire graph. We analyze our initial implementation’s performance on multithreaded platforms for execution time and latency. On an Intel-based multithreaded platform, our algorithm handles up to 100 million updates per second on social networks with one to 30 million edges, providing a speed-up from 4× to 3700× over statically recomputing the decomposition after each batch of changes. Possibly because of our artificial graph generator, resulting communities’ modularity varies little from the initial graph.",
"author": [
{
"family": "Riedy",
"given": "E. Jason"
},
{
"family": "Bader",
"given": "David A."
}
],
"container-title": "7th workshop on multithreaded architectures and applications (MTAAP)",
"id": "2013riedyMultithreadedCommunityMonitoring",
"issued": {
"date-parts": [
[
2013,
5
]
]
},
"keyword": "ejr-CV,graph analysis,hpda,parallel algorithm,proceedings,streaming data",
"publisher-place": "Boston, MA",
"title": "Multithreaded community monitoring for massive streaming graph data",
"type": "paper-conference"
},
{
"author": [
{
"family": "Riedy",
"given": "Jason"
}
],
"event-place": "San Francisco, CA",
"id": "2013riedySTINGERAnalyzingMassive",
"issued": {
"date-parts": [
[
2013,
7
]
]
},
"keyword": "ejr-CV,graph analysis,hpda,presentation,streaming data",
"title": "STINGER: Analyzing massive, streaming graphs",
"title-short": "STINGER",
"type": "speech"
},
{
"URL": "http://future-compute.usc.edu/index.php/NGS_Workshop",
"author": [
{
"family": "Swenson",
"given": "Shel"
},
{
"family": "Simmhan",
"given": "Yogesh"
},
{
"family": "Prasanna",
"given": "Viktor"
},
{
"family": "Parashar",
"given": "Manish"
},
{
"family": "Bader",
"given": "David"
},
{
"family": "Riedy",
"given": "Jason"
},
{
"family": "Vuduc",
"given": "Richard"
}
],
"id": "2013swensonReportWorkshopAccelerating",
"issued": {
"date-parts": [
[
2013,
5
]
]
},
"keyword": "accelerator,ejr-CV,high performance data analysis,parallel algorithm,unpublished",
"note": "Co-located with IPDPS 2013",
"publisher-place": "Boston, MA",
"title": "Report on “workshop on accelerating bioinformatics applications enabled by NextGen-sequencing”",
"type": "manuscript"
},
{
"URL": "http://future-compute.usc.edu/index.php/NGS_Bioinformatics_Workshop",
"author": [
{
"family": "Swenson",
"given": "Shel"
},
{
"family": "Simmhan",
"given": "Yogesh"
},
{
"family": "Prasanna",
"given": "Viktor"
},
{
"family": "Parashar",
"given": "Manish"
},
{
"family": "Bader",
"given": "David"
},
{
"family": "Riedy",
"given": "Jason"
},
{
"family": "Vuduc",
"given": "Richard"
}
],
"id": "2013swensonReportWorkshopChallenges",
"issued": {
"date-parts": [
[
2013,
9
]
]
},
"keyword": "accelerator,ejr-CV,high performance data analysis,parallel algorithm,unpublished",
"note": "in conjunction with ACM-BCB 2013",
"publisher-place": "Washington, DC",
"title": "Report on “workshop on challenges in accelerating next-gen sequencing (NGS) bioinformatics”",
"type": "manuscript"
},
{
"URL": "http://arxiv.org/abs/1309.1828",
"abstract": "DNA sequence analysis is fundamental to life science research. The rapid development of next generation sequencing (NGS) technologies, and the richness and diversity of applications it makes feasible, have created an enormous gulf between the potential of this technology and the development of computational methods to realize this potential. Bridging this gap holds possibilities for broad impacts toward multiple grand challenges and offers unprecedented opportunities for software innovation and research. We argue that NGS-enabled applications need a critical mass of sustainable software to benefit from emerging computing platforms’ transformative potential. Accumulating the necessary critical mass will require leaders in computational biology, bioinformatics, computer science, and computer engineering work together to identify core opportunity areas, critical software infrastructure, and software sustainability challenges. Furthermore, due to the quickly changing nature of both bioinformatics software and accelerator technology, we conclude that creating sustainable accelerated bioinformatics software means constructing a sustainable bridge between the two fields. In particular, sustained collaboration between domain developers and technology experts is needed to develop the accelerated kernels, libraries, frameworks and middleware that could provide the needed flexible link from NGS bioinformatics applications to emerging platforms.",
"author": [
{
"family": "Swenson",
"given": "Shel"
},
{
"family": "Simmhan",
"given": "Yogesh"
},
{
"family": "Prasanna",
"given": "Viktor"
},
{
"family": "Parashar",
"given": "Manish"
},
{
"family": "Riedy",
"given": "Jason"
},
{
"family": "Bader",
"given": "David"
},
{
"family": "Vuduc",
"given": "Richard"
}
],
"container-title": "First workshop on sustainable software for science: Practice and experiences (WSSSPE1)",
"id": "2013swensonSustainableSoftwareDevelopment",
"issued": {
"date-parts": [
[
2013,
11
]
]
},
"keyword": "accelerator,ejr-CV,high performance data analysis,parallel algorithm,unpublished",
"note": "held in conjunction with SC13, published electronically (http://wssspe.researchcomputing.org.uk/)",
"publisher-place": "Denver, CO",
"title": "Sustainable software development for next-gen sequencing (NGS) bioinformatics on emerging platforms",
"type": "paper-conference"
},
{
"URL": "http://arxiv.org/abs/1309.1828",
"author": [
{
"family": "Swenson",
"given": "Shel"
},
{
"family": "Simmhan",
"given": "Yogesh"
},
{
"family": "Prasanna",
"given": "Viktor K."
},
{
"family": "Parashar",
"given": "Manish"
},
{
"family": "Riedy",
"given": "E. Jason"
},
{
"family": "Bader",
"given": "David A."
},
{
"family": "Vuduc",
"given": "Richard W."
}
],
"id": "2013swensonSustainableSoftwareDevelopmenta",
"issued": {
"date-parts": [
[
2013
]
]
},
"keyword": "ejr-CV,technical-report",
"status": "pre-published",
"title": "Sustainable software development for next-gen sequencing (NGS) bioinformatics on emerging platforms",
"type": "webpage"
},
{
"author": [
{
"literal": "Viktor Prasanna (PI USC)"
},
{
"literal": "E. Jason Riedy (coPI GT)"
},
{
"literal": "Rich Vudic (coPI GT)"
},
{
"literal": "David A. Bader (PI GT)"
}
],
"id": "2013viktorprasannapiuscSI2SSICollaborativeXScala",
"issued": {
"date-parts": [
[
2013,
10
]
]
},
"keyword": "ejr-CV,grants",
"note": "$1 937 624, ($1 188 710 GA Tech portion)",
"number": "NSF ACI-1339745",
"publisher": "Georgia Institute of Technology",
"title": "SI2-SSI: Collaborative: The XScala project: A community repository for model-driven design and tuning of data-intensive applications for extreme-scale accelerator-based systems",
"title-short": "SI2-SSI",
"type": "report"
},
{
"DOI": "10.1109/HPEC.2014.7040980",
"abstract": "Applications of high-performance graph analysis range from computational biology to network security and even transportation. These applications often consider graphs under rapid change and are moving beyond HPC platforms into energy-constrained embedded systems. This paper optimizes one successful and demanding analysis kernel, betweenness centrality, for NVIDIA GPU accelerators in both environments. Our algorithm for static analysis is capable of exceeding 2 million traversed edges per second per watt (MTEPS/W). Optimizing the parallel algorithm and treating the dynamic problem directly achieves a 6.39× average speed-up and 84% average reduction in energy consumption.",
"author": [
{
"family": "McLaughlin",
"given": "Adam"
},
{
"family": "Riedy",
"given": "Jason"
},
{
"family": "Bader",
"given": "David A."
}
],
"container-title": "The IEEE high performance extreme computing conference (HPEC)",
"id": "2014mclaughlinOptimizingEnergyConsumption",
"issued": {
"date-parts": [
[
2014,
9
]
]
},
"keyword": "ejr-CV,graph analysis,hpda,parallel algorithm,proceedings",
"note": "“Rising Stars” section",
"publisher-place": "Waltham, MA",
"title": "Optimizing energy consumption and parallel performance for betweenness centrality using GPUs",
"type": "paper-conference"
},
{
"URL": "http://www.slideshare.net/jasonriedy/cmg-20141104",
"abstract": "High-performance graph analysis is unlocking knowledge in problems like anomaly detection in computer security, community structure in social networks, and many other data integration areas. While graphs provide a convenient abstraction, real-world problems’ sparsity and lack of locality challenge current systems. This talk will cover current trends ranging from massive scales to low-power, low-latency systems and summarize opportunities and directions for graphs and computing systems.",
"author": [
{
"family": "Riedy",
"given": "Jason"
},
{
"family": "Bader",
"given": "David A."
}
],
"container-title": "CMG performance and capacity",
"event-place": "Atlanta, GA",
"id": "2014riedyGraphAnalysisTrends",
"issued": {
"date-parts": [
[
2014,
11
]
]
},
"keyword": "ejr-CV,graph analysis,high performance data analysis,invited-presentation,parallel algorithm,streaming data",
"note": "Invited presentationInvited presentation",
"title": "Graph analysis trends and opportunities",
"type": "speech"
},
{
"author": [
{
"family": "Riedy",
"given": "Jason"
}
],
"event-place": "San Francisco, CA",
"id": "2014riedySTINGERAnalyzingMassive",
"issued": {
"date-parts": [
[
2014,
7
]
]
},
"keyword": "ejr-CV,graph analysis,hpda,presentation,streaming data",
"title": "STINGER: Analyzing massive, streaming graphs",
"title-short": "STINGER",
"type": "speech"
},
{
"URL": "http://www.slideshare.net/jasonriedy/stinger-multithreaded-graph-streaming",
"author": [
{
"family": "Riedy",
"given": "Jason"
},
{
"family": "Bader",
"given": "David A."
}
],
"container-title": "Graph Algorithms Building Blocks (GABB 2014)",
"event-place": "Phoeniz, AZ",
"id": "2014riedySTINGERMultithreadedGraph",
"issued": {
"date-parts": [
[
2014,
5
]
]
},
"keyword": "ejr-CV,graph analysis,high performance data analysis,invited-presentation,parallel algorithm,streaming data",
"note": "Invited presentation and panelist. (Workshop with IPDPS 2014)Invited presentation and panelist. (Workshop with IPDPS 2014)",
"title": "STINGER: Multi-threaded graph streaming",
"title-short": "STINGER",
"type": "speech"
},
{
"URL": "http://www.slideshare.net/jasonriedy/intel-20140117",
"author": [
{
"family": "Riedy",
"given": "Jason"
},
{
"family": "Bader",
"given": "David A."
},
{
"family": "Ediger",
"given": "David"
},
{
"family": "McColl",
"given": "Rob"
},
{
"family": "Mattson",
"given": "Timothy G."
}
],
"id": "2014riedySTINGSpatiotemporalInteraction",
"issued": {
"date-parts": [
[
2014,
1
]
]
},
"keyword": "ejr-CV,graph analysis,hpda,presentation,streaming data",
"title": "STING: Spatio-temporal interaction networks and graphs for Intel platforms",
"title-short": "STING",
"type": "manuscript"
},
{
"author": [
{
"literal": "Jack Dongarra (PI UTK)"
},
{
"literal": "Jason Riedy (coPI GT)"
},
{
"literal": "Richard Vuduc (coPI GT)"
},
{
"literal": "Piotr Luszczek (coPI UTK)"
},
{
"literal": "David A. Bader (PI GT)"
}
],
"id": "2015jackdongarrapiutkCollaborativeResearchEMBRACE",
"issued": {
"date-parts": [
[
2015,
9
]
]
},
"keyword": "ejr-CV,grants",
"note": "$125 000",
"number": "NSF 1535058",
"publisher": "Georgia Institute of Technology",
"title": "Collaborative Research: EMBRACE: Evolvable Methods for Benchmarking Realism through Application and Community Engagement",
"title-short": "Collaborative Research",
"type": "report"
},
{
"abstract": "Optimized GPU kernels are sufficiently complicated to write that they often are specialized to specific input data, target architectures, or applications. This paper presents a multi-search abstraction for computing multiple breadth-first searches in parallel and demonstrates a high-performance, general implementation. Our abstraction removes the burden of orchestrating graph traversal from the user while providing high performance and low energy usage, an often overlooked component of algorithm design. Energy consumption has become a first-class hardware design constraint for both massive and embedded computing platforms. Our abstraction can be applied to such problems as the all-pairs shortest-path problem, community detection, reachability querying, and others. To map graph traversal efficiently to NVIDIA GPUs, our hybrid implementation chooses between processing active vertices with a single thread or an entire warp based on vertex outdegree. For a set of twelve varied graphs, the implementation of our abstraction saves 42% time and 62% energy on average compared to representative implementations of specific applications from existing literature.",
"author": [
{
"family": "McLaughlin",
"given": "Adam"
},
{
"family": "Riedy",
"given": "Jason"
},
{
"family": "Bader",
"given": "David A."
}
],
"container-title": "The IEEE high performance extreme computing conference (HPEC)",
"id": "2015mclaughlinEnergyefficientAbstractionSimultaneous",
"issued": {
"date-parts": [
[
2015,
9
]
]
},
"keyword": "ejr-CV,graph analysis,hpda,parallel algorithm,proceedings",
"publisher-place": "Waltham, MA",
"title": "An energy-efficient abstraction for simultaneous breadth-first searches",
"type": "paper-conference"
},
{
"URL": "http://www.slideshare.net/jasonriedy/graph-analysis-beyond-linear-algebra",
"abstract": "High-performance graph analysis is unlocking knowledge in computer security, bioinformatics, social networks, and many other data integration areas. Graphs provide a convenient abstraction for many data problems beyond linear algebra. Some problems map directly to linear algebra. Others, like community detection, look eerily similar to sparse linear algebra techniques. And then there are algorithms that strongly resist attempts at making them look like linear algebra. This talk will cover recent results with an emphasis on streaming graph problems where the graph changes and results need updated with minimal latency. We’ll also touch on issues of sensitivity and reliability where graph analysis needs to learn from numerical analysis and linear algebra.",
"author": [
{
"family": "Riedy",
"given": "E. Jason"
}
],
"id": "2015riedyGraphAnalysisLinear",
"issued": {
"date-parts": [
[
2015,
10
]
]
},
"keyword": "blas,ejr-CV,graph analysis,lapack,linear algebra,presentation,streaming data",
"note": "Invited presentation",
"title": "Graph analysis beyond linear algebra",
"type": "manuscript"
},
{
"URL": "http://www.slideshare.net/jasonriedy/network-challenge-error-and-sensitivity-analysis",
"author": [
{
"family": "Riedy",
"given": "Jason"
}
],
"event-place": "Vancouver, BC",
"id": "2015riedyNetworkChallengeError",
"issued": {
"date-parts": [
[
2015,
5
]
]
},
"keyword": "ejr-CV,graph analysis,invited-presentation,sensitivity",
"note": "Invited panelist",
"title": "Network challenge: Error and sensitivity analysis",
"title-short": "Network challenge",
"type": "speech"
},
{
"abstract": "In 2013 a paper was offered to the CAA concerning archaeological legacy data and semantic database applications, with some preliminary results for a study conducted into the Samtavro cemetery, situated in the South Caucasus in the modern republic of Georgia. The present paper presents further research outcomes of data mining the Samtavro material. Over four thousand graves were excavated at this site, used most intensively during the Late Bronze and Iron Ages, and later in the Roman and Late Antique periods. The current project focuses on the latter period—and the legacy of Soviet and post-Soviet excavations—in a collaborative effort between computer scientists based at the Georgia Institute of Technology, USA, and archaeologists at the University of Melbourne and Monash University, Australia. Data for 1075 tombs, 1249 individuals, and 5842 grave accoutrements were collected across 74 data fields, resulting in the identification of 9 tomb types, 37 artefact types and 320 artefact subtypes. Methods tested against the Samtavro material culture included the application of clustering techniques to understand associations of related items based on patterns of co-occurrence, using traditional data mining (hierarchical link clustering) and spectral graph theory—focusing on tomb types in relation to artefact types. The other method calculated the probability of each event occurring and comparing this to what we would expect if these were truly random—focusing on artefact types in relation to biological sex and age brackets. In some instances, our work confirmed previously established relationships, but it likewise revealed new results concerning particular entities. The project demonstrates that although sites for which comprehensive archival records exist can benefit from these types of approaches, often the greatest limitation in taking a “big data” approach is the relative scarcity of archaeological data.",
"author": [
{
"family": "Bader",
"given": "David"
},
{
"family": "Michalewicz",
"given": "Aleksandra"
},
{
"family": "Green",
"given": "Oded"
},
{
"family": "Birkett-Rees",
"given": "Jessie"
},
{
"family": "Riedy",
"given": "Jason"
},
{
"family": "Fairbanks",
"given": "James"
},
{
"family": "Zakrzewska",
"given": "Anita"
}
],
"container-title": "The 44th computer applications and quantitative methods in archaeology conference (CAA)",
"id": "2016baderSemanticDatabaseApplications",
"issued": {
"date-parts": [
[
2016,
3
]
]
},
"keyword": "archaeology,ejr-CV,graph analysis,proceedings",
"publisher-place": "Oslo, Norway",
"title": "Semantic database applications at the Samtavro Cemetery, Georgia",
"type": "paper-conference"
},
{
"URL": "https://blogs.fau.de/hager/files/2016/06/pmma2016-slides_Dukhan.pdf",
"abstract": "We propose a new instruction (FPADDRE) that computes the round-off error in floating-point addition. We explain how this instruction benefits high-precision arithmetic operations in applications where double precision is not sufficient. Performance estimates on Intel Haswell, Intel Skylake, and AMD Steamroller processors, as well as Intel Knights Corner co-processor, demonstrate that such an instruction would improve the latency of double-double addition by up to 55% and increase double-double addition throughput by up to 103%, with smaller, but non-negligible benefits for double-double multiplication. The new instruction delivers up to 2x speedups on three benchmarks that use high-precision floating-point arithmetic: double-double matrix-matrix multiplication, compensated dot product, and polynomial evaluation via the compensated Horner scheme.",
"author": [
{
"family": "Dukhan",
"given": "Marat"
},
{
"family": "Vuduc",
"given": "Richard"
},
{
"family": "Riedy",
"given": "Jason"
}
],
"container-title": "The 2nd international workshop on performance modeling: Methods and applications (PMMA16)",
"id": "2016dukhanWantedFloatingpointAdd",
"issued": {
"date-parts": [
[
2016,
6
]
]
},
"keyword": "ejr-CV,floating point,ieee754,proceedings",
"note": "(Workshop with ISC High Performance)",
"publisher-place": "Frankfurt, Germany",
"title": "Wanted: Floating-point add round-off error instruction",
"title-short": "Wanted",
"type": "paper-conference"
},
{
"URL": "http://arxiv.org/abs/1603.00491",
"author": [
{
"family": "Dukhan",
"given": "Marat"
},
{
"family": "Vuduc",
"given": "Richard W."
},
{
"family": "Riedy",
"given": "E. Jason"
}
],
"id": "2016dukhanWantedFloatingpointAdda",
"issued": {
"date-parts": [
[
2016
]
]
},
"keyword": "ejr-CV,technical-report",
"status": "pre-published",
"title": "Wanted: Floating-point add round-off error instruction",
"title-short": "Wanted",
"type": "webpage"
},
{
"author": [
{
"literal": "E. Jason Riedy (PI)"
},
{
"literal": "David Bader"
}
],
"id": "2016e.jasonriedypiSupportDataAnalytics",
"issued": {
"date-parts": [
[
2016,
7
]
]
},
"keyword": "ejr-CV,grants",
"note": "$194 150",
"publisher": "Georgia Institute of Technology",
"title": "Support for data analytics for CSE programs and courses",
"type": "report"
},
{
"DOI": "10.1145/2980765.2980770",
"ISSN": "1931-0145",
"author": [
{
"family": "Holder",
"given": "Lawrence B."
},
{
"family": "Caceres",
"given": "Rajmonda"
},
{
"family": "Gleich",
"given": "David F."
},
{
"family": "Riedy",
"given": "Jason"
},
{
"family": "Khan",
"given": "Maleq"
},
{
"family": "Chawla",
"given": "Nitesh V."
},
{
"family": "Kumar",
"given": "Ravi"
},
{
"family": "Wu",
"given": "Yinghui"
},
{
"family": "Klymko",
"given": "Christine"
},
{
"family": "Eliassi-Rad",
"given": "Tina"
},
{
"family": "Prakash",
"given": "Aditya"
}
],
"container-title": "SIGKDD Explorations Newsletter",
"id": "2016holderCurrentFutureChallenges",
"issue": "1",
"issued": {
"date-parts": [
[
2016,
8
]
]
},
"keyword": "big data,challenges,ejr-CV,graph mining,Network mining,technical-report",
"page": "39-45",
"publisher": "ACM",
"publisher-place": "New York, NY, USA",
"title": "Current and future challenges in mining large networks: Report on the second SDM workshop on mining networks and graphs",
"title-short": "Current and future challenges in mining large networks",
"type": "article-journal",
"volume": "18"
},
{
"URL": "http://www.slideshare.net/jasonriedy/scalable-and-efficient-algorithms-for-analysis-of-massive-streaming-graphs-60975076",
"abstract": "Graph analysis provides tools for analyzing the irregular data sets common in health informatics, computational biology, climate science, sociology, security, finance, and many other fields. These graphs possess different structures than typical finite element meshes. Scaling graph analysis to the scales of data being gathered and created has spawned many directions of exciting new research. This minisymposium includes talks on massive graph generation for testing and evaluating parallel algorithms, novel streaming techniques, and parallel graph algorithms for new and existing problems. It also covers existing parallel frameworks and interdisciplinary applications, e.g. the analysis of climate networks.",
"author": [
{
"family": "Riedy",
"given": "E. Jason"
},
{
"family": "Bader",
"given": "David A."
}
],
"event-place": "Paris, France",
"id": "2016riedyScalableNetworkAnalysis",
"issued": {
"date-parts": [
[
2016,
4
]
]
},
"keyword": "ejr-CV,graph analysis,hpda,parallel algorithm,presentation,streaming data",
"note": "Minisymposium organizer with Henning Meyerhenke and David A. Bader.",
"title": "Scalable network analysis: Tools, algorithms, applications",
"title-short": "Scalable network analysis",
"type": "speech"
},
{
"abstract": "Incremental graph algorithms can respond quickly to small changes in massive graphs by updating rather than recomputing analysis metrics. Here we use the linear system formulation of PageRank and ideas from iterative refinement to compute the update to a PageRank vector accurately and quickly. The core idea is to express the residual of the original solution with respect to the updated matrix representing the graph. The update to the residual is sparse. Solving for the solution update with a straight-forward iterative method spreads the change outward from the change locations but converges before traversing the entire graph. We achieve speed-ups of 2× to over 40× relative to a restarted, highly parallel PageRank iteration for small, low-latency batches of edge insertions. These cases traverse 2× to nearly 10 000× fewer edges than the restarted PageRank iteration. This provides an interesting test case for the ongoing GraphBLAS effort: Can the APIs support our incremental algorithms cleanly and efficiently?",
"author": [
{
"family": "Riedy",
"given": "Jason"
}
],
"container-title": "Graph Algorithms Building Blocks (GABB 2016)",
"id": "2016riedyUpdatingPageRankStreaming",
"issued": {
"date-parts": [
[
2016,
5
]
]
},
"keyword": "ejr-CV,graph analysis,hpda,parallel algorithm,proceedings,streaming data",
"note": "(Workshop with IPDPS 2016)",
"publisher-place": "Chicago, IL",
"title": "Updating PageRank for streaming graphs",
"type": "paper-conference"
},
{
"URL": "https://goo.gl/hvDu3d",
"author": [
{
"family": "Demmel",
"given": "James"
},
{
"family": "Gates",
"given": "Mark"
},
{
"family": "Henry",
"given": "Greg"
},
{
"family": "Li",
"given": "Xiaoye S."
},
{
"family": "Riedy",
"given": "Jason"
},
{
"family": "Tang",
"given": "P. T. Peter"
}
],
"id": "2017demmelProposalNextgenerationBLAS",
"issued": {
"date-parts": [
[
2017,
11
]
]
},
"keyword": "blas,ejr-CV,lapack,linear algebra,unpublished",
"note": "(living document, being updated)",
"title": "A proposal for a next-generation BLAS",
"type": "manuscript"
},
{
"URL": "http://www.netlib.org/utk/people/JackDongarra/WEB-PAGES/Batched-BLAS-2017/talk05-demmel.pdf",
"author": [
{
"family": "Demmel",
"given": "James"
},
{
"family": "Henry",
"given": "Greg"
},
{
"family": "Li",
"given": "Xiaoye"
},
{
"family": "Riedy",
"given": "Jason"
},
{
"family": "Tang",
"given": "Peter"
}
],
"event-place": "Atlanta, Georgia",
"id": "2017demmelProposalNextgenerationBLASa",
"issued": {
"date-parts": [
[
2017,
2
]
]
},
"keyword": "blas,ejr-CV,linear algebra,presentation",
"title": "A proposal for a next-generation BLAS",
"type": "speech"
},
{
"author": [
{
"literal": "E. Jason Riedy (PI)"
},
{
"literal": "David A. Bader"
},
{
"literal": "Thomas M. Conte"
}
],
"id": "2017e.jasonriedypiEvaluatingMemorycentricArchitectures",
"issued": {
"date-parts": [
[
2017,
8
]
]
},
"keyword": "ejr-CV,grants",
"note": "$662 525",
"publisher": "Georgia Institute of Technology",
"title": "Evaluating memory-centric architectures for high performance data analysis",
"type": "report"
},
{
"author": [
{
"literal": "Jeffrey S. Young (PI)"
},
{
"literal": "Jason Riedy (coPI)"
},
{
"literal": "Richard Vuduc (coPI)"
}
],
"id": "2017jeffreys.youngpiCDSESuperSTARLUSTacked",
"issued": {
"date-parts": [
[
2017,
8
]
]
},
"keyword": "ejr-CV,grants",
"note": "$500 000",
"number": "NSF 1710371",
"publisher": "Georgia Institute of Technology",
"title": "CDS&E: SuperSTARLU - STacked, AcceleRated Algorithms for sparse linear systems",
"title-short": "CDS&E",
"type": "report"
},
{
"DOI": "10.3390/a10030102",
"ISSN": "1999-4893",
"abstract": "Analyzing massive graphs poses challenges due to the vast amount of data available. Extracting smaller relevant subgraphs allows for further visualization and analysis that would otherwise be too computationally intensive. Furthermore, many real data sets are constantly changing, and require algorithms to update as the graph evolves. This work addresses the topic of local community detection, or seed set expansion, using personalized centrality measures, specifically PageRank and Katz centrality. We present a method to efficiently update local communities in dynamic graphs. By updating the personalized ranking vectors, we can incrementally update the corresponding local community. Applying our methods on real-world graphs, we are able to obtain speedups of up to 60× compared to static recomputation while maintaining an average recall of 0.94 of the highly ranked vertices returned. Next, we investigate how approximations of a centrality vector affect the resulting local community. Specifically, our method guarantees that the vertices returned in the community are the highly ranked vertices from a personalized centrality metric.",
"author": [
{
"family": "Nathan",
"given": "Eisha"
},
{
"family": "Zakrzewska",
"given": "Anita"
},
{
"family": "Riedy",
"given": "Jason"
},
{
"family": "Bader",
"given": "David A."
}
],
"container-title": "Algorithms",
"id": "2017nathanLocalCommunityDetection",
"issue": "3",
"issued": {
"date-parts": [
[
2017,
8
]
]
},
"keyword": "ejr-CV,refereed",
"title": "Local community detection in dynamic graphs using personalized centrality",
"type": "article-journal",
"volume": "10"
},
{
"abstract": "Applications in computer network security, social media analysis, and other areas rely on analyzing a changing environment. The data is rich in relationships and lends itself to graph analysis. Traditional static graph analysis cannot keep pace with network security applications analyzing nearly one million events per second and social networks like Facebook collecting 500 thousand comments per second. Streaming frameworks like STINGER support ingesting up to three million edge changes per second but there are few streaming analysis kernels that keep up with these rates. Here we introduce a new, non-stop model and use it to decouple the analysis from the data ingest.",
"author": [
{
"family": "Nathan",
"given": "Eisha"
},
{
"family": "Zakrzewska",
"given": "Anita"
},
{
"family": "Yin",
"given": "Chunxing"
},
{
"family": "Riedy",
"given": "Jason"
}
],
"event-place": "Honolulu, HI",
"id": "2017nathanNewDirectionStreaming",
"issued": {
"date-parts": [
[
2017,
9
]
]
},
"keyword": "ejr-CV,graph analysis,hpda,memory-centric,novel architectures,presentation,streaming data",
"title": "A new direction for streaming graph analysis",
"type": "speech"
},
{
"URL": "https://www.slideshare.net/jasonriedy/highperformance-analysis-of-streaming-graphs-77348572",
"abstract": "Graph-structured data in social networks, finance, network security, and others not only are massive but also under continual change. These changes often are scattered across the graph. Stopping the world to run a single, static query is infeasible. Repeating complex global analyses on massive snapshots to capture only what has changed is inefficient. We discuss requirements for single-shot queries on changing graphs as well as recent high-performance algorithms that update rather than recompute results. These algorithms are incorporated into our software framework for streaming graph analysis, STINGER.",
"author": [
{
"family": "Riedy",
"given": "Jason"
}
],
"event-place": "Hanover, MD",
"id": "2017riedyHighperformanceAnalysisStreaming",
"issued": {
"date-parts": [
[
2017,
6
]
]
},
"keyword": "ejr-CV,graph analysis,hpda,memory-centric,novel architectures,presentation,streaming data",
"title": "High-performance analysis of streaming graphs",
"type": "speech"
},
{
"URL": "https://www.slideshare.net/jasonriedy/highperformance-analysis-of-streaming-graphs",
"abstract": "Graph-structured data in social networks, finance, network security, and others not only are massive but also under continual change. These changes often are scattered across the graph. Stopping the world to run a single, static query is infeasible. Repeating complex global analyses on massive snapshots to capture only what has changed is inefficient. We discuss requirements for single-shot queries on changing graphs as well as recent high-performance algorithms that update rather than recompute results. These algorithms are incorporated into our software framework for streaming graph analysis, STING (Spatio-Temporal Interaction Networks and Graphs).",
"author": [
{
"family": "Riedy",
"given": "E. Jason"
}
],
"event-place": "Atlanta, GA",
"id": "2017riedyHighperformanceAnalysisStreaminga",
"issued": {
"date-parts": [
[
2017,
3
]
]
},
"keyword": "ejr-CV,graph analysis,hpda,parallel algorithm,presentation,streaming data",
"note": "Minisymposium organizer with Henning Meyerhenke.",
"title": "High-performance analysis of streaming graphs",
"type": "speech"
},
{
"URL": "https://www.slideshare.net/jasonriedy/a-new-algorithm-model-for-massivescale-streaming-graph-analysis",
"author": [
{
"family": "Riedy",
"given": "E. Jason"
},
{
"family": "Yin",
"given": "Chunxing"
},
{
"family": "Bader",
"given": "David A."
}
],
"container-title": "SIAM workshop on network science",
"id": "2017riedyNewAlgorithmModel",
"issued": {
"date-parts": [
[
2017,
7
]
]
},
"keyword": "ejr-CV,graph analysis,hpda,proceedings,streaming data",
"publisher-place": "Pittsburgh, PA",
"title": "A new algorithm model for massive-scale streaming graph analysis",
"type": "paper-conference"
},
{
"URL": "http://icl.utk.edu/bblas/sc17/files/bblas-sc17-riedy.pdf",
"author": [
{
"family": "Riedy",
"given": "E. Jason"
},
{
"family": "Henry",
"given": "Greg"
},
{
"family": "Demmel",
"given": "James"
},
{
"family": "Gates",
"given": "Mark"
},
{
"family": "Li",
"given": "Xiaoye S."
},
{
"family": "Tang",
"given": "Ping Tak P."
}
],
"id": "2017riedyProposalNextgenerationBLAS",
"issued": {
"date-parts": [
[
2017,
11
]
]
},
"keyword": "blas,ejr-CV,linear algebra,presentation",
"title": "A proposal for a next-generation BLAS",
"type": "manuscript"
},
{
"URL": "https://sinews.siam.org/Details-Page/reproducible-blas-make-addition-associative-again",
"author": [
{
"family": "Demmel",
"given": "James"
},
{
"family": "Riedy",
"given": "Jason"
},
{
"family": "Ahrens",
"given": "Peter"
}
],
"container-title": "SIAM News",
"id": "2018demmelReproducibleBLASMake",
"issue": "8",
"issued": {
"date-parts": [
[
2018,
10
]
]
},
"keyword": "ejr-CV,floating point,ieee754,linear algebra,trade-pub",
"page": "8",
"title": "Reproducible BLAS: Make addition associative again!",
"title-short": "Reproducible BLAS",
"type": "article-newspaper",
"volume": "51"
},
{
"DOI": "10.1109/IPDPSW.2018.00097",
"ISBN": "978-1-5386-5555-9",
"abstract": "The Emu Chick is a prototype system designed around the concept of migratory memory-side processing. Rather than transferring large amounts of data across power-hungry, high-latency interconnects, the Emu Chick moves lightweight thread contexts to near-memory cores before the beginning of each memory read. The current prototype hardware uses FPGAs to implement cache-less \"Gossamer\" cores for doing computational work and a stationary core to run basic operating system functions and migrate threads between nodes. In this initial characterization of the Emu Chick, we study the memory bandwidth characteristics of the system through benchmarks like STREAM, pointer chasing, and sparse matrix vector multiply. We compare the Emu Chick hardware to architectural simulation and Intel Xeon-based platforms. While it is difficult to accurately compare prototype hardware with existing systems, our initial evaluation demonstrates that the Emu Chick uses available memory bandwidth more efficiently than a more traditional, cache-based architecture. Moreover, the Emu Chick provides stable, predictable performance with 80% bandwidth utilization on a random-access pointer chasing benchmark with weak locality.",
"author": [
{
"family": "Hein",
"given": "Eric"
},
{
"family": "Conte",
"given": "Tom"
},
{
"family": "Young",
"given": "Jeffrey S."
},
{
"family": "Eswar",
"given": "Srinivas"
},
{
"family": "Li",
"given": "Jiajia"
},
{
"family": "Lavin",
"given": "Patrick"
},
{
"family": "Vuduc",
"given": "Richard"
},
{
"family": "Riedy",
"given": "Jason"
}
],
"container-title": "The eighth international workshop on accelerators and hybrid exascale systems (AsHES)",
"id": "2018heinInitialCharacterizationEmu",
"issued": {
"date-parts": [
[
2018,
5
]
]
},
"keyword": "Bandwidth,Benchmark testing,benchmarking,computer architecture,Computer architecture,ejr-CV,emu,Hardware,hpda,Instruction sets,Kernel,memory-centric,novel architectures,proceedings,Prototypes,sparse tensors,streaming graphs",
"page": "579-588",
"title": "An initial characterization of the Emu Chick",
"type": "paper-conference"
},
{
"author": [
{
"literal": "Jason Riedy (coPI)"
},
{
"family": "Park",
"given": "Haesun"
},
{
"literal": "David A. Bader (PI)"
}
],
"id": "2018jasonriedycopiHighperformanceDataAnalytics",
"issued": {
"date-parts": [
[
2018,
1
]
]
},
"keyword": "ejr-CV,grants",
"note": "$100 000",
"publisher": "Georgia Institute of Technology",
"title": "High-performance data analytics (HPDA) research topics",
"type": "report"
},
{
"URL": "http://arxiv.org/abs/1811.03743",
"abstract": "Recent characterizations of data movement performance have evaluated optimizations for dense and blocked accesses used by accelerators like GPUs and Xeon Phi, but sparse access patterns like scatter and gather are still not well understood across current and emerging architectures. We propose a tunable benchmark suite, Spatter, that allows users to characterize scatter, gather, and related sparse access patterns at a low level across multiple backends, including CUDA, OpenCL, and OpenMP. Spatter also allows users to vary the block size and amount of data that is moved to create a more comprehensive picture of sparse access patterns and to model patterns that are found in real applications. With Spatter we aim to characterize the performance of memory systems in a novel way by evaluating how the density of accesses compares against real-world effective memory bandwidths (measured by STREAM) and how it can be compared across widely varying architectures including GPUs and x86, ARM, and Power CPUs. We demonstrate how Spatter can be used to generate analysis plots comparing different architectures and show that current GPU systems achieve up to 65% of STREAM bandwidth for sparse accesses and are more energy efficient in doing so for several different sparsity patterns. Our future plans for the spatter benchmark are to use these results to predict the impact of new memory access primitives on various architectures, develop backends for novel hardware like FPGAs and the Emu Chick, and automate testing so that users can perform their own sparse access studies.",
"author": [
{
"family": "Lavin",
"given": "Patrick"
},
{
"family": "Riedy",
"given": "E. Jason"
},
{
"family": "Vuduc",
"given": "Rich"
},
{
"family": "Young",
"given": "Jeffrey"
}
],
"id": "2018lavinSpatterBenchmarkSuite",
"issued": {
"date-parts": [
[
2018
]
]
},
"keyword": "ejr-CV,technical-report",
"status": "pre-published",
"title": "Spatter: A benchmark suite for evaluating sparse access patterns",
"title-short": "Spatter",
"type": "webpage"
},
{
"URL": "http://arxiv.org/abs/1808.06334",
"abstract": "The Rogues Gallery is a new experimental testbed that is focused on tackling \"rogue\" architectures for the Post-Moore era of computing. While some of these devices have roots in the embedded and high-performance computing spaces, managing current and emerging technologies provides a challenge for system administration that are not always foreseen in traditional data center environments. We present an overview of the motivations and design of the initial Rogues Gallery testbed and cover some of the unique challenges that we have seen and foresee with upcoming hardware prototypes for future post-Moore research. Specifically, we cover the networking, identity management, scheduling of resources, and tools and sensor access aspects of the Rogues Gallery and techniques we have developed to manage these new platforms.",
"author": [
{
"family": "Powell",
"given": "Will"
},
{
"family": "Riedy",
"given": "E. Jason"
},
{
"family": "Young",
"given": "Jeffrey S."
},
{
"family": "Conte",
"given": "Thomas M."
}
],
"id": "2018powellWranglingRoguesManaging",
"issued": {
"date-parts": [
[
2018
]
]
},
"keyword": "ejr-CV,technical-report",
"status": "pre-published",
"title": "Wrangling rogues: Managing experimental post-Moore architectures",
"title-short": "Wrangling rogues",
"type": "webpage"
},
{
"DOI": "10.1109/ARITH.2018.8464813",
"author": [
{
"family": "Riedy",
"given": "Jason"
},
{
"family": "Demmel",
"given": "James"
}
],
"container-title": "25th IEEE symposium on computer arithmetic (ARITH 25)",
"id": "2018riedyAugmentedArithmeticOperations",
"issued": {
"date-parts": [
[
2018,
6
]
]
},
"keyword": "ejr-CV,floating point,ieee754,proceedings",
"title": "Augmented arithmetic operations proposed for IEEE-754 2018",
"type": "paper-conference"
},
{
"author": [
{
"family": "Riedy",
"given": "Jason"
}
],
"event-place": "Tokyo, Japan",
"id": "2018riedyGraphAnalysisNew",
"issued": {
"date-parts": [
[
2018,
3
]
]
},
"keyword": "ejr-CV,graph analysis,hpda,memory-centric,novel architectures,presentation,streaming data",
"note": "Minisymposium organizer with Oded Green and David A. Bader.",
"title": "Graph analysis: New algorithm models, new architectures",
"title-short": "Graph analysis",
"type": "speech"
},
{
"URL": "https://www.slideshare.net/jasonriedy/plans-for-ieee-standard-7542028",
"author": [
{
"family": "Riedy",
"given": "Jason"
}
],
"container-title": "25th IEEE symposium on computer arithmetic (ARITH 25)",
"id": "2018riedyPlansIEEEStandard",
"issued": {
"date-parts": [
[
2018,
6
]
]
},
"keyword": "ejr-CV,floating point,ieee754,invited-presentation,linear algebra,memory centric",
"note": "Invited talk",
"title": "Plans for IEEE standard 754-2028",
"type": "manuscript"
},
{
"URL": "https://www.slideshare.net/jasonriedy/graph-analysis-new-algorithm-models-new-architectures",
"author": [
{
"family": "Riedy",
"given": "Jason"
}
],
"container-title": "ACM international conference on computing frontiers",
"id": "2018riedyStreamingGraphAnalysis",
"issued": {
"date-parts": [
[
2018,
5
]
]
},
"keyword": "ejr-CV,graph analysis,hpda,invited-presentation,memory-centric,novel architectures,streaming data",
"note": "Invited talk.",
"title": "Streaming graph analysis: New models, new architectures",
"title-short": "Streaming graph analysis",
"type": "manuscript"
},
{
"URL": "http://icl.utk.edu/bblas/sc18/files/NG_BLAS_SC18.pdf",
"abstract": "The classic BLAS interface is concise and mostly predictable. The BLAS Technical Forum produced a 301-page document in 2001 that incorporated mixed precision and extended operations. And now we face different implementations for reproducibility, even more precisions, and the batched interfaces. The explosion of interfaces causes problems for platform optimization and interface generation. The \"Next-Generation BLAS Proposal\" provides a unified naming scheme and semantic requirements for extensions. Inspired by the BLIS project, we also consider a minimal set of microkernels to provide a smaller optimization surface.",
"author": [
{
"family": "Riedy",
"given": "E. Jason"
},
{
"family": "Henry",
"given": "Greg"
},
{
"family": "Demmel",
"given": "James"
},
{
"family": "Gates",
"given": "Mark"
},
{
"family": "Li",
"given": "Xiaoye S."
},
{
"family": "Tang",
"given": "Ping Tak P."
}
],
"id": "2018riedyUpdatedProposalNextgeneration",
"issued": {
"date-parts": [
[
2018,
11
]
]
},
"keyword": "blas,ejr-CV,linear algebra,presentation",
"title": "Updated proposal for a next-generation BLAS",
"type": "manuscript"
},
{
"URL": "http://www.mlgworkshop.org/2018/papers/MLG2018_paper_23.pdf",
"author": [
{
"family": "Yin",
"given": "Chunxing"
},
{
"family": "Riedy",
"given": "Jason"
},
{
"family": "Bader",
"given": "David A."
}
],
"container-title": "Proceedings of the 14th international workshop on mining and learning with graphs (MLG)",
"id": "2018yinNewAlgorithmicModel",
"issued": {
"date-parts": [
[
2018,
5
]
]
},
"keyword": "ejr-CV,graph analysis,hpda,proceedings,streaming data",
"title": "A new algorithmic model for graph analysis of streaming data",
"type": "paper-conference"
},
{
"URL": "http://arxiv.org/abs/1809.07696",
"author": [
{
"family": "Young",
"given": "Jeffrey"
},
{
"family": "Hein",
"given": "Eric R."
},
{
"family": "Eswar",
"given": "Srinivas"
},
{
"family": "Lavin",
"given": "Patrick"
},
{
"family": "Li",
"given": "Jiajia"
},
{
"family": "Riedy",
"given": "E. Jason"
},
{
"family": "Vuduc",
"given": "Richard W."
},
{
"family": "Conte",
"given": "Tom"
}
],
"id": "2018youngMicrobenchmarkCharacterizationEmu",
"issued": {
"date-parts": [
[
2018
]
]
},
"keyword": "ejr-CV,technical-report",
"status": "pre-published",
"title": "A microbenchmark characterization of the Emu Chick",
"type": "webpage"
},
{
"URL": "https://hpc.pnl.gov/armbof/",
"author": [
{
"family": "Donofrio",
"given": "David"
},
{
"family": "Riedy",
"given": "Jason"
}
],
"event-place": "Austin, TX",
"id": "2019donofrioSpecializingArchitecturesData",
"issued": {
"date-parts": [
[
2019,
9
]
]
},
"keyword": "ejr-CV,presentation",
"note": "Introduction to invited panel on \"We can’t build specialized architectures for graphs that can work efficiently with other workloads, so we just need to hand-optimize each and every algorithm for each and every architecture\"",
"title": "Specializing architectures for data analytics",
"type": "speech"
},
{
"URL": "http://icl.utk.edu/bblas/siam-cse19/",
"author": [
{
"family": "Gates",
"given": "Mark"
},
{
"family": "Demmel",
"given": "James W."
},
{
"family": "Henry",
"given": "Greg"
},
{
"family": "Li",
"given": "Xiaoye S."
},
{
"family": "Riedy",
"given": "E. Jason"
},
{
"family": "Tang",
"given": "Peter"
}
],
"event-place": "Spokane, WA",
"id": "2019gatesProposalNextgenerationBLAS",
"issued": {
"date-parts": [
[
2019,
2
]
]
},
"keyword": "ejr-CV,presentation",
"title": "A proposal for next-generation BLAS",
"type": "speech"
},
{
"URL": "http://arxiv.org/abs/1901.02775",
"abstract": "The Emu Chick prototype implements migratory memory-side processing in a novel hardware system. Rather than transferring large amounts of data across the system interconnect, the Emu Chick moves lightweight thread contexts to near-memory cores before the beginning of each remote memory read. Previous work has characterized the performance of the Chick prototype in terms of memory bandwidth and programming differences from more typical, non-migratory platforms, but there has not yet been an analysis of algorithms on this system. This work evaluates irregular algorithms that could benefit from the lightweight, memory-side processing of the Chick and demonstrates techniques and optimization strategies for achieving performance in sparse matrix-vector multiply operation (SpMV), breadth-first search (BFS), and graph alignment across up to eight distributed nodes encompassing 64 nodelets in the Chick system. We also define and justify relative metrics to compare prototype FPGA-based hardware with established ASIC architectures. The Chick currently supports up to 68x scaling for graph alignment, 80 MTEPS for BFS on balanced graphs, and 50% of measured STREAM bandwidth for SpMV.",
"author": [
{
"family": "Hein",
"given": "Eric R."
},
{
"family": "Eswar",
"given": "Srinivas"
},
{
"family": "Yasar",
"given": "Abdurrahman"
},
{
"family": "Li",
"given": "Jiajia"
},
{
"family": "Young",
"given": "Jeffrey S."
},
{
"family": "Conte",
"given": "Thomas M."
},
{
"family": "Çatalyürek",
"given": "Ümit V."
},
{
"family": "Vuduc",
"given": "Rich"
},
{
"family": "Riedy",
"given": "E. Jason"
},
{
"family": "Uçar",
"given": "Bora"
}
],
"id": "2019heinProgrammingStrategiesIrregular",
"issued": {
"date-parts": [
[
2019
]
]
},
"keyword": "ejr-CV,technical-report",
"status": "pre-published",
"title": "Programming strategies for irregular algorithms on the Emu Chick",
"type": "webpage"
},
{
"ISBN": "978-1-5044-5897-9",
"URL": "https://ieeexplore.ieee.org/servlet/opac?punumber=8739148",
"author": [
{
"literal": "IEEE 754 Committee"
}
],
"genre": "IEEE Std",
"id": "2019ieee754committeeIEEEStandardFloatingpoint",
"issued": {
"date-parts": [
[
2019
]
]
},
"keyword": "754-2008,arithmetic,arithmetic formats,binary,computer,computer programming,decimal,decimal floating-point arithmetic,ejr-CV,exponent,floating point arithmetic,floating-point,format,IEEE standard,IEEE standards,interchange,NaN,number,programming,rounding,significand,subnormal,technical-report",
"note": "(committee member and contributor)",
"number": "754–2019",
"publisher": "Microprocessor Standards Committee of the IEEE Computer Society",
"publisher-place": "New York, NY",
"title": "IEEE standard for floating-point arithmetic",
"type": "legislation"
},
{
"author": [
{
"literal": "Jason Riedy (PI)"
},
{
"literal": "Will Powell"
},
{
"literal": "David Bader"
}
],
"id": "2019jasonriedypiPOWERSystemsData",
"issued": {
"date-parts": [
[
2019,
7
]
]
},
"keyword": "ejr-CV,grants",
"note": "$96 272",
"publisher": "Georgia Institute of Technology",
"title": "POWER systems for data analysis and HPC classes",
"type": "report"
},
{
"DOI": "10.1145/3332186.3332223",
"author": [
{
"family": "Powell",
"given": "Will"
},
{
"family": "Riedy",
"given": "Jason"
},
{
"family": "Young",
"given": "Jeffrey S."
},
{
"family": "Conte",
"given": "Tom"
}
],
"container-title": "Practice and experience in advanced research computing (PEARC ’19)",
"id": "2019powellWranglingRoguesCase",
"issued": {
"date-parts": [
[
2019,
7
]
]
},
"keyword": "ejr-CV,proceedings",
"publisher-place": "Chicago, IL",
"title": "Wrangling Rogues: A case study on managing experimental post-Moore architectures",
"title-short": "Wrangling Rogues",
"type": "paper-conference"
},
{
"author": [
{
"family": "Riedy",
"given": "E. Jason"
}
],
"event-place": "Catonsville, MD",
"id": "2019riedyCharacterizationEmuMicrobenchmarks",
"issued": {
"date-parts": [
[
2019,
1
]
]
},
"keyword": "ejr-CV,presentation",
"title": "Characterization of Emu with microbenchmarks",
"type": "speech"
},
{
"URL": "http://www.crnch.gatech.edu/content/siam-cse-2019-go-bananas",
"author": [
{
"family": "Riedy",
"given": "Jason"
},
{
"family": "Young",
"given": "Jeffrey"
},
{
"family": "Conte",
"given": "Tom"
}
],
"event-place": "Spokane, WA",
"id": "2019riedyNovelArchitecturesApplications",
"issued": {
"date-parts": [
[
2019,
3
]
]
},
"keyword": "ejr-CV,presentation",
"note": "Minisymposium organizer with Jeffrey Young and Tom Conte.",
"title": "Novel architectures for applications in data science and beyond",
"type": "speech"
},
{
"URL": "https://crnch-rg.gitlab.io/pearc-2019/",
"author": [
{
"family": "Riedy",
"given": "E. Jason"
},
{
"family": "Young",
"given": "Jeffrey S."
}
],
"container-title": "Practice and experience in advanced research computing (PEARC)",
"event-place": "Chicago, IL",
"id": "2019riedyProgrammingNovelArchitectures",
"issued": {
"date-parts": [
[
2019,
7
]
]
},
"keyword": "ejr-CV,tutorial",
"note": "https://crnch-rg.gitlab.io/pearc-2019/",
"title": "Programming novel architectures in the post-Moore era with the Rogues Gallery",
"type": "speech"
},
{
"URL": "https://crnch-rg.gitlab.io/asplos-2019/",
"author": [
{
"family": "Riedy",
"given": "E. Jason"
},
{
"family": "Young",
"given": "Jeffrey S."
}
],
"container-title": "24th ACM international conference on architectural support for programming languages and operating systems (ASPLOS)",
"event-place": "Providence, RI",
"id": "2019riedyProgrammingNovelArchitecturesa",
"issued": {
"date-parts": [
[
2019,
4
]
]
},
"keyword": "ejr-CV,tutorial",
"note": "https://crnch-rg.gitlab.io/asplos-2019/",
"title": "Programming novel architectures in the post-Moore era with the Rogues Gallery",
"type": "speech"
},
{
"URL": "https://www.slideshare.net/jasonriedy/reproducible-linear-algebra-from-application-to-architecture",
"author": [
{
"family": "Riedy",
"given": "Jason"
},
{
"family": "Demmel",
"given": "James"
},
{
"family": "Ahrens",
"given": "Peter"
}
],
"event-place": "Valencia, Spain",
"id": "2019riedyReproducibleLinearAlgebra",
"issued": {
"date-parts": [
[
2019,
7
]
]
},
"keyword": "ejr-CV,presentation",
"title": "Reproducible linear algebra from application to architecture",
"type": "speech"
},
{
"DOI": "10.1109/HPEC.2019.8916572",
"author": [
{
"family": "Yin",
"given": "Chunxing"
},
{
"family": "Riedy",
"given": "Jason"
}
],
"container-title": "The IEEE high performance extreme computing conference (HPEC)",
"id": "2019yinConcurrentKatzCentrality",
"issued": {
"date-parts": [
[
2019,
9
]
]
},
"keyword": "ejr-CV,graph analysis,hpda,parallel algorithm,proceedings",
"publisher-place": "Waltham, MA",
"title": "Concurrent Katz centrality for streaming graphs",
"type": "paper-conference"
},
{
"URL": "https://www.slideshare.net/jasonriedy/a-new-algorithm-model-for-massivescale-streaming-graph-analysis-156808819",
"author": [
{
"family": "Yin",
"given": "Chunxing"
},
{
"family": "Riedy",
"given": "Jason"
}
],
"event-place": "Valencia, Spain",
"id": "2019yinNewAlgorithmModel",
"issued": {
"date-parts": [
[
2019,
7
]
]
},
"keyword": "ejr-CV,presentation",
"title": "A new algorithm model for massive-scale streaming graph analysis",
"type": "speech"
},
{
"DOI": "10.1109/ICRC.2019.8914707",
"author": [
{
"family": "Young",
"given": "Jeffrey"
},
{
"family": "Riedy",
"given": "Jason"
},
{
"family": "Conte",
"given": "Tom"
},
{
"family": "Sarkar",
"given": "Vivek"
},
{
"family": "Chatarasi",
"given": "Prasanth"
},
{
"family": "Srikanth",
"given": "Sriseshan"
}
],
"container-title": "IEEE international conference on rebooting computing (ICRC19)",
"id": "2019youngExperimentalInsightsRogues",
"issued": {
"date-parts": [
[
2019,
11
]
]
},
"keyword": "ejr-CV,proceedings",
"publisher-place": "San Mateo, CA",
"title": "Experimental insights from the Rogues Gallery testbed",
"type": "paper-conference"
},
{
"DOI": "10.1016/j.parco.2019.04.012",
"abstract": "The Emu Chick is a prototype system designed around the concept of migratory memory-side processing. Rather than transferring large amounts of data across power-hungry, high-latency interconnects, the Emu Chick moves lightweight thread contexts to near-memory cores before the beginning of each memory read. The current prototype hardware uses FPGAs to implement cache-less “Gossamer” cores for doing computational work and a stationary core to run basic operating system functions and migrate threads between nodes. In this multi-node characterization of the Emu Chick, we extend an earlier single-node investigation of the the memory bandwidth characteristics of the system through benchmarks like STREAM, pointer chasing, and sparse matrix-vector multiplication. We compare the Emu Chick hardware to architectural simulation and an Intel Xeon-based platform. Our results demonstrate that for many basic operations the Emu Chick can use available memory bandwidth more efficiently than a more traditional, cache-based architecture although bandwidth usage suffers for computationally intensive workloads like SpMV. Moreover, the Emu Chick provides stable, predictable performance with up to 65% of the peak bandwidth utilization on a random-access pointer chasing benchmark with weak locality.",
"author": [
{
"family": "Young",
"given": "Jeffrey"
},
{
"family": "Hein",
"given": "Eric"
},
{
"family": "Eswar",
"given": "Srinivas"
},
{
"family": "Lavin",
"given": "Patrick"
},
{
"family": "Li",
"given": "Jiajia"
},
{
"family": "Riedy",
"given": "Jason"
},
{
"family": "Vuduc",
"given": "Richard"
},
{
"family": "Conte",
"given": "Thomas M."
}
],
"container-title": "Parallel Computing",
"id": "2019youngMicrobenchmarkCharacterizationEmu",
"issued": {
"date-parts": [
[
2019,
9
]
]
},
"keyword": "ejr-CV,refereed",
"title": "A microbenchmark characterization of the Emu Chick",
"type": "article-journal"
},
{
"DOI": "10.1145/3418077",
"ISSN": "2329-4949",
"abstract": "The Emu Chick prototype implements migratory memory-side processing in a novel hardware system. Rather than transferring large amounts of data across the system interconnect, the Emu Chick moves lightweight thread contexts to near-memory cores before the beginning of each remote memory read. Previous work has characterized the performance of the Chick prototype in terms of memory bandwidth and programming differences from more typical, non-migratory platforms, but there has not yet been an analysis of algorithms on this system.This work evaluates irregular algorithms that could benefit from the lightweight, memory-side processing of the Chick and demonstrates techniques and optimization strategies for achieving performance in sparse matrix-vector multiply operation (SpMV), breadth-first search (BFS), and graph alignment across up to eight distributed nodes encompassing 64 nodelets in the Chick system. We also define and justify relative metrics to compare prototype FPGA-based hardware with established ASIC architectures. The Chick currently supports up to 68x scaling for graph alignment, 80 MTEPS for BFS on balanced graphs, and 50% of measured STREAM bandwidth for SpMV.",
"author": [
{
"family": "Hein",
"given": "Eric R."
},
{
"family": "Eswar",
"given": "Srinivas"
},
{
"family": "Yaşar",
"given": "Abdurrahman"
},
{
"family": "Li",
"given": "Jiajia"
},
{
"family": "Young",
"given": "Jeffrey S."
},
{
"family": "Conte",
"given": "Thomas M."
},
{
"family": "Çatalyürek",
"given": "Ümit V."
},
{
"family": "Vuduc",
"given": "Richard"
},
{
"family": "Riedy",
"given": "Jason"
},
{
"family": "Uçar",
"given": "Bora"
}
],
"container-title": "ACM Transactions on Parallel Computing",
"container-title-short": "ACM Trans. Parallel Comput.",
"id": "2020heinProgrammingStrategiesIrregular",
"issue": "4",
"issued": {
"date-parts": [
[
2020,
10
]
]
},
"keyword": "ejr-CV,EMU architecture,refereed",
"publisher": "Association for Computing Machinery",
"publisher-place": "New York, NY, USA",
"title": "Programming strategies for irregular algorithms on the Emu Chick",
"type": "article-journal",
"volume": "7"
},
{
"author": [
{
"literal": "Jeffrey Young (PI)"
},
{
"literal": "Hyesoon Kim"
},
{
"literal": "Jason Riedy"
},
{
"literal": "Lee Lerner"
}
],
"id": "2020jefferyyoungpiReconfigurableClusterInitiative",
"issued": {
"date-parts": [
[
2020,
7
]
]
},
"keyword": "ejr-CV,grants",
"note": "$74 905",
"publisher": "Georgia Institute of Technology",
"title": "Reconfigurable cluster initiative",
"type": "report"
},
{
"author": [
{
"literal": "Jeffrey Young (PI)"
},
{
"literal": "Jennifer Hasler"
},
{
"literal": "Ada Gavrilovska"
},
{
"literal": "Thomas Conte"
},
{
"literal": "Jason Riedy"
}
],
"id": "2020jeffreyyoungpiCCRIMediumRogues",
"issued": {
"date-parts": [
[
2020,
9
]
]
},
"keyword": "ejr-CV,grants",
"note": "$1 351 699",
"number": "NSF 2016701",
"publisher": "Georgia Institute of Technology",
"title": "CCRI: Medium: Rogues Gallery: A community research infrastructure for post-Moore computing",
"title-short": "CCRI",
"type": "report"
},
{
"DOI": "10.1145/3422575.3422794",
"URL": "http://dx.doi.org/10.1145/3422575.3422794",
"author": [
{
"family": "Lavin",
"given": "Patrick"
},
{
"family": "Young",
"given": "Jeffrey"
},
{
"family": "Vuduc",
"given": "Richard"
},
{
"family": "Riedy",
"given": "Jason"
},
{
"family": "Vose",
"given": "Aaron"
},
{
"family": "Ernst",
"given": "Daniel"
}
],
"container-title": "The International Symposium on Memory Systems (MEMSYS)",
"id": "2020lavinEvaluatingGatherScatter",
"issued": {
"date-parts": [
[
2020,
9
]
]
},
"keyword": "ejr-CV,proceedings",
"publisher": "ACM",
"publisher-place": "Washington, DC",
"title": "Evaluating gather and scatter performance on CPUs and GPUs",
"type": "paper-conference"
},
{
"URL": "https://www2.slideshare.net/jasonriedy/graph-analysis-and-novel-architectures",
"author": [
{
"family": "Riedy",
"given": "Jason"
}
],
"id": "2020riedyGraphAnalysisNovel",
"issued": {
"date-parts": [
[
2020,
9
]
]
},
"keyword": "ejr-CV,presentation",
"title": "Graph analysis and novel architectures",
"type": "manuscript"
},
{
"URL": "https://www2.slideshare.net/jasonriedy/graphblas-and-emus",
"author": [
{
"family": "Riedy",
"given": "Jason"
}
],
"id": "2020riedyGraphBLASEmus",
"issued": {
"date-parts": [
[
2020,
9
]
]
},
"keyword": "ejr-CV,presentation",
"title": "GraphBLAS and Emus",
"type": "manuscript"
},
{
"URL": "https://icerm.brown.edu/materials/Slides/htw-20-vp/Potential_Directions_for_Moving_IEEE-754_Forward_%5D_Jason_Riedy,_Georgia_Institute_of_Technology.pdf",
"author": [
{
"family": "Riedy",
"given": "Jason"
}
],
"id": "2020riedyPotentialDirectionsMoving",
"issued": {
"date-parts": [
[
2020,
5
]
]
},
"keyword": "ejr-CV,presentation",
"title": "Potential directions for moving IEEE-754 forward",
"type": "manuscript"
},
{
"URL": "https://www.slideshare.net/jasonriedy/reproducible-linear-algebra-from-application-to-architecture-228263588",
"author": [
{
"family": "Riedy",
"given": "Jason"
},
{
"family": "Demmel",
"given": "James"
},
{
"family": "Ahrens",
"given": "Peter"
}
],
"event-place": "Seattle, WA",
"id": "2020riedyReproducibleLinearAlgebra",
"issued": {
"date-parts": [
[
2020,
2
]
]
},
"keyword": "ejr-CV,presentation",
"title": "Reproducible linear algebra from application to architecture",
"type": "speech"
},
{
"author": [
{
"literal": "Martin Deneroff (PI)"
},
{
"literal": "Jason Riedy (coPI)"
}
],
"id": "2021martindeneroffpiNonblockingUpdatesGraph",
"issued": {
"date-parts": [
[
2021,
8
]
]
},
"keyword": "ejr-CV,grants",
"note": "$255 916",
"number": "NSF 2105977",
"publisher": "Lucata Corporation",
"title": "Non-blocking updates to graph databases",
"type": "report"
},
{
"URL": "https://www.slideshare.net/jasonriedy/lagraph-20211013",
"author": [
{
"family": "Riedy",
"given": "Jason"
},
{
"family": "Kuntz",
"given": "Shannon"
}
],
"id": "2021riedyLightningTalksUpdates",
"issued": {
"date-parts": [
[
2021,
10
]
]
},
"keyword": "ejr-CV,presentation",
"title": "Lightning talks: Updates/news from the GraphBLAS implementers",
"title-short": "Lightning talks",
"type": "manuscript"
},
{
"URL": "https://www.slideshare.net/jasonriedy/lucata-at-the-hpec-graphblas-bof-250439305",
"author": [
{
"family": "Riedy",
"given": "Jason"
}
],
"id": "2021riedyLightningTalksUpdatesa",
"issued": {
"date-parts": [
[
2021,
9
]
]
},
"keyword": "ejr-CV,presentation",
"title": "Lightning talks: Updates/news from the GraphBLAS implementers",
"title-short": "Lightning talks",
"type": "manuscript"
},
{
"URL": "http://arxiv.org/abs/2207.09281v1",
"abstract": "Numerical exceptions, which may be caused by overflow, operations like division by 0 or sqrt(-1), or convergence failures, are unavoidable in many cases, in particular when software is used on unforeseen and difficult inputs. As more aspects of society become automated, e.g., self-driving cars, health monitors, and cyber-physical systems more generally, it is becoming increasingly important to design software that is resilient to exceptions, and that responds to them in a consistent way. Consistency is needed to allow users to build higher-level software that is also resilient and consistent (and so on recursively). In this paper we explore the design space of consistent exception handling for the widely used BLAS and LAPACK linear algebra libraries, pointing out a variety of instances of inconsistent exception handling in the current versions, and propose a new design that balances consistency, complexity, ease of use, and performance. Some compromises are needed, because there are preexisting inconsistencies that are outside our control, including in or between existing vendor BLAS implementations, different programming languages, and even compilers for the same programming language. And user requests from our surveys are quite diverse. We also propose our design as a possible model for other numerical software, and welcome comments on our design choices.",
"author": [
{
"family": "Demmel",
"given": "James"
},
{
"family": "Dongarra",
"given": "Jack"
},
{
"family": "Gates",
"given": "Mark"
},
{
"family": "Henry",
"given": "Greg"
},
{
"family": "Langou",
"given": "Julien"
},
{
"family": "Li",
"given": "Xiaoye"
},
{
"family": "Luszczek",
"given": "Piotr"
},
{
"family": "Pereira",
"given": "Weslley"
},
{
"family": "Riedy",
"given": "Jason"
},
{
"family": "Rubio-González",
"given": "Cindy"
}
],
"id": "2022demmelProposedConsistentException",
"issued": {
"date-parts": [
[
2022
]
]
},
"keyword": "ejr-CV,technical-report",
"status": "pre-published",
"title": "Proposed consistent exception handling for the BLAS and LAPACK",
"type": "webpage"
},
{
"URL": "https://bostonarch.github.io/2022/BARC23.pdf",
"author": [
{
"family": "Riedy",
"given": "Jason"
}
],
"container-title": "Boston area architecture workshop (BARC)",
"id": "2022riedyProgrammingLucataDatafirst",
"issued": {
"date-parts": [
[
2022,
1
]
]
},
"keyword": "ejr-CV,invited-presentation",
"note": "Keynote",
"title": "Programming on the Lucata data-first architecture",
"type": "manuscript"
},
{
"URL": "http://arxiv.org/abs/2209.11889v1",
"abstract": "High-performance analysis of unstructured data like graphs now is critical for applications ranging from business intelligence to genome analysis. Towards this, data centers hold large graphs in memory to serve multiple concurrent queries from different users. Even a single analysis often explores multiple options. Current computing architectures often are not the most time- or energy-efficient solutions. The novel Lucata Pathfinder architecture tackles this problem, combining migratory threads for low-latency reading with memory-side processing for high-performance accumulation. One hundred to 750 concurrent breadth-first searches (BFS) all achieve end-to-end speed-ups of 81 % to 97 % over one-at-a-time queries on a graph with 522M edges. Comparing to RedisGraph running on a large Intel-based server, the Pathfinder achieves a 19× speed-up running 128 BFS queries concurrently. The Pathfinder also efficiently supports a mix of concurrent analyses, demonstrated with connected components and BFS.",
"author": [
{
"family": "Smith",
"given": "Emory"
},
{
"family": "Kuntz",
"given": "Shannon"
},
{
"family": "Riedy",
"given": "Jason"
},
{
"family": "Deneroff",
"given": "Martin"
}
],
"id": "2022smithConcurrentGraphQueries",
"issued": {
"date-parts": [
[
2022
]
]
},
"keyword": "ejr-CV,technical-report",
"status": "pre-published",
"title": "Concurrent graph queries on the Lucata Pathfinder",
"type": "webpage"
},
{
"URL": "https://github.com/gt-crnch-rg/lucata-pathfinder-tutorial",
"author": [
{
"family": "Young",
"given": "Jeffrey"
},
{
"family": "Lavin",
"given": "Patrick"
},
{
"family": "Riedy",
"given": "Jason"
},
{
"family": "Eswar",
"given": "Srinivas"
}
],
"container-title": "IEEE high performance extreme computing (HPEC)",
"id": "2022youngExploringGraphAnalysis",
"issued": {
"date-parts": [
[
2022,
9
]
]
},
"keyword": "ejr-CV,tutorial",
"note": "https://crnch-rg.gitlab.io/pearc-2019/",
"title": "Exploring graph analysis for HPC with near-memory accelerators",
"type": "manuscript"
},
{
"URL": "https://arxiv.org/abs/2411.13259",
"abstract": "The standardization of an interface for dense linear algebra operations in the BLAS standard has enabled interoperability between different linear algebra libraries, thereby boosting the success of scientific computing, in particular in scientific HPC. Despite numerous efforts in the past, the community has not yet agreed on a standardization for sparse linear algebra operations due to numerous reasons. One is the fact that sparse linear algebra objects allow for many different storage formats, and different hardware may favor different storage formats. This makes the definition of a FORTRAN-style all-circumventing interface extremely challenging. Another reason is that opposed to dense linear algebra functionality, in sparse linear algebra, the size of the sparse data structure for the operation result is not always known prior to the information. Furthermore, as opposed to the standardization effort for dense linear algebra, we are late in the technology readiness cycle, and many production-ready software libraries using sparse linear algebra routines have implemented and committed to their own sparse BLAS interface. At the same time, there exists a demand for standardization that would improve interoperability, and sustainability, and allow for easier integration of building blocks. In an inclusive, cross-institutional effort involving numerous academic institutions, US National Labs, and industry, we spent two years designing a hardware-portable interface for basic sparse linear algebra functionality that serves the user needs and is compatible with the different interfaces currently used by different vendors. In this paper, we present a C++ API for sparse linear algebra functionality, discuss the design choices, and detail how software developers preserve a lot of freedom in terms of how to implement functionality behind this API.",
"author": [
{
"family": "Abdelfattah",
"given": "Ahmad"
},
{
"family": "Ahrens",
"given": "Willow"
},
{
"family": "Anzt",
"given": "Hartwig"
},
{
"family": "Armstrong",
"given": "Chris"
},
{
"family": "Brock",
"given": "Ben"
},
{
"family": "Buluc",
"given": "Aydin"
},
{
"family": "Busato",
"given": "Federico"
},
{
"family": "Cojean",
"given": "Terry"
},
{
"family": "Davis",
"given": "Tim"
},
{
"family": "Demmel",
"given": "Jim"
},
{
"family": "Dinh",
"given": "Grace"
},
{
"family": "Gardener",
"given": "David"
},
{
"family": "Fiala",
"given": "Jan"
},
{
"family": "Gates",
"given": "Mark"
},
{
"family": "Haidar",
"given": "Azzam"
},
{
"family": "Imamura",
"given": "Toshiyuki"
},
{
"family": "Lara",
"given": "Pedro Valero"
},
{
"family": "Moreira",
"given": "Jose"
},
{
"family": "Li",
"given": "Sherry"
},
{
"family": "Luszczek",
"given": "Piotr"
},
{
"family": "Melichenko",
"given": "Max"
},
{
"family": "Moreira",
"given": "Jose"
},
{
"family": "Mokwinski",
"given": "Yvan"
},
{
"family": "Murray",
"given": "Riley"
},
{
"family": "Patty",
"given": "Spencer"
},
{
"family": "Peles",
"given": "Slaven"
},
{
"family": "Ribizel",
"given": "Tobias"
},
{
"family": "Riedy",
"given": "Jason"
},
{
"family": "Rajamanickam",
"given": "Siva"
},
{
"family": "Sao",
"given": "Piyush"
},
{
"family": "Shantharam",
"given": "Manu"
},
{
"family": "Teranishi",
"given": "Keita"
},
{
"family": "Tomov",
"given": "Stan"
},
{
"family": "Tsai",
"given": "Yu-Hsiang"
},
{
"family": "Weichelt",
"given": "Heiko"
}
],
"id": "2024abdelfattahInterfaceSparseLinear",
"issued": {
"date-parts": [
[
2024
]
]
},
"keyword": "ejr-CV,technical-report",
"title": "Interface for sparse linear algebra operations",
"type": "webpage"
},
{
"URL": "https://www.google.com/search?q=wikimedia+%22Jason+Riedy%22",
"author": [
{
"literal": "Wikimedia"
}
],
"id": "wikimediaVariousUses",
"keyword": "ejr-CV,mention",
"title": "Various uses",
"type": "webpage"
}
]