You signed in with another tab or window. Reload to refresh your session.You signed out in another tab or window. Reload to refresh your session.You switched accounts on another tab or window. Reload to refresh your session.Dismiss alert
author = {Ruan, Zhenyuan and Li, Shihang and Fan, Kaiyan and Aguilera, Marcos K. and Belay, Adam and Park, Seo Jin and Schwarzkopf, Malte},
20
20
title = {Quicksand: Harnessing Stranded Datacenter Resources with Granular Computing},
21
21
booktitle = {22nd USENIX Symposium on Networked Systems Design and Implementation (NSDI 25)},
22
-
year = 2025,
22
+
year = {2025},
23
23
abbr = {NSDI},
24
24
selected = {yes}
25
25
}
26
26
27
+
@inproceedings{shin2024recap,
  title     = {{RECAP}: {3D} Traffic Reconstruction},
  author    = {Shin, Christina and Pang, Weiwu and Li, Chuan and Bai, Fan and Ahmad, Fawad and Paek, Jeongyeup and Govindan, Ramesh},
  booktitle = {Proceedings of the 30th Annual International Conference on Mobile Computing and Networking (MobiCom 24)},
  year      = {2024},
  doi       = {10.1145/3636534.3690691},
  abbr      = {MobiCom},
  abstract  = {On-vehicle 3D sensing technologies, such as LiDARs and stereo cameras, enable a novel capability, 3D traffic reconstruction. This produces a volumetric video consisting of a sequence of 3D frames capturing the time evolution of road traffic. 3D traffic reconstruction can help trained investigators reconstruct the scene of an accident. In this paper, we describe the design and implementation of RECAP, a system that continuously and opportunistically produces 3D traffic reconstructions from multiple vehicles. RECAP builds upon prior work on point cloud registration, but adapts it to settings with minimal point cloud overlap (both in the spatial and temporal sense) and develops techniques to minimize error and computation time in multi-way registration. On-road experiments and trace-driven simulations show that RECAP can, within minutes, generate highly accurate reconstructions that have 2× or more lower errors than competing approaches.},
  selected  = {yes}
}
35
+
27
36
@inproceedings{namyar2024learning,
28
37
title={End-to-End Performance Analysis of Learning-enabled Systems},
29
38
author={Namyar, Pooria and Schapira, Michael and Govindan, Ramesh and Segarra, Santiago and Beckett, Ryan and Kakarla, Siva Kesava Reddy and Arzani, Behnaz},
Copy file name to clipboardExpand all lines: _bibliography/people/christina.bib
+16-17Lines changed: 16 additions & 17 deletions
Original file line number
Diff line number
Diff line change
@@ -1,24 +1,23 @@
1
-
@inproceedings{recap,
2
-
doi = {},
3
-
url = {},
4
-
title = {RECAP: 3D Traffic Reconstruction},
1
+
@inproceedings{shin2024recap,
2
+
doi={10.1145/3636534.3690691},
3
+
url={},
4
+
title={RECAP: 3D Traffic Reconstruction},
5
5
author={Christina Shin and Weiwu Pang and Chuan Li and Fan Bai and Fawad Ahmad and Jeongyeup Paek and Ramesh Govindan},
6
-
booktitle={International Conference on Mobile Computing and Networking (MobiCom)},
7
-
abbr={MobiCom},
6
+
booktitle={Proceedings of the 30th Annual International Conference on Mobile Computing and Networking (MobiCom)},
8
7
volume={},
9
8
number={},
10
9
pages={},
11
10
year={2024},
12
11
publisher={ACM},
13
-
abstract={}
12
+
abstract={On-vehicle 3D sensing technologies, such as LiDARs and stereo cameras, enable a novel capability, 3D traffic reconstruction. This produces a volumetric video consisting of a sequence of 3D frames capturing the time evolution of road traffic. 3D traffic reconstruction can help trained investigators reconstruct the scene of an accident. In this paper, we describe the design and implementation of RECAP, a system that continuously and opportunistically produces 3D traffic reconstructions from multiple vehicles. RECAP builds upon prior work on point cloud registration, but adapts it to settings with minimal point cloud overlap (both in the spatial and temporal sense) and develops techniques to minimize error and computation time in multi-way registration. On-road experiments and trace-driven simulations show that RECAP can, within minutes, generate highly accurate reconstructions that have 2× or more lower errors than competing approaches.}
author={Fawad Ahmad and Christina Shin and Weiwu Pang and Branden Leong and Pradipta Ghosh and Ramesh Govindan},
21
-
booktitle={ACM/IEEE Conference on Internet of Things Design and Implementation},
20
+
booktitle={ACM/IEEE Conference on Internet of Things Design and Implementation (IoTDI)},
22
21
abbr={IoTDI},
23
22
volume={},
24
23
number={},
@@ -28,12 +27,12 @@ @inproceedings{cip
28
27
abstract={Recent works have considered two qualitatively different approaches to overcome line-of-sight limitations of 3D sensors used for perception: cooperative perception and infrastructure-augmented perception. In this paper, motivated by increasing deployments of infrastructure LiDARs, we explore a third approach – cooperative infrastructure perception. This approach generates perception outputs by fusing outputs of multiple infrastructure sensors, but, to be useful, must do so quickly and accurately. We describe the design, implementation and evaluation of Cooperative Infrastructure Perception (CIP), which uses a combination of novel algorithms and systems optimizations. It produces perception outputs within 100 ms using modest computing resources and with accuracy comparable to the state-of-the-art. CIP, when used to augment vehicle perception, can improve safety. When used in conjunction with offloaded planning, CIP can increase traffic throughput at intersections.}
0 commit comments