Skip to content

Commit 9dcc786

Browse files
Update christina
1 parent 35ca890 commit 9dcc786

File tree

5 files changed

+28
-20
lines changed

5 files changed

+28
-20
lines changed

_bibliography/papers.bib

Lines changed: 10 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -19,11 +19,20 @@ @inproceedings{quicksand-nsdi25
1919
author = {Ruan, Zhenyuan and Li, Shihang and Fan, Kaiyan and Aguilera, Marcos K. and Belay, Adam and Park, Seo Jin and Schwarzkopf, Malte},
2020
title = {Quicksand: Harnessing Stranded Datacenter Resources with Granular Computing},
2121
booktitle = {22nd USENIX Symposium on Networked Systems Design and Implementation (NSDI 25)},
22-
year = 2025,
22+
year = {2025},
2323
abbr = {NSDI},
2424
selected = {yes}
2525
}
2626
27+
@inproceedings{shin2024recap,
  title     = {{RECAP}: {3D} Traffic Reconstruction},
  author    = {Shin, Christina and Pang, Weiwu and Li, Chuan and Bai, Fan and Ahmad, Fawad and Paek, Jeongyeup and Govindan, Ramesh},
  booktitle = {Proceedings of the 30th Annual International Conference on Mobile Computing and Networking (MobiCom 24)},
  year      = {2024},
  abbr      = {MobiCom},
  abstract  = {On-vehicle 3D sensing technologies, such as LiDARs and stereo cameras, enable a novel capability, 3D traffic reconstruction. This produces a volumetric video consisting of a sequence of 3D frames capturing the time evolution of road traffic. 3D traffic reconstruction can help trained investigators reconstruct the scene of an accident. In this paper, we describe the design and implementation of RECAP, a system that continuously and opportunistically produces 3D traffic reconstructions from multiple vehicles. RECAP builds upon prior work on point cloud registration, but adapts it to settings with minimal point cloud overlap (both in the spatial and temporal sense) and develops techniques to minimize error and computation time in multi-way registration. On-road experiments and trace-driven simulations show that RECAP can, within minutes, generate highly accurate reconstructions that have 2× or more lower errors than competing approaches.},
  selected  = {yes}
}
35+
2736
@inproceedings{namyar2024learning,
2837
title={End-to-End Performance Analysis of Learning-enabled Systems},
2938
author={Namyar, Pooria and Schapira, Michael and Govindan, Ramesh and Segarra, Santiago and Beckett, Ryan and Kakarla, Siva Kesava Reddy and Arzani, Behnaz},

_bibliography/people/christina.bib

Lines changed: 16 additions & 17 deletions
Original file line numberDiff line numberDiff line change
@@ -1,24 +1,23 @@
1-
@inproceedings{recap,
2-
doi = {},
3-
url = {},
4-
title = {RECAP: 3D Traffic Reconstruction},
1+
@inproceedings{shin2024recap,
  doi       = {10.1145/3636534.3690691},
  title     = {{RECAP}: {3D} Traffic Reconstruction},
  author    = {Christina Shin and Weiwu Pang and Chuan Li and Fan Bai and Fawad Ahmad and Jeongyeup Paek and Ramesh Govindan},
  booktitle = {Proceedings of the 30th Annual International Conference on Mobile Computing and Networking (MobiCom)},
  abbr      = {MobiCom},
  year      = {2024},
  publisher = {ACM},
  abstract  = {On-vehicle 3D sensing technologies, such as LiDARs and stereo cameras, enable a novel capability, 3D traffic reconstruction. This produces a volumetric video consisting of a sequence of 3D frames capturing the time evolution of road traffic. 3D traffic reconstruction can help trained investigators reconstruct the scene of an accident. In this paper, we describe the design and implementation of RECAP, a system that continuously and opportunistically produces 3D traffic reconstructions from multiple vehicles. RECAP builds upon prior work on point cloud registration, but adapts it to settings with minimal point cloud overlap (both in the spatial and temporal sense) and develops techniques to minimize error and computation time in multi-way registration. On-road experiments and trace-driven simulations show that RECAP can, within minutes, generate highly accurate reconstructions that have 2× or more lower errors than competing approaches.}
}
1514

16-
@inproceedings{cip,
17-
doi = {10.1109/IoTDI61053.2024.00010},
18-
url = {https://doi.ieeecomputersociety.org/10.1109/IoTDI61053.2024.00010},
19-
title = {Cooperative Infrastructure Perception},
15+
@inproceedings{ahmad2024cip,
16+
doi={10.1109/IoTDI61053.2024.00010},
17+
url={https://doi.ieeecomputersociety.org/10.1109/IoTDI61053.2024.00010},
18+
title={Cooperative Infrastructure Perception},
2019
author={Fawad Ahmad and Christina Shin and Weiwu Pang and Branden Leong and Pradipta Ghosh and Ramesh Govindan},
21-
booktitle={ACM/IEEE Conference on Internet of Things Design and Implementation},
20+
booktitle={ACM/IEEE Conference on Internet of Things Design and Implementation (IoTDI)},
2221
abbr={IoTDI},
2322
volume={},
2423
number={},
@@ -28,12 +27,12 @@ @inproceedings{cip
2827
abstract={Recent works have considered two qualitatively different approaches to overcome line-of-sight limitations of 3D sensors used for perception: cooperative perception and infrastructure-augmented perception. In this paper, motivated by increasing deployments of infrastructure LiDARs, we explore a third approach – cooperative infrastructure perception. This approach generates perception outputs by fusing outputs of multiple infrastructure sensors, but, to be useful, must do so quickly and accurately. We describe the design, implementation and evaluation of Cooperative Infrastructure Perception (CIP), which uses a combination of novel algorithms and systems optimizations. It produces perception outputs within 100 ms using modest computing resources and with accuracy comparable to the state-of-the-art. CIP, when used to augment vehicle perception, can improve safety. When used in conjunction with offloaded planning, CIP can increase traffic throughput at intersections.}
2928
}
3029

31-
@inproceedings{aerotraj,
32-
doi = {10.1145/3610911},
33-
url = {https://dl.acm.org/doi/abs/10.1145/3610911},
34-
title = {AeroTraj: Trajectory Planning for Fast, and Accurate 3D Reconstruction Using a Drone-based LiDAR},
30+
@inproceedings{ahmad2023aerotraj,
31+
doi={10.1145/3610911},
32+
url={https://dl.acm.org/doi/abs/10.1145/3610911},
33+
title={AeroTraj: Trajectory Planning for Fast, and Accurate 3D Reconstruction Using a Drone-based LiDAR},
3534
author={Fawad Ahmad and Christina Shin and Rajrup Ghosh and John D'Ambrosio and Eugene Chai and Karthikeyan Sundaresan and Ramesh Govindan},
36-
booktitle={Proceedings of the ACM on Interactive, Mobile, Wearable and Ubiquitous Technologies},
35+
booktitle={Proceedings of the ACM on Interactive, Mobile, Wearable and Ubiquitous Technologies (Ubicomp/IMWUT)},
3736
abbr={Ubicomp/IMWUT},
3837
volume={7},
3938
number={3},

_data/people/christina.yml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -6,7 +6,7 @@ github: https://github.com/christina-shin
66
linkedin: https://www.linkedin.com/in/christina-shin-9477a31a1
77
twitter: https://twitter.com/christina3_3
88
scholar: https://scholar.google.com/citations?user=6VwIYFwAAAAJ&hl=en
9-
cv: Christina_CV.pdf
9+
cv: 2024-10-01-Christina-CV.pdf
1010
education:
1111
- (2019-Present) Ph.D. in Computer Science, University of Southern California, Los Angeles, USA.
1212
- (2017-2019) M.S. in Computer Science and Engineering, Ewha Womans University, Seoul, South Korea.

_pages/people/christina.md

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -44,7 +44,7 @@ I am a fifth year Ph.D. student in <a href="https://www.cs.usc.edu/">Computer Sc
4444
</p>
4545

4646
<p>
47-
<em><strong>Research Intern (May 2021 - Aug 2021, May 2024 - Present)</strong></em><br>
47+
<em><strong>Research Intern (May 2021 - Aug 2021, May 2024 - Aug 2024)</strong></em><br>
4848
General Motors Research and Development, Warren, USA.<br>
4949
Mentor: Chuan Li and Fan Bai<br>
5050
</p>
Binary file not shown.

0 commit comments

Comments
 (0)