Skip to content

Commit 70869ec

Browse files
authored
Update papers.bib
1 parent 3c80a38 commit 70869ec

File tree

1 file changed

+20
-6
lines changed

1 file changed

+20
-6
lines changed

_bibliography/papers.bib

Lines changed: 20 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -8,20 +8,34 @@ @inproceedings{liang2025granular
88
}
99
1010
@inproceedings{namyar2025mitigation,
11-
title={Enhancing Network Failure Mitigation with Performance-Aware Ranking},
12-
author={Namyar, Pooria and Ghavidel, Arvin and Crankshaw, Daniel and Berger, Daniel S and Hsieh, Kevin and Kandula, Srikanth and Govindan, Ramesh and Arzani, Behnaz},
11+
author = {Pooria Namyar and Arvin Ghavidel and Daniel Crankshaw and Daniel S. Berger and Kevin Hsieh and Srikanth Kandula and Ramesh Govindan and Behnaz Arzani},
12+
title = {Enhancing Network Failure Mitigation with {Performance-Aware} Ranking},
1313
booktitle = {22nd USENIX Symposium on Networked Systems Design and Implementation (NSDI 25)},
14-
year={2025},
14+
year = {2025},
15+
isbn = {978-1-939133-46-5},
16+
address = {Philadelphia, PA},
17+
pages = {335--357},
18+
url = {https://www.usenix.org/conference/nsdi25/presentation/namyar},
19+
publisher = {USENIX Association},
20+
month = apr,
1521
abbr={NSDI},
1622
selected = {yes},
23+
abstract = {Cloud providers install mitigations to reduce the impact of network failures within their datacenters. Existing network mitigation systems rely on simple local criteria or global proxy metrics to determine the best action. In this paper, we show that we can support a broader range of actions and select more effective mitigations by directly optimizing end-to-end flow-level metrics and analyzing actions holistically. To achieve this, we develop novel techniques to quickly estimate the impact of different mitigations and rank them with high fidelity. Our results on incidents from a large cloud provider show orders of magnitude improvements in flow completion time and throughput. We also show our approach scales to large datacenters.}
1724
}
1825
1926
@inproceedings{alcoz2025packs,
20-
title={Everything Matters in Programmable Packet Scheduling},
21-
author={Alcoz, Albert Gran and Vass, Balázs and Namyar, Pooria and Arzani, Behnaz and Rétvári, Gábor and Vanbever, Laurent},
27+
author = {Albert {Gran Alcoz} and Bal{\'a}zs Vass and Pooria Namyar and Behnaz Arzani and G{\'a}bor R{\'e}tv{\'a}ri and Laurent Vanbever},
28+
title = {Everything Matters in Programmable Packet Scheduling},
2229
booktitle = {22nd USENIX Symposium on Networked Systems Design and Implementation (NSDI 25)},
23-
year={2025},
30+
year = {2025},
31+
isbn = {978-1-939133-46-5},
32+
address = {Philadelphia, PA},
33+
pages = {1467--1485},
34+
url = {https://www.usenix.org/conference/nsdi25/presentation/alcoz},
35+
publisher = {USENIX Association},
36+
month = apr,
2437
abbr={NSDI},
38+
abstract = {Operators can deploy any scheduler they desire on existing switches through programmable packet schedulers: they tag packets with ranks (which indicate their priority) and schedule them in the order of these ranks. The ideal programmable scheduler is the Push-In First-Out (PIFO) queue, which schedules packets in a perfectly sorted order by “pushing” packets into any position of the queue based on their ranks. However, it is hard to implement PIFO queues in hardware due to their need to sort packets at line rate (based on their ranks). Recent proposals approximate PIFO behaviors on existing data-planes. While promising, they fail to simultaneously capture both of the necessary behaviors of PIFO queues: their scheduling behavior and admission control. We introduce PACKS, an approximate PIFO scheduler that addresses this problem. PACKS runs on top of a set of priority queues and uses packet-rank information and queue-occupancy levels during enqueue to determine whether to admit each incoming packet and to which queue it should be mapped. We fully implement PACKS in P4 and evaluate it on real workloads. We show that PACKS better approximates PIFO than state-of-the-art approaches. Specifically, PACKS reduces the rank inversions by up to 7× and 15× with respect to SP-PIFO and AIFO, and the number of packet drops by up to 60% compared to SP-PIFO. Under pFabric ranks, PACKS reduces the mean FCT across small flows by up to 33% and 2.6×, compared to SP-PIFO and AIFO. We also show that PACKS runs at line rate on existing hardware (Intel Tofino).}
2539
}
2640
2741
@inproceedings{quicksand-nsdi25,

0 commit comments

Comments
 (0)