
Commit 8414ed0 (merge of parents 7c6a1d7 + ec25620)

13 files changed (+313, -22 lines)


.github/workflows/deploy.yml

Lines changed: 1 addition & 1 deletion
@@ -21,7 +21,7 @@ jobs:
       with:
         ruby-version: '3.0.6'
     - name: Enable bundler cache
-      uses: actions/cache@v2
+      uses: actions/cache@v4
       with:
         path: vendor/bundle
         key: ${{ runner.os }}-gems-${{ hashFiles('**/Gemfile.lock') }}
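For reference, actions/cache@v4 accepts the same inputs used here (path and key, plus an optional restore-keys), so the version bump needs no other changes. Below is a minimal sketch of how this step might sit in the full job; the checkout, ruby/setup-ruby, and bundle-install steps around it are assumptions for illustration, not part of this diff:

# Hypothetical surrounding job; only the actions/cache@v4 step with this
# path/key comes from the diff above.
jobs:
  deploy:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - name: Setup Ruby
        uses: ruby/setup-ruby@v1
        with:
          ruby-version: '3.0.6'
      - name: Enable bundler cache
        uses: actions/cache@v4
        with:
          # Cache installed gems; the key changes whenever Gemfile.lock does.
          path: vendor/bundle
          key: ${{ runner.os }}-gems-${{ hashFiles('**/Gemfile.lock') }}
          # Fall back to the most recent cache for this OS on a key miss.
          restore-keys: |
            ${{ runner.os }}-gems-
      - name: Install dependencies
        run: |
          bundle config path vendor/bundle
          bundle install --jobs 4

Keying on hashFiles('**/Gemfile.lock') invalidates the cache exactly when locked gem versions change, while restore-keys lets a run start from the newest stale cache instead of installing from scratch.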

.gitignore

Lines changed: 2 additions & 1 deletion
@@ -8,4 +8,5 @@ _site
 .tweet-cache
 Gemfile.lock
 vendor
-*workspace
+*workspace
+.DS_Store

_bibliography/papers.bib

Lines changed: 31 additions & 7 deletions
@@ -1,18 +1,41 @@
+@inproceedings{liang2025granular,
+title={Granular Resource Demand Heterogeneity},
+author={Liang, Yizhuo and Govindan, Ramesh and Park, Seo Jin},
+booktitle={Proceedings of the 20th Workshop on Hot Topics in Operating Systems},
+year={2025},
+abbr={HotOS},
+selected = {yes},
+}
+
 @inproceedings{namyar2025mitigation,
-title={Enhancing Network Failure Mitigation with Performance-Aware Ranking},
-author={Namyar, Pooria and Ghavidel, Arvin and Crankshaw, Daniel and Berger, Daniel S and Hsieh, Kevin and Kandula, Srikanth and Govindan, Ramesh and Arzani, Behnaz},
+author = {Pooria Namyar and Arvin Ghavidel and Daniel Crankshaw and Daniel S. Berger and Kevin Hsieh and Srikanth Kandula and Ramesh Govindan and Behnaz Arzani},
+title = {Enhancing Network Failure Mitigation with {Performance-Aware} Ranking},
 booktitle = {22nd USENIX Symposium on Networked Systems Design and Implementation (NSDI 25)},
-year={2025},
+year = {2025},
+isbn = {978-1-939133-46-5},
+address = {Philadelphia, PA},
+pages = {335--357},
+url = {https://www.usenix.org/conference/nsdi25/presentation/namyar},
+publisher = {USENIX Association},
+month = apr,
 abbr={NSDI},
 selected = {yes},
+abstract={Cloud providers install mitigations to reduce the impact of network failures within their datacenters. Existing network mitigation systems rely on simple local criteria or global proxy metrics to determine the best action. In this paper, we show that we can support a broader range of actions and select more effective mitigations by directly optimizing end-to-end flow-level metrics and analyzing actions holistically. To achieve this, we develop novel techniques to quickly estimate the impact of different mitigations and rank them with high fidelity. Our results on incidents from a large cloud provider show orders of magnitude improvements in flow completion time and throughput. We also show our approach scales to large datacenters.}
 }
 
 @inproceedings{alcoz2025packs,
-title={Everything Matters in Programmable Packet Scheduling},
-author={Alcoz, Albert Gran and Vass, Balázs and Namyar, Pooria and Arzani, Behnaz and Rétvári, Gábor and Vanbever, Laurent},
+author = {Albert Gran Alcoz and Bal{\'a}zs Vass and Pooria Namyar and Behnaz Arzani and Gabor Retvari and Laurent Vanbever},
+title = {Everything Matters in Programmable Packet Scheduling},
 booktitle = {22nd USENIX Symposium on Networked Systems Design and Implementation (NSDI 25)},
-year={2025},
+year = {2025},
+isbn = {978-1-939133-46-5},
+address = {Philadelphia, PA},
+pages = {1467--1485},
+url = {https://www.usenix.org/conference/nsdi25/presentation/alcoz},
+publisher = {USENIX Association},
+month = apr,
 abbr={NSDI},
+abstract={Operators can deploy any scheduler they desire on existing switches through programmable packet schedulers: they tag packets with ranks (which indicate their priority) and schedule them in the order of these ranks. The ideal programmable scheduler is the Push-In First-Out (PIFO) queue, which schedules packets in a perfectly sorted order by “pushing” packets into any position of the queue based on their ranks. However, it is hard to implement PIFO queues in hardware due to their need to sort packets at line rate (based on their ranks). Recent proposals approximate PIFO behaviors on existing data-planes. While promising, they fail to simultaneously capture both of the necessary behaviors of PIFO queues: their scheduling behavior and admission control. We introduce PACKS, an approximate PIFO scheduler that addresses this problem. PACKS runs on top of a set of priority queues and uses packet-rank information and queue-occupancy levels during enqueue to determine whether to admit each incoming packet and to which queue it should be mapped. We fully implement PACKS in P4 and evaluate it on real workloads. We show that PACKS better approximates PIFO than state-of-the-art approaches. Specifically, PACKS reduces the rank inversions by up to 7× and 15× with respect to SP-PIFO and AIFO, and the number of packet drops by up to 60% compared to SP-PIFO. Under pFabric ranks, PACKS reduces the mean FCT across small flows by up to 33% and 2.6×, compared to SP-PIFO and AIFO. We also show that PACKS runs at line rate on existing hardware (Intel Tofino).}
 }
 
 @inproceedings{quicksand-nsdi25,

@@ -80,7 +103,8 @@ @inproceedings{he2024rpslyzer
 year = "2024",
 abbr = "IMC",
 code = "https://github.com/SichangHe/internet\_route\_verification",
-url = "https://github.com/SichangHe/internet\_route\_verification/releases/tag/imc-camera-ready"
+url = "https://github.com/SichangHe/internet\_route\_verification/releases/tag/imc-camera-ready",
+slides = "https://github.com/SichangHe/internet_route_verification/releases/tag/imc24-slides"
 }
 
 @InProceedings{namyar2024metaopt,

_bibliography/people/pooria.bib

Lines changed: 4 additions & 2 deletions
@@ -4,8 +4,8 @@ @inproceedings{namyar2025mitigation
 booktitle = {22nd USENIX Symposium on Networked Systems Design and Implementation (NSDI 25)},
 year={2025},
 abbr={NSDI},
-url={https://arxiv.org/abs/2305.13792},
-abstract={Cloud providers install mitigations to reduce the impact of network failures in their datacenters. To determine the best action, existing automatic network mitigation systems rely on simple local criteria or global proxy metrics. In this paper, we show that we can explicitly optimize end-to-end flow-level metrics and analyze actions holistically to support a broader range of actions and select much more effective mitigations. To this end, we develop novel techniques to quickly estimate the impact of different mitigations and rank them with high fidelity. Our results on incidents from a large cloud provider show orders of magnitude improvements in flow completion time and throughput. We also show our approach scales to large datacenters.}
+url={https://drive.google.com/file/d/1kXOCrUBKsEAAyjfDI8fCCt-7poA3hPuP/view},
+abstract={Cloud providers install mitigations to reduce the impact of network failures within their datacenters. Existing network mitigation systems rely on simple local criteria or global proxy metrics to determine the best action. In this paper, we show that we can support a broader range of actions and select more effective mitigations by directly optimizing end-to-end flow-level metrics and analyzing actions holistically. To achieve this, we develop novel techniques to quickly estimate the impact of different mitigations and rank them with high fidelity. Our results on incidents from a large cloud provider show orders of magnitude improvements in flow completion time and throughput. We also show our approach scales to large datacenters.}
 }
 
 @inproceedings{alcoz2025packs,

@@ -14,6 +14,8 @@ @inproceedings{alcoz2025packs
 booktitle = {22nd USENIX Symposium on Networked Systems Design and Implementation (NSDI 25)},
 year={2025},
 abbr={NSDI},
+url={https://www.research-collection.ethz.ch/handle/20.500.11850/625335},
+abstract={Operators can deploy any scheduler they desire on existing switches through programmable packet schedulers: they tag packets with ranks (which indicate their priority) and schedule them in the order of these ranks. The ideal programmable scheduler is the Push-In First-Out (PIFO) queue, which schedules packets in a perfectly sorted order by “pushing” packets into any position of the queue based on their ranks. However, it is hard to implement PIFO queues in hardware due to their need to sort packets at line rate (based on their ranks). Recent proposals approximate PIFO behaviors on existing data-planes. While promising, they fail to simultaneously capture both of the necessary behaviors of PIFO queues: their scheduling behavior and admission control. We introduce PACKS, an approximate PIFO scheduler that addresses this problem. PACKS runs on top of a set of priority queues and uses packet-rank information and queue-occupancy levels during enqueue to determine whether to admit each incoming packet and to which queue it should be mapped. We fully implement PACKS in P4 and evaluate it on real workloads. We show that PACKS better approximates PIFO than state-of-the-art approaches. Specifically, PACKS reduces the rank inversions by up to 7× and 15× with respect to SP-PIFO and AIFO, and the number of packet drops by up to 60% compared to SP-PIFO. Under pFabric ranks, PACKS reduces the mean FCT across small flows by up to 33% and 2.6×, compared to SP-PIFO and AIFO. We also show that PACKS runs at line rate on existing hardware (Intel Tofino).}
 }
 
 @inproceedings{namyar2024learning,
