
Commit 88b9a85

Refactor: dev_tx_queue_set_rl_rate()
Signed-off-by: Kasiewicz, Marek <marek.kasiewicz@intel.com>
1 parent ba8e062 commit 88b9a85

1 file changed: +122 -55 lines changed

lib/src/dev/mt_dev.c

Lines changed: 122 additions & 55 deletions
@@ -687,8 +687,14 @@ static int dev_init_ratelimit_all(struct mt_interface* inf) {
   return 0;
 }
 
-static int dev_tx_queue_set_rl_rate(struct mt_interface* inf, uint16_t queue,
-                                    uint64_t bytes_per_sec) {
+/* Note: IAVF behavior in DPDK 25.03:
+ * 1. If the hierarchy is committed on a started device, nodes cannot be removed after
+ *    the commit.
+ * 2. After committing the hierarchy, the device must be restarted for changes to take
+ *    effect.
+ */
+static int dev_tx_queue_set_rl_rate_vf(struct mt_interface* inf, uint16_t queue,
+                                       uint64_t bytes_per_sec) {
   uint16_t port_id = inf->port_id;
   enum mtl_port port = inf->port;
   struct mt_tx_queue* tx_queue = &inf->tx_queues[queue];
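
The note above, together with the comments added later in this commit, dictates an awkward call order on IAVF VFs: edit the traffic-manager nodes while the port is stopped, start the port again before committing (see the vf->tm_conf.committed remark further down), and restart once more so the committed rate actually applies. Below is a minimal sketch of that order, not code from this commit; parent_node_id, level_id and shaper_profile_id are placeholders for values prepared elsewhere (e.g. via rte_tm_shaper_profile_add()), and error handling is collapsed to early returns.

/* Sketch only (not code from this commit): the VF call order per the notes above. */
#include <stdint.h>
#include <rte_ethdev.h>
#include <rte_tm.h>

static int vf_requeue_shaper(uint16_t port_id, uint32_t queue, uint32_t parent_node_id,
                             uint32_t level_id, uint32_t shaper_profile_id) {
  struct rte_tm_error error = {0};
  struct rte_tm_node_params qp = {0};
  int ret;

  ret = rte_eth_dev_stop(port_id); /* the leaf node can only be replaced on a stopped port */
  if (ret) return ret;
  rte_tm_node_delete(port_id, queue, &error); /* drop the previous leaf node, if any */

  qp.shaper_profile_id = shaper_profile_id;
  qp.leaf.cman = RTE_TM_CMAN_TAIL_DROP;
  qp.leaf.wred.wred_profile_id = RTE_TM_WRED_PROFILE_ID_NONE;
  ret = rte_tm_node_add(port_id, queue, parent_node_id, 0, 1, level_id, &qp, &error);
  if (ret) return ret;

  /* start again before the commit, mirroring the VF path in this commit
   * (see its note about vf->tm_conf.committed) */
  ret = rte_eth_dev_start(port_id);
  if (ret) return ret;
  ret = rte_tm_hierarchy_commit(port_id, 1 /* clear_on_fail */, &error);
  if (ret) return ret;

  /* per the note, the committed hierarchy only takes effect after a restart */
  ret = rte_eth_dev_stop(port_id);
  if (ret) return ret;
  return rte_eth_dev_start(port_id);
}
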
@@ -700,13 +706,6 @@ static int dev_tx_queue_set_rl_rate(struct mt_interface* inf, uint16_t queue,
 
   memset(&error, 0, sizeof(error));
 
-  if (!bps) { /* default */
-    bps = ST_DEFAULT_RL_BPS;
-  }
-
-  /* not changed */
-  if (bps == tx_queue->bps) return 0;
-
   ret = mt_pthread_rwlock_wrlock(&inf->rl_rwlock);
   if (ret) {
     err("%s(%d), failed to acquire write lock, ret %d\n", __func__, port, ret);
@@ -716,7 +715,7 @@ static int dev_tx_queue_set_rl_rate(struct mt_interface* inf, uint16_t queue,
   ret = rte_eth_dev_stop(port_id);
   if (ret) {
     err("%s(%d), stop port %d fail %d\n", __func__, port, port_id, ret);
-    goto exit;
+    goto error_unlock_rwlock;
   }
 
   /* delete old queue node */
@@ -725,7 +724,7 @@ static int dev_tx_queue_set_rl_rate(struct mt_interface* inf, uint16_t queue,
     if (ret < 0) {
       err("%s(%d), node %d delete fail %d(%s)\n", __func__, port, queue, ret,
           mt_string_safe(error.message));
-      goto exit;
+      goto error_unlock_rwlock;
     }
     tx_queue->rl_shapers_mapping = -1;
   }
@@ -735,80 +734,148 @@ static int dev_tx_queue_set_rl_rate(struct mt_interface* inf, uint16_t queue,
     if (!shaper) {
       err("%s(%d), rl shaper get fail for q %d\n", __func__, port, queue);
       ret = -EIO;
-      goto exit;
+      goto error_unlock_rwlock;
     }
     memset(&qp, 0, sizeof(qp));
     qp.shaper_profile_id = shaper->shaper_profile_id;
     qp.leaf.cman = RTE_TM_CMAN_TAIL_DROP;
     qp.leaf.wred.wred_profile_id = RTE_TM_WRED_PROFILE_ID_NONE;
-    if (inf->drv_info.drv_type == MT_DRV_IAVF) {
-      ret = rte_tm_node_add(port_id, queue, ST_TM_LAST_NONLEAF_NODE_ID_VF, 0, 1,
-                            ST_TM_NONLEAF_NODES_NUM_VF, &qp, &error);
-    } else {
-      ret = rte_tm_node_add(port_id, queue, ST_TM_LAST_NONLEAF_NODE_ID_PF, 0, 1,
-                            ST_TM_NONLEAF_NODES_NUM_PF, &qp, &error);
-    }
+    ret = rte_tm_node_add(port_id, queue, ST_TM_LAST_NONLEAF_NODE_ID_VF, 0, 1,
+                          ST_TM_NONLEAF_NODES_NUM_VF, &qp, &error);
     if (ret) {
       err("%s(%d), q %d add fail %d(%s)\n", __func__, port, queue, ret,
           mt_string_safe(error.message));
-      goto exit;
+      goto error_unlock_rwlock;
     }
 
     tx_queue->rl_shapers_mapping = shaper->idx;
     info("%s(%d), q %d link to shaper id %d(%" PRIu64 ")\n", __func__, port, queue,
          shaper->shaper_profile_id, shaper->rl_bps);
   }
+  /* start the port so that vf->tm_conf.committed in IAVF is not set to true If it is
+   * set, the node cannot be removed afterwards. stupid, yes I know. */
+  ret = rte_eth_dev_start(port_id);
+  if (ret) {
+    err("%s(%d), start port %d fail %d\n", __func__, port, port_id, ret);
+    goto error_unlock_rwlock;
+  }
 
-  if (inf->drv_info.drv_type == MT_DRV_IAVF) {
-    /* Note: IAVF behavior in DPDK 25.03:
-     * 1. If the hierarchy is committed on a started device, nodes cannot be removed after
-     *    the commit.
-     * 2. After committing the hierarchy, the device must be restarted for changes to take
-     *    effect.
-     */
-    ret = rte_eth_dev_start(port_id);
-    if (ret) {
-      err("%s(%d), start port %d fail %d\n", __func__, port, port_id, ret);
-      goto exit;
-    }
+  ret = rte_tm_hierarchy_commit(port_id, 1, &error);
+  if (ret) {
+    err("%s(%d), commit error (%d)%s\n", __func__, port, ret,
+        mt_string_safe(error.message));
+    goto error_unlock_rwlock;
+  }
 
-    ret = rte_tm_hierarchy_commit(port_id, 1, &error);
-    if (ret) {
-      err("%s(%d), commit error (%d)%s\n", __func__, port, ret,
+  /* restart the port to apply the new rate limit */
+  ret = rte_eth_dev_stop(port_id);
+  if (ret) {
+    err("%s(%d), stop port %d fail %d\n", __func__, port, port_id, ret);
+    goto error_unlock_rwlock;
+  }
+
+  ret = rte_eth_dev_start(port_id);
+  if (ret) {
+    err("%s(%d), start port %d fail %d\n", __func__, port, port_id, ret);
+    goto error_unlock_rwlock;
+  }
+
+  tx_queue->bps = bps;
+
+error_unlock_rwlock:
+  ret = mt_pthread_rwlock_unlock(&inf->rl_rwlock);
+  if (ret) err("%s(%d), failed to release write lock, ret %d\n", __func__, port, ret);
+
+  return ret;
+}
+
+static int dev_tx_queue_set_rl_rate_pf(struct mt_interface* inf, uint16_t queue,
+                                       uint64_t bytes_per_sec) {
+  uint16_t port_id = inf->port_id;
+  enum mtl_port port = inf->port;
+  struct mt_tx_queue* tx_queue = &inf->tx_queues[queue];
+  uint64_t bps = bytes_per_sec;
+  int ret;
+  struct rte_tm_error error;
+  struct rte_tm_node_params qp;
+  struct mt_rl_shaper* shaper;
+
+  memset(&error, 0, sizeof(error));
+
+  /* delete old queue node */
+  if (tx_queue->rl_shapers_mapping >= 0) {
+    ret = rte_tm_node_delete(port_id, queue, &error);
+    if (ret < 0) {
+      err("%s(%d), node %d delete fail %d(%s)\n", __func__, port, queue, ret,
           mt_string_safe(error.message));
-      goto exit;
+      return ret;
     }
+    tx_queue->rl_shapers_mapping = -1;
+  }
 
-    ret = rte_eth_dev_stop(port_id);
-    if (ret) {
-      err("%s(%d), stop port %d fail %d\n", __func__, port, port_id, ret);
-      goto exit;
+  if (bps) {
+    shaper = dev_rl_shaper_get(inf, bps);
+    if (!shaper) {
+      err("%s(%d), rl shaper get fail for q %d\n", __func__, port, queue);
+      return -EIO;
     }
+    memset(&qp, 0, sizeof(qp));
+    qp.shaper_profile_id = shaper->shaper_profile_id;
+    qp.leaf.cman = RTE_TM_CMAN_TAIL_DROP;
+    qp.leaf.wred.wred_profile_id = RTE_TM_WRED_PROFILE_ID_NONE;
+
+    ret = rte_tm_node_add(port_id, queue, ST_TM_LAST_NONLEAF_NODE_ID_PF, 0, 1,
+                          ST_TM_NONLEAF_NODES_NUM_PF, &qp, &error);
 
-  } else {
-    ret = rte_tm_hierarchy_commit(port_id, 1, &error);
     if (ret) {
-      err("%s(%d), commit error (%d)%s\n", __func__, port, ret,
+      err("%s(%d), q %d add fail %d(%s)\n", __func__, port, queue, ret,
           mt_string_safe(error.message));
-      goto exit;
+      return ret;
     }
+
+    tx_queue->rl_shapers_mapping = shaper->idx;
+    info("%s(%d), q %d link to shaper id %d(%" PRIu64 ")\n", __func__, port, queue,
+         shaper->shaper_profile_id, shaper->rl_bps);
   }
 
-  ret = rte_eth_dev_start(port_id);
+  ret = mt_pthread_rwlock_wrlock(&inf->rl_rwlock);
   if (ret) {
-    err("%s(%d), start port %d fail %d\n", __func__, port, port_id, ret);
-    goto exit;
+    err("%s(%d), failed to acquire write lock, ret %d\n", __func__, port, ret);
+    return ret;
   }
 
-  tx_queue->bps = bps;
+  ret = rte_tm_hierarchy_commit(port_id, 1, &error);
+  if (ret) {
+    err("%s(%d), commit error (%d)%s\n", __func__, port, ret,
+        mt_string_safe(error.message));
+  } else {
+    tx_queue->bps = bps;
+  }
 
-exit:
   ret = mt_pthread_rwlock_unlock(&inf->rl_rwlock);
   if (ret) err("%s(%d), failed to release write lock, ret %d\n", __func__, port, ret);
 
   return ret;
 }
 
+static int dev_tx_queue_set_rl_rate(struct mt_interface* inf, uint16_t queue,
+                                    uint64_t bytes_per_sec) {
+  struct mt_tx_queue* tx_queue = &inf->tx_queues[queue];
+  uint64_t bps = bytes_per_sec;
+
+  if (!bps) { /* default */
+    bps = ST_DEFAULT_RL_BPS;
+  }
+
+  /* not changed */
+  if (bps == tx_queue->bps) return 0;
+
+  if (inf->drv_info.drv_type == MT_DRV_IAVF)
+    return dev_tx_queue_set_rl_rate_vf(inf, queue, bps);
+  else
+    return dev_tx_queue_set_rl_rate_pf(inf, queue, bps);
+}
+
 static int dev_stop_port(struct mt_interface* inf) {
   int ret;
   uint16_t port_id = inf->port_id;
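
With this hunk the original function is split three ways: dev_tx_queue_set_rl_rate_vf() holds the write lock across the whole stop / delete / add / start / commit / restart sequence, dev_tx_queue_set_rl_rate_pf() edits the node on a running port and only takes the write lock around the commit (and updates tx_queue->bps only when the commit succeeds), and a thin dispatcher keeps the shared default-rate and no-change checks. The stand-in sketch below mirrors only the dispatcher's contract; the DEFAULT_BPS value and the set_rate_* helpers are placeholders, not MTL code.

/* Stand-in sketch of the dispatcher's contract; not MTL code. */
#include <stdbool.h>
#include <stdint.h>

#define DEFAULT_BPS (1000ULL * 1000 * 1000) /* placeholder default, not ST_DEFAULT_RL_BPS */

static int set_rate_vf(uint16_t queue, uint64_t bps) { (void)queue; (void)bps; return 0; }
static int set_rate_pf(uint16_t queue, uint64_t bps) { (void)queue; (void)bps; return 0; }

static int set_rl_rate(bool is_vf, uint64_t* cur_bps, uint16_t queue, uint64_t bytes_per_sec) {
  uint64_t bps = bytes_per_sec ? bytes_per_sec : DEFAULT_BPS; /* 0 selects the default rate */
  if (bps == *cur_bps) return 0; /* unchanged rate: return without touching the port */
  return is_vf ? set_rate_vf(queue, bps) : set_rate_pf(queue, bps);
}
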
@@ -1564,6 +1631,7 @@ struct mt_tx_queue* mt_dev_get_tx_queue(struct mtl_main_impl* impl, enum mtl_por
   struct mt_interface* inf = mt_if(impl, port);
   uint64_t bytes_per_sec = flow->bytes_per_sec;
   struct mt_tx_queue* tx_queue;
+  float bps_g;
   int ret;
 
   if (mt_user_shared_txq(impl, port)) {
@@ -1598,16 +1666,15 @@ struct mt_tx_queue* mt_dev_get_tx_queue(struct mtl_main_impl* impl, enum mtl_por
       if (ret < 0) {
         err("%s(%d), fallback to tsc as rl fail\n", __func__, port);
         inf->tx_pacing_way = ST21_TX_PACING_WAY_TSC;
+      } else {
+        bps_g = (float)tx_queue->bps * 8 / (1000 * 1000 * 1000);
+        info("%s(%d), q %d with speed %fg bps\n", __func__, port, q, bps_g);
       }
-    }
-    tx_queue->active = true;
-    mt_pthread_mutex_unlock(&inf->tx_queues_mutex);
-    if (inf->tx_pacing_way == ST21_TX_PACING_WAY_RL) {
-      float bps_g = (float)tx_queue->bps * 8 / (1000 * 1000 * 1000);
-      info("%s(%d), q %d with speed %fg bps\n", __func__, port, q, bps_g);
     } else {
       info("%s(%d), q %d without rl\n", __func__, port, q);
     }
+    tx_queue->active = true;
+    mt_pthread_mutex_unlock(&inf->tx_queues_mutex);
     return tx_queue;
   }
   mt_pthread_mutex_unlock(&inf->tx_queues_mutex);
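
The reshuffled log line converts the queue's rate into gigabits per second from tx_queue->bps, which is stored in bytes per second. A standalone illustration of the same arithmetic with a placeholder rate (1.25 GB/s, i.e. 10 Gb/s):

/* Same conversion as the info() line above, with a placeholder rate;
 * prints "q 0 with speed 10.000000g bps". */
#include <stdint.h>
#include <stdio.h>

int main(void) {
  uint64_t bps = 1250000000ULL;                        /* queue rate in bytes per second */
  float bps_g = (float)bps * 8 / (1000 * 1000 * 1000); /* bytes/s -> Gbit/s */
  printf("q 0 with speed %fg bps\n", bps_g);
  return 0;
}
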
