@@ -687,8 +687,14 @@ static int dev_init_ratelimit_all(struct mt_interface* inf) {
   return 0;
 }
 
-static int dev_tx_queue_set_rl_rate(struct mt_interface* inf, uint16_t queue,
-                                    uint64_t bytes_per_sec) {
+/* Note: IAVF behavior in DPDK 25.03:
+ * 1. If the hierarchy is committed on a started device, nodes cannot be removed after
+ *    the commit.
+ * 2. After committing the hierarchy, the device must be restarted for changes to take
+ *    effect.
+ */
+static int dev_tx_queue_set_rl_rate_vf(struct mt_interface* inf, uint16_t queue,
+                                       uint64_t bytes_per_sec) {
   uint16_t port_id = inf->port_id;
   enum mtl_port port = inf->port;
   struct mt_tx_queue* tx_queue = &inf->tx_queues[queue];
@@ -700,13 +706,6 @@ static int dev_tx_queue_set_rl_rate(struct mt_interface* inf, uint16_t queue,
 
   memset(&error, 0, sizeof(error));
 
-  if (!bps) { /* default */
-    bps = ST_DEFAULT_RL_BPS;
-  }
-
-  /* not changed */
-  if (bps == tx_queue->bps) return 0;
-
   ret = mt_pthread_rwlock_wrlock(&inf->rl_rwlock);
   if (ret) {
     err("%s(%d), failed to acquire write lock, ret %d\n", __func__, port, ret);
@@ -716,7 +715,7 @@ static int dev_tx_queue_set_rl_rate(struct mt_interface* inf, uint16_t queue,
   ret = rte_eth_dev_stop(port_id);
   if (ret) {
     err("%s(%d), stop port %d fail %d\n", __func__, port, port_id, ret);
-    goto exit;
+    goto error_unlock_rwlock;
   }
 
   /* delete old queue node */
@@ -725,7 +724,7 @@ static int dev_tx_queue_set_rl_rate(struct mt_interface* inf, uint16_t queue,
     if (ret < 0) {
       err("%s(%d), node %d delete fail %d(%s)\n", __func__, port, queue, ret,
           mt_string_safe(error.message));
-      goto exit;
+      goto error_unlock_rwlock;
     }
     tx_queue->rl_shapers_mapping = -1;
   }
@@ -735,80 +734,148 @@ static int dev_tx_queue_set_rl_rate(struct mt_interface* inf, uint16_t queue,
     if (!shaper) {
       err("%s(%d), rl shaper get fail for q %d\n", __func__, port, queue);
       ret = -EIO;
-      goto exit;
+      goto error_unlock_rwlock;
     }
     memset(&qp, 0, sizeof(qp));
     qp.shaper_profile_id = shaper->shaper_profile_id;
     qp.leaf.cman = RTE_TM_CMAN_TAIL_DROP;
     qp.leaf.wred.wred_profile_id = RTE_TM_WRED_PROFILE_ID_NONE;
-    if (inf->drv_info.drv_type == MT_DRV_IAVF) {
-      ret = rte_tm_node_add(port_id, queue, ST_TM_LAST_NONLEAF_NODE_ID_VF, 0, 1,
-                            ST_TM_NONLEAF_NODES_NUM_VF, &qp, &error);
-    } else {
-      ret = rte_tm_node_add(port_id, queue, ST_TM_LAST_NONLEAF_NODE_ID_PF, 0, 1,
-                            ST_TM_NONLEAF_NODES_NUM_PF, &qp, &error);
-    }
+    ret = rte_tm_node_add(port_id, queue, ST_TM_LAST_NONLEAF_NODE_ID_VF, 0, 1,
+                          ST_TM_NONLEAF_NODES_NUM_VF, &qp, &error);
     if (ret) {
       err("%s(%d), q %d add fail %d(%s)\n", __func__, port, queue, ret,
           mt_string_safe(error.message));
-      goto exit;
+      goto error_unlock_rwlock;
     }
 
     tx_queue->rl_shapers_mapping = shaper->idx;
     info("%s(%d), q %d link to shaper id %d(%" PRIu64 ")\n", __func__, port, queue,
          shaper->shaper_profile_id, shaper->rl_bps);
   }
+  /* start the port so that vf->tm_conf.committed in IAVF is not set to true; if it is
+   * set, the node cannot be removed afterwards. Stupid, yes, I know. */
+  ret = rte_eth_dev_start(port_id);
+  if (ret) {
+    err("%s(%d), start port %d fail %d\n", __func__, port, port_id, ret);
+    goto error_unlock_rwlock;
+  }
 
-  if (inf->drv_info.drv_type == MT_DRV_IAVF) {
-    /* Note: IAVF behavior in DPDK 25.03:
-     * 1. If the hierarchy is committed on a started device, nodes cannot be removed after
-     *    the commit.
-     * 2. After committing the hierarchy, the device must be restarted for changes to take
-     *    effect.
-     */
-    ret = rte_eth_dev_start(port_id);
-    if (ret) {
-      err("%s(%d), start port %d fail %d\n", __func__, port, port_id, ret);
-      goto exit;
-    }
+  ret = rte_tm_hierarchy_commit(port_id, 1, &error);
+  if (ret) {
+    err("%s(%d), commit error (%d)%s\n", __func__, port, ret,
+        mt_string_safe(error.message));
+    goto error_unlock_rwlock;
+  }
 
-    ret = rte_tm_hierarchy_commit(port_id, 1, &error);
-    if (ret) {
-      err("%s(%d), commit error (%d)%s\n", __func__, port, ret,
+  /* restart the port to apply the new rate limit */
+  ret = rte_eth_dev_stop(port_id);
+  if (ret) {
+    err("%s(%d), stop port %d fail %d\n", __func__, port, port_id, ret);
+    goto error_unlock_rwlock;
+  }
+
+  ret = rte_eth_dev_start(port_id);
+  if (ret) {
+    err("%s(%d), start port %d fail %d\n", __func__, port, port_id, ret);
+    goto error_unlock_rwlock;
+  }
+
+  tx_queue->bps = bps;
+
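+  /* success and error paths both fall through here to release rl_rwlock */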
+error_unlock_rwlock:
+  ret = mt_pthread_rwlock_unlock(&inf->rl_rwlock);
+  if (ret) err("%s(%d), failed to release write lock, ret %d\n", __func__, port, ret);
+
+  return ret;
+}
+
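+/* PF path: the port keeps running; the queue node is deleted and re-added directly,
+ * and only the hierarchy commit is serialized under rl_rwlock. */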
+static int dev_tx_queue_set_rl_rate_pf(struct mt_interface* inf, uint16_t queue,
+                                       uint64_t bytes_per_sec) {
+  uint16_t port_id = inf->port_id;
+  enum mtl_port port = inf->port;
+  struct mt_tx_queue* tx_queue = &inf->tx_queues[queue];
+  uint64_t bps = bytes_per_sec;
+  int ret;
+  struct rte_tm_error error;
+  struct rte_tm_node_params qp;
+  struct mt_rl_shaper* shaper;
+
+  memset(&error, 0, sizeof(error));
+
+  /* delete old queue node */
+  if (tx_queue->rl_shapers_mapping >= 0) {
+    ret = rte_tm_node_delete(port_id, queue, &error);
+    if (ret < 0) {
+      err("%s(%d), node %d delete fail %d(%s)\n", __func__, port, queue, ret,
           mt_string_safe(error.message));
-      goto exit;
+      return ret;
     }
+    tx_queue->rl_shapers_mapping = -1;
+  }
 
-    ret = rte_eth_dev_stop(port_id);
-    if (ret) {
-      err("%s(%d), stop port %d fail %d\n", __func__, port, port_id, ret);
-      goto exit;
+  if (bps) {
+    shaper = dev_rl_shaper_get(inf, bps);
+    if (!shaper) {
+      err("%s(%d), rl shaper get fail for q %d\n", __func__, port, queue);
+      return -EIO;
     }
+    memset(&qp, 0, sizeof(qp));
+    qp.shaper_profile_id = shaper->shaper_profile_id;
+    qp.leaf.cman = RTE_TM_CMAN_TAIL_DROP;
+    qp.leaf.wred.wred_profile_id = RTE_TM_WRED_PROFILE_ID_NONE;
+
+    ret = rte_tm_node_add(port_id, queue, ST_TM_LAST_NONLEAF_NODE_ID_PF, 0, 1,
+                          ST_TM_NONLEAF_NODES_NUM_PF, &qp, &error);
 
-  } else {
-    ret = rte_tm_hierarchy_commit(port_id, 1, &error);
     if (ret) {
-      err("%s(%d), commit error (%d)%s\n", __func__, port, ret,
+      err("%s(%d), q %d add fail %d(%s)\n", __func__, port, queue, ret,
           mt_string_safe(error.message));
-      goto exit;
+      return ret;
     }
+
+    tx_queue->rl_shapers_mapping = shaper->idx;
+    info("%s(%d), q %d link to shaper id %d(%" PRIu64 ")\n", __func__, port, queue,
+         shaper->shaper_profile_id, shaper->rl_bps);
   }
 
-  ret = rte_eth_dev_start(port_id);
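+  /* serialize the hierarchy commit with other rate-limit updates */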
+  ret = mt_pthread_rwlock_wrlock(&inf->rl_rwlock);
   if (ret) {
-    err("%s(%d), start port %d fail %d\n", __func__, port, port_id, ret);
-    goto exit;
+    err("%s(%d), failed to acquire write lock, ret %d\n", __func__, port, ret);
+    return ret;
   }
 
-  tx_queue->bps = bps;
+  ret = rte_tm_hierarchy_commit(port_id, 1, &error);
+  if (ret) {
+    err("%s(%d), commit error (%d)%s\n", __func__, port, ret,
+        mt_string_safe(error.message));
+  } else {
+    tx_queue->bps = bps;
+  }
 
-exit:
   ret = mt_pthread_rwlock_unlock(&inf->rl_rwlock);
   if (ret) err("%s(%d), failed to release write lock, ret %d\n", __func__, port, ret);
 
   return ret;
 }
 
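+/* entry point for per-queue rate limiting: apply the default rate, skip unchanged
+ * requests, then dispatch to the VF or PF flavor */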
+static int dev_tx_queue_set_rl_rate(struct mt_interface* inf, uint16_t queue,
+                                    uint64_t bytes_per_sec) {
+  struct mt_tx_queue* tx_queue = &inf->tx_queues[queue];
+  uint64_t bps = bytes_per_sec;
+
+  if (!bps) { /* default */
+    bps = ST_DEFAULT_RL_BPS;
+  }
+
+  /* not changed */
+  if (bps == tx_queue->bps) return 0;
+
+  if (inf->drv_info.drv_type == MT_DRV_IAVF)
+    return dev_tx_queue_set_rl_rate_vf(inf, queue, bps);
+  else
+    return dev_tx_queue_set_rl_rate_pf(inf, queue, bps);
+}
+
 static int dev_stop_port(struct mt_interface* inf) {
   int ret;
   uint16_t port_id = inf->port_id;
@@ -1564,6 +1631,7 @@ struct mt_tx_queue* mt_dev_get_tx_queue(struct mtl_main_impl* impl, enum mtl_por
   struct mt_interface* inf = mt_if(impl, port);
   uint64_t bytes_per_sec = flow->bytes_per_sec;
   struct mt_tx_queue* tx_queue;
+  float bps_g;
   int ret;
 
   if (mt_user_shared_txq(impl, port)) {
@@ -1598,16 +1666,15 @@ struct mt_tx_queue* mt_dev_get_tx_queue(struct mtl_main_impl* impl, enum mtl_por
       if (ret < 0) {
         err("%s(%d), fallback to tsc as rl fail\n", __func__, port);
         inf->tx_pacing_way = ST21_TX_PACING_WAY_TSC;
+      } else {
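+        /* rl attached: log the configured queue rate in Gbit/s */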
+        bps_g = (float)tx_queue->bps * 8 / (1000 * 1000 * 1000);
+        info("%s(%d), q %d with speed %fg bps\n", __func__, port, q, bps_g);
       }
-    }
-    tx_queue->active = true;
-    mt_pthread_mutex_unlock(&inf->tx_queues_mutex);
-    if (inf->tx_pacing_way == ST21_TX_PACING_WAY_RL) {
-      float bps_g = (float)tx_queue->bps * 8 / (1000 * 1000 * 1000);
-      info("%s(%d), q %d with speed %fg bps\n", __func__, port, q, bps_g);
     } else {
       info("%s(%d), q %d without rl\n", __func__, port, q);
     }
+    tx_queue->active = true;
+    mt_pthread_mutex_unlock(&inf->tx_queues_mutex);
     return tx_queue;
   }
   mt_pthread_mutex_unlock(&inf->tx_queues_mutex);