@@ -683,13 +683,8 @@ static int dev_init_ratelimit_all(struct mt_interface* inf) {
          shaper->shaper_profile_id);
   }
 
-  ret = rte_tm_hierarchy_commit(port_id, 1, &error);
-  if (ret < 0)
-    err("%s(%d), commit error (%d)%s\n", __func__, port, ret,
-        mt_string_safe(error.message));
-
   dbg("%s(%d), succ\n", __func__, port);
-  return ret;
+  return 0;
 }
 
 static int dev_tx_queue_set_rl_rate(struct mt_interface* inf, uint16_t queue,
@@ -712,13 +707,22 @@ static int dev_tx_queue_set_rl_rate(struct mt_interface* inf, uint16_t queue,
   /* not changed */
   if (bps == tx_queue->bps) return 0;
 
+  rte_atomic32_set(&inf->resetting, true);
+  mt_pthread_mutex_lock(&inf->vf_cmd_mutex);
+
+  ret = rte_eth_dev_stop(port_id);
+  if (ret) {
+    err("%s(%d), stop port %d fail %d\n", __func__, port, port_id, ret);
+    goto exit;
+  }
+
   /* delete old queue node */
   if (tx_queue->rl_shapers_mapping >= 0) {
     ret = rte_tm_node_delete(port_id, queue, &error);
     if (ret < 0) {
       err("%s(%d), node %d delete fail %d(%s)\n", __func__, port, queue, ret,
           mt_string_safe(error.message));
-      return ret;
+      goto exit;
     }
     tx_queue->rl_shapers_mapping = -1;
   }
@@ -727,7 +731,8 @@ static int dev_tx_queue_set_rl_rate(struct mt_interface* inf, uint16_t queue,
     shaper = dev_rl_shaper_get(inf, bps);
     if (!shaper) {
       err("%s(%d), rl shaper get fail for q %d\n", __func__, port, queue);
-      return -EIO;
+      ret = -EIO;
+      goto exit;
     }
     memset(&qp, 0, sizeof(qp));
     qp.shaper_profile_id = shaper->shaper_profile_id;
@@ -740,29 +745,64 @@ static int dev_tx_queue_set_rl_rate(struct mt_interface* inf, uint16_t queue,
       ret = rte_tm_node_add(port_id, queue, ST_TM_LAST_NONLEAF_NODE_ID_PF, 0, 1,
                             ST_TM_NONLEAF_NODES_NUM_PF, &qp, &error);
     }
-    if (ret < 0) {
+    if (ret) {
       err("%s(%d), q %d add fail %d(%s)\n", __func__, port, queue, ret,
           mt_string_safe(error.message));
-      return ret;
+      goto exit;
     }
+
     tx_queue->rl_shapers_mapping = shaper->idx;
     info("%s(%d), q %d link to shaper id %d(%" PRIu64 ")\n", __func__, port, queue,
          shaper->shaper_profile_id, shaper->rl_bps);
   }
-  rte_atomic32_set(&inf->resetting, true);
-  mt_pthread_mutex_lock(&inf->vf_cmd_mutex);
-  ret = rte_tm_hierarchy_commit(port_id, 1, &error);
-  mt_pthread_mutex_unlock(&inf->vf_cmd_mutex);
-  rte_atomic32_set(&inf->resetting, false);
-  if (ret < 0) {
-    err("%s(%d), commit error (%d)%s\n", __func__, port, ret,
-        mt_string_safe(error.message));
-    return ret;
+
+  if (inf->drv_info.drv_type == MT_DRV_IAVF) {
+    /* Note: IAVF behavior in DPDK 25.03:
+     * 1. If the hierarchy is committed on a started device, nodes cannot be removed
+     *    after the commit.
+     * 2. After committing the hierarchy, the device must be restarted for changes
+     *    to take effect.
+     */
+    ret = rte_eth_dev_start(port_id);
+    if (ret) {
+      err("%s(%d), start port %d fail %d\n", __func__, port, port_id, ret);
+      goto exit;
+    }
+
+    ret = rte_tm_hierarchy_commit(port_id, 1, &error);
+    if (ret) {
+      err("%s(%d), commit error (%d)%s\n", __func__, port, ret,
+          mt_string_safe(error.message));
+      goto exit;
+    }
+
+    ret = rte_eth_dev_stop(port_id);
+    if (ret) {
+      err("%s(%d), stop port %d fail %d\n", __func__, port, port_id, ret);
+      goto exit;
+    }
+
+  } else {
+    ret = rte_tm_hierarchy_commit(port_id, 1, &error);
+    if (ret) {
+      err("%s(%d), commit error (%d)%s\n", __func__, port, ret,
+          mt_string_safe(error.message));
+      goto exit;
+    }
+  }
+
+  ret = rte_eth_dev_start(port_id);
+  if (ret) {
+    err("%s(%d), start port %d fail %d\n", __func__, port, port_id, ret);
+    goto exit;
   }
 
   tx_queue->bps = bps;
 
-  return 0;
+exit:
+  mt_pthread_mutex_unlock(&inf->vf_cmd_mutex);
+  rte_atomic32_set(&inf->resetting, false);
+  return ret;
 }
 
 static int dev_stop_port(struct mt_interface* inf) {
@@ -1420,13 +1460,11 @@ static int dev_if_init_pacing(struct mt_interface* inf) {
     return ret;
   }
   /* IAVF require all q config with RL */
-  if (inf->drv_info.drv_type == MT_DRV_IAVF) {
+  if (inf->drv_info.drv_type == MT_DRV_IAVF)
     ret = dev_init_ratelimit_all(inf);
-  } else {
+  else
     ret = dev_tx_queue_set_rl_rate(inf, 0, ST_DEFAULT_RL_BPS);
-    if (ret >= 0) dev_tx_queue_set_rl_rate(inf, 0, 0);
-  }
-  if (ret < 0) { /* fallback to tsc if no rl */
+  if (ret) { /* fallback to tsc if no rl */
     if (auto_detect) {
       warn("%s(%d), fallback to tsc as rl init fail\n", __func__, port);
       inf->tx_pacing_way = ST21_TX_PACING_WAY_TSC;
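
Taken together, the change moves the rte_tm hierarchy commit out of dev_init_ratelimit_all and into dev_tx_queue_set_rl_rate, where every queue-node update is now bracketed by rte_eth_dev_stop()/rte_eth_dev_start() and funneled through a single exit: label so vf_cmd_mutex and the resetting flag are always released. The standalone sketch below is illustrative only, not the library's actual helper: the function name update_queue_shaper and its parent/level parameters are invented for the example, and only documented DPDK rte_ethdev/rte_tm calls that already appear in the diff are used.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#include <rte_ethdev.h>
#include <rte_tm.h>

/* Illustrative sketch only: mirrors the stop -> node update -> commit -> start
 * sequence adopted in dev_tx_queue_set_rl_rate. Names and parameters here are
 * hypothetical, not part of the library. */
static int update_queue_shaper(uint16_t port_id, uint32_t queue_id,
                               uint32_t parent_id, uint32_t level,
                               const struct rte_tm_node_params* params,
                               bool is_iavf) {
  struct rte_tm_error error = {0};
  int ret;

  /* TM nodes can only be reshuffled while the port is stopped */
  ret = rte_eth_dev_stop(port_id);
  if (ret) return ret;

  /* replace the leaf node carrying the queue's shaper profile; the delete may
   * legitimately fail if no node exists yet, so its result is ignored here */
  rte_tm_node_delete(port_id, queue_id, &error);
  ret = rte_tm_node_add(port_id, queue_id, parent_id, 0, 1, level, params, &error);
  if (ret) goto out;

  if (is_iavf) {
    /* IAVF (DPDK 25.03): commit on a started device, then restart to apply */
    ret = rte_eth_dev_start(port_id);
    if (ret) goto out;
    ret = rte_tm_hierarchy_commit(port_id, 1, &error);
    if (ret) goto out;
    ret = rte_eth_dev_stop(port_id);
    if (ret) goto out;
  } else {
    ret = rte_tm_hierarchy_commit(port_id, 1, &error);
    if (ret) goto out;
  }

  /* resume traffic with the new shaper in place */
  ret = rte_eth_dev_start(port_id);

out:
  if (ret)
    fprintf(stderr, "shaper update failed (%d): %s\n", ret,
            error.message ? error.message : "unknown");
  return ret;
}

The single out: label plays the same role as the exit: label in the patch: whatever fails, the caller gets the error code back after the port state has been handled in one place.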