Commit 1651a07

xinWeiWei24 and fuweid authored
feat: Add batch-add command to split large node pool into smaller one (#175)
This update introduces a new command that enables splitting a large node pool into smaller ones, helping to prevent issues like oversized manifests and timeouts.

Co-authored-by: Fu Wei <fuweid89@gmail.com>
1 parent 5e498d9 commit 1651a07
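
For illustration only, an invocation of the new subcommand might look like the line below. The kperf vc nodepool command path is an assumption inferred from the file location (cmd/kperf/commands/virtualcluster); the flags and defaults come from the diff in this commit, and the node counts are hypothetical:

    kperf vc nodepool batch-add my-pool --nodes 1000 --batch-size 300 --cpu 8 --memory 16

With these numbers, the command would create four pools named my-pool-0 through my-pool-3, holding 300, 300, 300, and 100 nodes respectively.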

File tree

1 file changed: +113 -1 lines changed

cmd/kperf/commands/virtualcluster/nodepool.go

Lines changed: 113 additions & 1 deletion
@@ -15,6 +15,7 @@ import (
 	"helm.sh/helm/v3/pkg/release"
 
 	"github.com/urfave/cli"
+	"k8s.io/klog/v2"
 )
 
 var nodepoolCommand = cli.Command{
@@ -29,11 +30,15 @@ var nodepoolCommand = cli.Command{
 	},
 	Subcommands: []cli.Command{
 		nodepoolAddCommand,
+		nodepoolBatchAddCommand,
 		nodepoolDelCommand,
 		nodepoolListCommand,
 	},
 }
 
+// maxNodesPerPool is the maximum number of nodes suggested for a single node pool.
+const maxNodesPerPool = 300
+
 var nodepoolAddCommand = cli.Command{
 	Name:      "add",
 	Usage:     "Add a virtual node pool",
@@ -99,12 +104,17 @@ var nodepoolAddCommand = cli.Command{
 			return fmt.Errorf("failed to parse node-labels: %w", err)
 		}
 
+		nodes := cliCtx.Int("nodes")
+		if nodes > maxNodesPerPool {
+			klog.Warningf("Creating a node pool with a large number of nodes may cause performance issues. Consider using batch-add command for large node pools.")
+		}
+
 		return virtualcluster.CreateNodepool(context.Background(),
 			kubeCfgPath,
 			nodepoolName,
 			virtualcluster.WithNodepoolCPUOpt(cliCtx.Int("cpu")),
 			virtualcluster.WithNodepoolMemoryOpt(cliCtx.Int("memory")),
-			virtualcluster.WithNodepoolCountOpt(cliCtx.Int("nodes")),
+			virtualcluster.WithNodepoolCountOpt(nodes),
 			virtualcluster.WithNodepoolMaxPodsOpt(cliCtx.Int("max-pods")),
 			virtualcluster.WithNodepoolNodeControllerAffinity(affinityLabels),
 			virtualcluster.WithNodepoolLabelsOpt(nodeLabels),
@@ -113,6 +123,108 @@ var nodepoolAddCommand = cli.Command{
 	},
 }
 
+var nodepoolBatchAddCommand = cli.Command{
+	Name:      "batch-add",
+	Usage:     "Add nodes in batch to multiple virtual node pools instead of one node pool with a large number of nodes",
+	ArgsUsage: "NAME",
+	Flags: []cli.Flag{
+		cli.IntFlag{
+			Name:  "nodes",
+			Usage: "The number of virtual nodes",
+			Value: 10,
+		},
+		cli.IntFlag{
+			Name:  "cpu",
+			Usage: "The allocatable CPU resource per node",
+			Value: 8,
+		},
+		cli.IntFlag{
+			Name:  "memory",
+			Usage: "The allocatable Memory resource per node (GiB)",
+			Value: 16,
+		},
+		cli.IntFlag{
+			Name:  "max-pods",
+			Usage: "The maximum Pods per node",
+			Value: 110,
+		},
+		cli.StringSliceFlag{
+			Name:  "affinity",
+			Usage: "Deploy controllers to the nodes with a specific labels (FORMAT: KEY=VALUE[,VALUE])",
+		},
+		cli.StringSliceFlag{
+			Name:  "node-labels",
+			Usage: "Additional labels to node (FORMAT: KEY=VALUE)",
+		},
+		cli.StringFlag{
+			Name:   "shared-provider-id",
+			Usage:  "Force all the virtual nodes using one provider ID",
+			Hidden: true,
+		},
+		cli.IntFlag{
+			Name:  "batch-size",
+			Usage: "Maximum number of nodes to create in one batch, default is 300",
+			Value: 300,
+		},
+	},
+	Action: func(cliCtx *cli.Context) error {
+		if cliCtx.NArg() != 1 {
+			return fmt.Errorf("expected exactly one argument as name prefix for nodepool: %v", cliCtx.Args())
+		}
+		nodepoolName := strings.TrimSpace(cliCtx.Args().Get(0))
+		if len(nodepoolName) == 0 {
+			return fmt.Errorf("nodepool name prefix should not be empty")
+		}
+
+		kubeCfgPath := cliCtx.GlobalString("kubeconfig")
+
+		if err := utils.ApplyPriorityLevelConfiguration(kubeCfgPath); err != nil {
+			return fmt.Errorf("failed to apply priority level configuration: %w", err)
+		}
+
+		affinityLabels, err := utils.KeyValuesMap(cliCtx.StringSlice("affinity"))
+		if err != nil {
+			return fmt.Errorf("failed to parse affinity labels: %w", err)
+		}
+
+		nodeLabels, err := utils.KeyValueMap(cliCtx.StringSlice("node-labels"))
+		if err != nil {
+			return fmt.Errorf("failed to parse node labels: %w", err)
+		}
+
+		totalNodes := cliCtx.Int("nodes")
+		batchSize := cliCtx.Int("batch-size")
+		if batchSize <= 0 {
+			return fmt.Errorf("batch-size must be greater than zero")
+		}
+
+		for i := 0; i < totalNodes; i += batchSize {
+			currentBatchSize := batchSize
+			if i+currentBatchSize > totalNodes {
+				currentBatchSize = totalNodes - i
+			}
+
+			batchNodepoolName := fmt.Sprintf("%s-%d", nodepoolName, i/batchSize)
+			if err := virtualcluster.CreateNodepool(context.Background(),
+				kubeCfgPath,
+				batchNodepoolName,
+				virtualcluster.WithNodepoolCPUOpt(cliCtx.Int("cpu")),
+				virtualcluster.WithNodepoolMemoryOpt(cliCtx.Int("memory")),
+				virtualcluster.WithNodepoolCountOpt(currentBatchSize),
+				virtualcluster.WithNodepoolMaxPodsOpt(cliCtx.Int("max-pods")),
+				virtualcluster.WithNodepoolNodeControllerAffinity(affinityLabels),
+				virtualcluster.WithNodepoolLabelsOpt(nodeLabels),
+				virtualcluster.WithNodepoolSharedProviderID(cliCtx.String("shared-provider-id")),
+			); err != nil {
+				return fmt.Errorf("failed to create nodepool batch %s: %w", batchNodepoolName, err)
+			}
+			klog.Infof("Created nodepool batch %s with %d nodes", batchNodepoolName, currentBatchSize)
+		}
+
+		return nil
+	},
+}
+
 var nodepoolDelCommand = cli.Command{
 	Name:      "delete",
 	ShortName: "del",
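
To make the batching behavior above easier to follow, here is a minimal, self-contained sketch (not part of the commit) of the same splitting arithmetic, using a hypothetical pool prefix and node counts:

package main

import "fmt"

// batch pairs a generated node pool name with the number of nodes it receives.
type batch struct {
	name  string
	nodes int
}

// splitIntoBatches mirrors the arithmetic of the batch-add loop above:
// totalNodes is carved into chunks of at most batchSize, and each chunk
// becomes its own node pool named "<prefix>-<index>".
func splitIntoBatches(prefix string, totalNodes, batchSize int) []batch {
	var batches []batch
	for i := 0; i < totalNodes; i += batchSize {
		size := batchSize
		if i+size > totalNodes {
			size = totalNodes - i // the final batch takes the remainder
		}
		batches = append(batches, batch{
			name:  fmt.Sprintf("%s-%d", prefix, i/batchSize),
			nodes: size,
		})
	}
	return batches
}

func main() {
	// Hypothetical input: 1000 nodes with the default batch size of 300
	// produces my-pool-0..my-pool-2 with 300 nodes each and my-pool-3 with 100.
	for _, b := range splitIntoBatches("my-pool", 1000, 300) {
		fmt.Printf("%s -> %d nodes\n", b.name, b.nodes)
	}
}

The last pool simply absorbs the remainder, so no pool ever exceeds batch-size nodes and no nodes are dropped.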
