Skip to content

[CIR][LoweringPrepare] Wrap cir.va_arg lowered code in a cir.scope #1768

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 2 commits into from
Aug 7, 2025
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -129,7 +129,18 @@ mlir::Value LoweringPrepareX86CXXABI::lowerVAArgX86_64(
builder, datalayout, valist, ty, loc),
ty));

auto currentBlock = builder.getInsertionBlock();
mlir::OpBuilder::InsertPoint scopeIP;
auto scopeOp = builder.create<cir::ScopeOp>(
loc,
[&](mlir::OpBuilder &opBuilder, mlir::Type &yieldTy, mlir::Location loc) {
scopeIP = opBuilder.saveInsertionPoint();
yieldTy = op.getType();
});

mlir::Block *contBlock = scopeIP.getBlock();

mlir::Block *currentBlock = builder.createBlock(contBlock);
builder.setInsertionPointToEnd(currentBlock);

// AMD64-ABI 3.5.7p5: Step 2. Compute num_gp to hold the number of
// general purpose registers needed to pass type and num_fp to hold
Expand Down Expand Up @@ -163,7 +174,6 @@ mlir::Value LoweringPrepareX86CXXABI::lowerVAArgX86_64(
inRegs = inRegs ? builder.createAnd(inRegs, fitsInFP) : fitsInFP;
}

mlir::Block *contBlock = currentBlock->splitBlock(op);
mlir::Block *inRegBlock = builder.createBlock(contBlock);
mlir::Block *inMemBlock = builder.createBlock(contBlock);
builder.setInsertionPointToEnd(currentBlock);
Expand Down Expand Up @@ -338,14 +348,19 @@ mlir::Value LoweringPrepareX86CXXABI::lowerVAArgX86_64(
buildX86_64VAArgFromMemory(builder, datalayout, valist, ty, loc);
builder.create<BrOp>(loc, mlir::ValueRange{memAddr}, contBlock);

// Return the appropriate result.
// Yield the appropriate result.
builder.setInsertionPointToStart(contBlock);
mlir::Value res_addr = contBlock->addArgument(regAddr.getType(), loc);

return alignment
? builder.createAlignedLoad(
loc, builder.createPtrBitcast(res_addr, ty), alignment)
: builder.createLoad(loc, builder.createPtrBitcast(res_addr, ty));
mlir::Value result =
alignment
? builder.createAlignedLoad(
loc, builder.createPtrBitcast(res_addr, ty), alignment)
: builder.createLoad(loc, builder.createPtrBitcast(res_addr, ty));

builder.create<cir::YieldOp>(loc, result);

return scopeOp.getResult(0);
}
} // namespace

Expand Down
25 changes: 24 additions & 1 deletion clang/test/CIR/Lowering/var-arg-x86_64.c
Original file line number Diff line number Diff line change
Expand Up @@ -47,6 +47,7 @@ double f1(int n, ...) {
// CIR: [[VASTED_VA_LIST:%.+]] = cir.cast(array_to_ptrdecay, [[VA_LIST_ALLOCA]]
// CIR: cir.va.start [[VASTED_VA_LIST]]
// CIR: [[VASTED_VA_LIST:%.+]] = cir.cast(array_to_ptrdecay, [[VA_LIST_ALLOCA]]
// CIR: [[VAARG_RESULT:%.+]] = cir.scope
// CIR: [[FP_OFFSET_P:%.+]] = cir.get_member [[VASTED_VA_LIST]][1] {name = "fp_offset"}
// CIR: [[FP_OFFSET:%.+]] = cir.load [[FP_OFFSET_P]]
// CIR: [[OFFSET_CONSTANT:%.+]] = cir.const #cir.int<160>
Expand Down Expand Up @@ -75,7 +76,9 @@ double f1(int n, ...) {
// CIR: ^[[ContBlock]]([[ARG:.+]]: !cir.ptr
// CIR: [[CASTED_ARG_P:%.+]] = cir.cast(bitcast, [[ARG]]
// CIR: [[CASTED_ARG:%.+]] = cir.load align(16) [[CASTED_ARG_P]]
// CIR: cir.store{{.*}} [[CASTED_ARG]], [[RES]]
// CIR: cir.yield [[CASTED_ARG]]
//
// CIR: cir.store{{.*}} [[VAARG_RESULT]], [[RES]]
long double f2(int n, ...) {
va_list valist;
va_start(valist, n);
Expand Down Expand Up @@ -185,3 +188,23 @@ const char *f3(va_list args) {

// ...
// CIR: cir.return

// Regression test for va_arg appearing in a loop *condition*: the lowered
// va_arg code must be wrapped in a cir.scope (yielding !s32i) so it can live
// inside the cir.for's cond region — see the CHECK lines below.
// The loop consumes one int vararg per iteration and stops when it reads 0.
void f4(va_list args) {
  for (; va_arg(args, int); );
}
// CIR-LABEL: cir.func dso_local @f4
// CIR: cir.for : cond {
// CIR: %[[VALIST:.*]] = cir.load align(8) %[[VALIST_VAR]] : !cir.ptr<!cir.ptr<!rec___va_list_tag>>, !cir.ptr<!rec___va_list_tag>
// CIR: %[[VAARG_RESULT:.*]] = cir.scope {
// ... // The contents are tested elsewhere.
// CIR: cir.yield {{.*}} : !s32i
// CIR: } : !s32i
// CIR: %[[CMP:.*]] = cir.cast(int_to_bool, %[[VAARG_RESULT]] : !s32i), !cir.bool
// CIR: cir.condition(%[[CMP]])
// CIR: } body {
// CIR: cir.yield
// CIR: } step {
// CIR: cir.yield
// CIR: }
// CIR: cir.return
// CIR: }
Loading