Skip to content

Commit

Permalink
[CIR][CodeGen] Adds clobbers to inline assembly (#469)
Browse files Browse the repository at this point in the history
One more tiny step!
This is a tiny PR that adds clobbers to the constraint string.
Note that `~{dirflag},~{fpsr},~{flags}` are
[X86](https://github.com/llvm/clangir/blob/main/clang/lib/Basic/Targets/X86.h#L281)-dependent
clobbers.

Basically, the following work remains:
- lowering
- store the results of the `cir.asm`
  • Loading branch information
gitoleg authored Feb 16, 2024
1 parent b27ad0f commit b9cd201
Show file tree
Hide file tree
Showing 4 changed files with 78 additions and 10 deletions.
6 changes: 3 additions & 3 deletions clang/include/clang/CIR/Dialect/IR/CIROps.td
Original file line number Diff line number Diff line change
Expand Up @@ -2837,9 +2837,9 @@ def CIR_InlineAsmOp : CIR_Op<"asm", [RecursiveMemoryEffects]> {
...
%2 = cir.load %0 : cir.ptr <!s32i>, !s32i
%3 = cir.load %1 : cir.ptr <!s32i>, !s32i
cir.asm(x86_att, {"foo" ""} : () -> ()
cir.asm(x86_att, {"bar $$42 $0" "=r,=&r,1"} %2 : (!s32i) -> ()
cir.asm(x86_att, {"baz $$42 $0" "=r,=&r,0,1"} %3, %2 : (!s32i, !s32i) -> ()
cir.asm(x86_att, {"foo" "~{dirflag},~{fpsr},~{flags}"} : () -> ()
cir.asm(x86_att, {"bar $$42 $0" "=r,=&r,1,~{dirflag},~{fpsr},~{flags}"} %2 : (!s32i) -> ()
cir.asm(x86_att, {"baz $$42 $0" "=r,=&r,0,1,~{dirflag},~{fpsr},~{flags}"} %3, %2 : (!s32i, !s32i) -> ()
```
}];

Expand Down
69 changes: 68 additions & 1 deletion clang/lib/CIR/CodeGen/CIRAsm.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -108,6 +108,64 @@ AddVariableConstraints(const std::string &Constraint, const Expr &AsmExpr,
return (EarlyClobber ? "&{" : "{") + Register.str() + "}";
}

/// Append the clobber list of the asm statement \p S to the LLVM-style
/// constraint string \p constraints (each clobber is encoded as "~{name}"),
/// and finally append the target's machine-specific clobbers.
///
/// \param cgf              current function codegen state (target, module).
/// \param S                the inline-asm statement being lowered.
/// \param constraints      in/out: constraint string being accumulated.
/// \param hasUnwindClobber out: set to true iff an "unwind" clobber is
///                         present; that clobber is not added to the string.
/// \param readOnly         out: cleared when the asm clobbers memory.
/// \param readNone         out: cleared when the asm clobbers memory.
///                         NOTE: was previously taken by value, so the
///                         caller's ReadNone flag never observed the update;
///                         it must be a reference (as in classic clang
///                         CodeGen) for the readnone deduction to work.
static void collectClobbers(const CIRGenFunction &cgf, const AsmStmt &S,
                            std::string &constraints, bool &hasUnwindClobber,
                            bool &readOnly, bool &readNone) {

  hasUnwindClobber = false;
  auto &cgm = cgf.getCIRGenModule();

  // Clobbers
  for (unsigned i = 0, e = S.getNumClobbers(); i != e; i++) {
    StringRef clobber = S.getClobber(i);
    if (clobber == "memory")
      // A memory clobber means the asm may both read and write memory.
      readOnly = readNone = false;
    else if (clobber == "unwind") {
      // "unwind" is recorded separately and not emitted into the string.
      hasUnwindClobber = true;
      continue;
    } else if (clobber != "cc") {
      clobber = cgf.getTarget().getNormalizedGCCRegisterName(clobber);
      if (cgm.getCodeGenOpts().StackClashProtector &&
          cgf.getTarget().isSPRegName(clobber)) {
        cgm.getDiags().Report(S.getAsmLoc(),
                              diag::warn_stack_clash_protection_inline_asm);
      }
    }

    if (isa<MSAsmStmt>(&S)) {
      // In MS-style asm, an eax/edx clobber that duplicates an existing
      // output constraint is folded into that output as an early-clobber
      // ("&") instead of being appended as a separate "~{reg}" entry.
      if (clobber == "eax" || clobber == "edx") {
        if (constraints.find("=&A") != std::string::npos)
          continue;
        std::string::size_type position1 =
            constraints.find("={" + clobber.str() + "}");
        if (position1 != std::string::npos) {
          constraints.insert(position1 + 1, "&");
          continue;
        }
        std::string::size_type position2 = constraints.find("=A");
        if (position2 != std::string::npos) {
          constraints.insert(position2 + 1, "&");
          continue;
        }
      }
    }
    if (!constraints.empty())
      constraints += ',';

    constraints += "~{";
    constraints += clobber;
    constraints += '}';
  }

  // Add machine specific clobbers
  std::string_view machineClobbers = cgf.getTarget().getClobbers();
  if (!machineClobbers.empty()) {
    if (!constraints.empty())
      constraints += ',';
    constraints += machineClobbers;
  }
}

using constraintInfos = SmallVector<TargetInfo::ConstraintInfo, 4>;

static void collectInOutConstrainsInfos(const CIRGenFunction &cgf,
Expand Down Expand Up @@ -217,7 +275,13 @@ mlir::LogicalResult CIRGenFunction::buildAsmStmt(const AsmStmt &S) {
// Keep track of out constraints for tied input operand.
std::vector<std::string> OutputConstraints;

assert(!S.getNumClobbers() && "asm clobbers operands are NYI");
// An inline asm can be marked readonly if it meets the following conditions:
// - it doesn't have any sideeffects
// - it doesn't clobber memory
// - it doesn't return a value by-reference
// It can be marked readnone if it doesn't have any input memory constraints
// in addition to meeting the conditions listed above.
bool ReadOnly = true, ReadNone = true;

for (unsigned i = 0, e = S.getNumOutputs(); i != e; i++) {
TargetInfo::ConstraintInfo &Info = OutputConstraintInfos[i];
Expand Down Expand Up @@ -351,6 +415,9 @@ mlir::LogicalResult CIRGenFunction::buildAsmStmt(const AsmStmt &S) {
}
Constraints += InOutConstraints;

bool HasUnwindClobber = false;
collectClobbers(*this, S, Constraints, HasUnwindClobber, ReadOnly, ReadNone);

mlir::Type ResultType;

if (ResultRegTypes.size() == 1)
Expand Down
1 change: 1 addition & 0 deletions clang/lib/CIR/CodeGen/CIRGenFunction.h
Original file line number Diff line number Diff line change
Expand Up @@ -441,6 +441,7 @@ class CIRGenFunction : public CIRGenTypeCache {
CIRGenBuilderTy &getBuilder() { return builder; }

CIRGenModule &getCIRGenModule() { return CGM; }
const CIRGenModule &getCIRGenModule() const { return CGM; }

mlir::Block *getCurFunctionEntryBlock() {
auto Fn = dyn_cast<mlir::cir::FuncOp>(CurFn);
Expand Down
12 changes: 6 additions & 6 deletions clang/test/CIR/CodeGen/asm.c
Original file line number Diff line number Diff line change
@@ -1,32 +1,32 @@
// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir-enable -emit-cir %s -o %t.cir
// RUN: FileCheck --input-file=%t.cir %s

//CHECK: cir.asm(x86_att, {"" ""}) : () -> ()
//CHECK: cir.asm(x86_att, {"" "~{dirflag},~{fpsr},~{flags}"}) : () -> ()
// Empty asm, no operands/clobbers: only the target's default clobbers
// (dirflag/fpsr/flags on x86) should appear in the constraint string.
void empty1() {
__asm__ volatile("" : : : );
}

//CHECK: cir.asm(x86_att, {"xyz" ""}) : () -> ()
//CHECK: cir.asm(x86_att, {"xyz" "~{dirflag},~{fpsr},~{flags}"}) : () -> ()
// Non-empty asm text but no operands/clobbers: still only the target's
// default clobbers are added.
void empty2() {
__asm__ volatile("xyz" : : : );
}

//CHECK: cir.asm(x86_att, {"" "=*m,*m"}) %0, %0 : (!cir.ptr<!s32i>, !cir.ptr<!s32i>) -> ()
//CHECK: cir.asm(x86_att, {"" "=*m,*m,~{dirflag},~{fpsr},~{flags}"}) %0, %0 : (!cir.ptr<!s32i>, !cir.ptr<!s32i>) -> ()
// Read-write memory output ("+m"): expands to "=*m,*m", then the target
// clobbers are appended after the operand constraints.
void t1(int x) {
__asm__ volatile("" : "+m"(x));
}

//CHECK: cir.asm(x86_att, {"" "*m"}) %0 : (!cir.ptr<!s32i>) -> ()
//CHECK: cir.asm(x86_att, {"" "*m,~{dirflag},~{fpsr},~{flags}"}) %0 : (!cir.ptr<!s32i>) -> ()
// Memory input only ("m"): target clobbers follow the "*m" constraint.
void t2(int x) {
__asm__ volatile("" : : "m"(x));
}

//CHECK: cir.asm(x86_att, {"" "=*m"}) %0 : (!cir.ptr<!s32i>) -> ()
//CHECK: cir.asm(x86_att, {"" "=*m,~{dirflag},~{fpsr},~{flags}"}) %0 : (!cir.ptr<!s32i>) -> ()
// Memory output only ("=m"): target clobbers follow the "=*m" constraint.
void t3(int x) {
__asm__ volatile("" : "=m"(x));
}

//CHECK: cir.asm(x86_att, {"" "=&r,=&r,1"}) %1 : (!s32i) -> ()
//CHECK: cir.asm(x86_att, {"" "=&r,=&r,1,~{dirflag},~{fpsr},~{flags}"}) %1 : (!s32i) -> ()
// Early-clobber register outputs with a tied operand ("1"): target
// clobbers are appended after the tied-operand constraints.
void t4(int x) {
__asm__ volatile("" : "=&r"(x), "+&r"(x));
}

0 comments on commit b9cd201

Please sign in to comment.