[RISCV] Remove -riscv-enable-vl-optimizer flag #149349
The RISCVVLOptimizer has been enabled by default for a while now and I'm not aware of any outstanding issues that might need it to be disabled. This removes the -riscv-enable-vl-optimizer flag to reduce the number of configurations we have to support.
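Before this patch the pass could still be disabled on the llc command line with `-riscv-enable-vl-optimizer=false`, as the RUN lines in the test diff below show; with the flag gone, the pass runs unconditionally whenever the optimization level is above `CodeGenOptLevel::None`.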
@llvm/pr-subscribers-backend-risc-v

Author: Luke Lau (lukel97)

Changes

The RISCVVLOptimizer has been enabled by default for a while now and I'm not aware of any outstanding issues that might need it to be disabled.

Patch is 324.17 KiB, truncated to 20.00 KiB below, full version: https://github.com/llvm/llvm-project/pull/149349.diff

6 Files Affected:
diff --git a/llvm/lib/Target/RISCV/RISCVTargetMachine.cpp b/llvm/lib/Target/RISCV/RISCVTargetMachine.cpp
index b43b915d0ad4f..da6ac2f6f31e9 100644
--- a/llvm/lib/Target/RISCV/RISCVTargetMachine.cpp
+++ b/llvm/lib/Target/RISCV/RISCVTargetMachine.cpp
@@ -104,11 +104,6 @@ static cl::opt<bool> EnablePostMISchedLoadStoreClustering(
cl::desc("Enable PostRA load and store clustering in the machine scheduler"),
cl::init(true));
-static cl::opt<bool>
- EnableVLOptimizer("riscv-enable-vl-optimizer",
- cl::desc("Enable the RISC-V VL Optimizer pass"),
- cl::init(true), cl::Hidden);
-
static cl::opt<bool> DisableVectorMaskMutation(
"riscv-disable-vector-mask-mutation",
cl::desc("Disable the vector mask scheduling mutation"), cl::init(false),
@@ -617,8 +612,7 @@ void RISCVPassConfig::addPreRegAlloc() {
addPass(createRISCVPreRAExpandPseudoPass());
if (TM->getOptLevel() != CodeGenOptLevel::None) {
addPass(createRISCVMergeBaseOffsetOptPass());
- if (EnableVLOptimizer)
- addPass(createRISCVVLOptimizerPass());
+ addPass(createRISCVVLOptimizerPass());
}
addPass(createRISCVInsertReadWriteCSRPass());
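For reference, a minimal sketch of `RISCVPassConfig::addPreRegAlloc` after this hunk is applied, reconstructed from the diff above (only the passes visible in the hunk are shown):

```cpp
void RISCVPassConfig::addPreRegAlloc() {
  addPass(createRISCVPreRAExpandPseudoPass());
  if (TM->getOptLevel() != CodeGenOptLevel::None) {
    addPass(createRISCVMergeBaseOffsetOptPass());
    // No cl::opt guard anymore: the VL optimizer now runs
    // unconditionally at -O1 and above.
    addPass(createRISCVVLOptimizerPass());
  }
  addPass(createRISCVInsertReadWriteCSRPass());
}
```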
diff --git a/llvm/test/CodeGen/RISCV/rvv/vl-opt-instrs.ll b/llvm/test/CodeGen/RISCV/rvv/vl-opt-instrs.ll
index 317ad0c124e73..d575a0f9fe50c 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vl-opt-instrs.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vl-opt-instrs.ll
@@ -1,3164 +1,1912 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
-; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zvbb,+zvfbfwma -riscv-enable-vl-optimizer=false -verify-machineinstrs | FileCheck %s --check-prefixes=NOVLOPT
-; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvbb,+zvfbfwma -riscv-enable-vl-optimizer=false -verify-machineinstrs | FileCheck %s --check-prefixes=NOVLOPT
-; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zvbb,+zvfbfwma -riscv-enable-vl-optimizer -verify-machineinstrs | FileCheck %s --check-prefixes=VLOPT
-; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvbb,+zvfbfwma -riscv-enable-vl-optimizer -verify-machineinstrs | FileCheck %s --check-prefixes=VLOPT
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zvbb,+zvfbfwma -verify-machineinstrs | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvbb,+zvfbfwma -verify-machineinstrs | FileCheck %s
; The purpose of this file is to check the behavior of specific instructions as it relates to the VL optimizer
define <vscale x 4 x i32> @vadd_vi(<vscale x 4 x i32> %a, iXLen %vl) {
-; NOVLOPT-LABEL: vadd_vi:
-; NOVLOPT: # %bb.0:
-; NOVLOPT-NEXT: vsetvli a1, zero, e32, m2, ta, ma
-; NOVLOPT-NEXT: vadd.vi v10, v8, 5
-; NOVLOPT-NEXT: vsetvli zero, a0, e32, m2, ta, ma
-; NOVLOPT-NEXT: vadd.vv v8, v10, v8
-; NOVLOPT-NEXT: ret
-;
-; VLOPT-LABEL: vadd_vi:
-; VLOPT: # %bb.0:
-; VLOPT-NEXT: vsetvli zero, a0, e32, m2, ta, ma
-; VLOPT-NEXT: vadd.vi v10, v8, 5
-; VLOPT-NEXT: vadd.vv v8, v10, v8
-; VLOPT-NEXT: ret
+; CHECK-LABEL: vadd_vi:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT: vadd.vi v10, v8, 5
+; CHECK-NEXT: vadd.vv v8, v10, v8
+; CHECK-NEXT: ret
%1 = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %a, i32 5, iXLen -1)
%2 = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %a, iXLen %vl)
ret <vscale x 4 x i32> %2
}
define <vscale x 4 x i32> @vadd_vv(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, iXLen %vl) {
-; NOVLOPT-LABEL: vadd_vv:
-; NOVLOPT: # %bb.0:
-; NOVLOPT-NEXT: vsetvli a1, zero, e32, m2, ta, ma
-; NOVLOPT-NEXT: vadd.vv v8, v8, v10
-; NOVLOPT-NEXT: vsetvli zero, a0, e32, m2, ta, ma
-; NOVLOPT-NEXT: vadd.vv v8, v8, v10
-; NOVLOPT-NEXT: ret
-;
-; VLOPT-LABEL: vadd_vv:
-; VLOPT: # %bb.0:
-; VLOPT-NEXT: vsetvli zero, a0, e32, m2, ta, ma
-; VLOPT-NEXT: vadd.vv v8, v8, v10
-; VLOPT-NEXT: vadd.vv v8, v8, v10
-; VLOPT-NEXT: ret
+; CHECK-LABEL: vadd_vv:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT: vadd.vv v8, v8, v10
+; CHECK-NEXT: vadd.vv v8, v8, v10
+; CHECK-NEXT: ret
%1 = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b, iXLen -1)
%2 = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %b, iXLen %vl)
ret <vscale x 4 x i32> %2
}
define <vscale x 4 x i32> @vadd_vx(<vscale x 4 x i32> %a, i32 %b, iXLen %vl) {
-; NOVLOPT-LABEL: vadd_vx:
-; NOVLOPT: # %bb.0:
-; NOVLOPT-NEXT: vsetvli a2, zero, e32, m2, ta, ma
-; NOVLOPT-NEXT: vadd.vx v10, v8, a0
-; NOVLOPT-NEXT: vsetvli zero, a1, e32, m2, ta, ma
-; NOVLOPT-NEXT: vadd.vv v8, v10, v8
-; NOVLOPT-NEXT: ret
-;
-; VLOPT-LABEL: vadd_vx:
-; VLOPT: # %bb.0:
-; VLOPT-NEXT: vsetvli zero, a1, e32, m2, ta, ma
-; VLOPT-NEXT: vadd.vx v10, v8, a0
-; VLOPT-NEXT: vadd.vv v8, v10, v8
-; VLOPT-NEXT: ret
+; CHECK-LABEL: vadd_vx:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
+; CHECK-NEXT: vadd.vx v10, v8, a0
+; CHECK-NEXT: vadd.vv v8, v10, v8
+; CHECK-NEXT: ret
%1 = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %a, i32 %b, iXLen -1)
%2 = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %a, iXLen %vl)
ret <vscale x 4 x i32> %2
}
define <vscale x 4 x i32> @vsub_vv(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, iXLen %vl) {
-; NOVLOPT-LABEL: vsub_vv:
-; NOVLOPT: # %bb.0:
-; NOVLOPT-NEXT: vsetvli a1, zero, e32, m2, ta, ma
-; NOVLOPT-NEXT: vsub.vv v8, v8, v10
-; NOVLOPT-NEXT: vsetvli zero, a0, e32, m2, ta, ma
-; NOVLOPT-NEXT: vadd.vv v8, v8, v10
-; NOVLOPT-NEXT: ret
-;
-; VLOPT-LABEL: vsub_vv:
-; VLOPT: # %bb.0:
-; VLOPT-NEXT: vsetvli zero, a0, e32, m2, ta, ma
-; VLOPT-NEXT: vsub.vv v8, v8, v10
-; VLOPT-NEXT: vadd.vv v8, v8, v10
-; VLOPT-NEXT: ret
+; CHECK-LABEL: vsub_vv:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT: vsub.vv v8, v8, v10
+; CHECK-NEXT: vadd.vv v8, v8, v10
+; CHECK-NEXT: ret
%1 = call <vscale x 4 x i32> @llvm.riscv.vsub.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b, iXLen -1)
%2 = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %b, iXLen %vl)
ret <vscale x 4 x i32> %2
}
define <vscale x 4 x i32> @vsub_vx(<vscale x 4 x i32> %a, i32 %b, iXLen %vl) {
-; NOVLOPT-LABEL: vsub_vx:
-; NOVLOPT: # %bb.0:
-; NOVLOPT-NEXT: vsetvli a2, zero, e32, m2, ta, ma
-; NOVLOPT-NEXT: vsub.vx v10, v8, a0
-; NOVLOPT-NEXT: vsetvli zero, a1, e32, m2, ta, ma
-; NOVLOPT-NEXT: vadd.vv v8, v10, v8
-; NOVLOPT-NEXT: ret
-;
-; VLOPT-LABEL: vsub_vx:
-; VLOPT: # %bb.0:
-; VLOPT-NEXT: vsetvli zero, a1, e32, m2, ta, ma
-; VLOPT-NEXT: vsub.vx v10, v8, a0
-; VLOPT-NEXT: vadd.vv v8, v10, v8
-; VLOPT-NEXT: ret
+; CHECK-LABEL: vsub_vx:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
+; CHECK-NEXT: vsub.vx v10, v8, a0
+; CHECK-NEXT: vadd.vv v8, v10, v8
+; CHECK-NEXT: ret
%1 = call <vscale x 4 x i32> @llvm.riscv.vsub.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %a, i32 %b, iXLen -1)
%2 = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %a, iXLen %vl)
ret <vscale x 4 x i32> %2
}
define <vscale x 4 x i32> @vrsub_vi(<vscale x 4 x i32> %a, iXLen %vl) {
-; NOVLOPT-LABEL: vrsub_vi:
-; NOVLOPT: # %bb.0:
-; NOVLOPT-NEXT: vsetvli a1, zero, e32, m2, ta, ma
-; NOVLOPT-NEXT: vrsub.vi v10, v8, 5
-; NOVLOPT-NEXT: vsetvli zero, a0, e32, m2, ta, ma
-; NOVLOPT-NEXT: vadd.vv v8, v10, v8
-; NOVLOPT-NEXT: ret
-;
-; VLOPT-LABEL: vrsub_vi:
-; VLOPT: # %bb.0:
-; VLOPT-NEXT: vsetvli zero, a0, e32, m2, ta, ma
-; VLOPT-NEXT: vrsub.vi v10, v8, 5
-; VLOPT-NEXT: vadd.vv v8, v10, v8
-; VLOPT-NEXT: ret
+; CHECK-LABEL: vrsub_vi:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT: vrsub.vi v10, v8, 5
+; CHECK-NEXT: vadd.vv v8, v10, v8
+; CHECK-NEXT: ret
%1 = call <vscale x 4 x i32> @llvm.riscv.vrsub.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %a, i32 5, iXLen -1)
%2 = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %a, iXLen %vl)
ret <vscale x 4 x i32> %2
}
define <vscale x 4 x i32> @vrsub_vx(<vscale x 4 x i32> %a, i32 %b, iXLen %vl) {
-; NOVLOPT-LABEL: vrsub_vx:
-; NOVLOPT: # %bb.0:
-; NOVLOPT-NEXT: vsetvli a2, zero, e32, m2, ta, ma
-; NOVLOPT-NEXT: vrsub.vx v10, v8, a0
-; NOVLOPT-NEXT: vsetvli zero, a1, e32, m2, ta, ma
-; NOVLOPT-NEXT: vadd.vv v8, v10, v8
-; NOVLOPT-NEXT: ret
-;
-; VLOPT-LABEL: vrsub_vx:
-; VLOPT: # %bb.0:
-; VLOPT-NEXT: vsetvli zero, a1, e32, m2, ta, ma
-; VLOPT-NEXT: vrsub.vx v10, v8, a0
-; VLOPT-NEXT: vadd.vv v8, v10, v8
-; VLOPT-NEXT: ret
+; CHECK-LABEL: vrsub_vx:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
+; CHECK-NEXT: vrsub.vx v10, v8, a0
+; CHECK-NEXT: vadd.vv v8, v10, v8
+; CHECK-NEXT: ret
%1 = call <vscale x 4 x i32> @llvm.riscv.vrsub.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %a, i32 %b, iXLen -1)
%2 = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %a, iXLen %vl)
ret <vscale x 4 x i32> %2
}
define <vscale x 4 x i32> @vand_vi(<vscale x 4 x i32> %a, iXLen %vl) {
-; NOVLOPT-LABEL: vand_vi:
-; NOVLOPT: # %bb.0:
-; NOVLOPT-NEXT: vsetvli a1, zero, e32, m2, ta, ma
-; NOVLOPT-NEXT: vand.vi v10, v8, 5
-; NOVLOPT-NEXT: vsetvli zero, a0, e32, m2, ta, ma
-; NOVLOPT-NEXT: vadd.vv v8, v10, v8
-; NOVLOPT-NEXT: ret
-;
-; VLOPT-LABEL: vand_vi:
-; VLOPT: # %bb.0:
-; VLOPT-NEXT: vsetvli zero, a0, e32, m2, ta, ma
-; VLOPT-NEXT: vand.vi v10, v8, 5
-; VLOPT-NEXT: vadd.vv v8, v10, v8
-; VLOPT-NEXT: ret
+; CHECK-LABEL: vand_vi:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT: vand.vi v10, v8, 5
+; CHECK-NEXT: vadd.vv v8, v10, v8
+; CHECK-NEXT: ret
%1 = call <vscale x 4 x i32> @llvm.riscv.vand.nxv4i32.i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %a, i32 5, iXLen -1)
%2 = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %a, iXLen %vl)
ret <vscale x 4 x i32> %2
}
define <vscale x 4 x i32> @vand_vv(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, iXLen %vl) {
-; NOVLOPT-LABEL: vand_vv:
-; NOVLOPT: # %bb.0:
-; NOVLOPT-NEXT: vsetvli a1, zero, e32, m2, ta, ma
-; NOVLOPT-NEXT: vand.vv v8, v8, v10
-; NOVLOPT-NEXT: vsetvli zero, a0, e32, m2, ta, ma
-; NOVLOPT-NEXT: vadd.vv v8, v8, v10
-; NOVLOPT-NEXT: ret
-;
-; VLOPT-LABEL: vand_vv:
-; VLOPT: # %bb.0:
-; VLOPT-NEXT: vsetvli zero, a0, e32, m2, ta, ma
-; VLOPT-NEXT: vand.vv v8, v8, v10
-; VLOPT-NEXT: vadd.vv v8, v8, v10
-; VLOPT-NEXT: ret
+; CHECK-LABEL: vand_vv:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT: vand.vv v8, v8, v10
+; CHECK-NEXT: vadd.vv v8, v8, v10
+; CHECK-NEXT: ret
%1 = call <vscale x 4 x i32> @llvm.riscv.vand.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b, iXLen -1)
%2 = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %b, iXLen %vl)
ret <vscale x 4 x i32> %2
}
define <vscale x 4 x i32> @vand_vx(<vscale x 4 x i32> %a, i32 %b, iXLen %vl) {
-; NOVLOPT-LABEL: vand_vx:
-; NOVLOPT: # %bb.0:
-; NOVLOPT-NEXT: vsetvli a2, zero, e32, m2, ta, ma
-; NOVLOPT-NEXT: vand.vx v10, v8, a0
-; NOVLOPT-NEXT: vsetvli zero, a1, e32, m2, ta, ma
-; NOVLOPT-NEXT: vadd.vv v8, v10, v8
-; NOVLOPT-NEXT: ret
-;
-; VLOPT-LABEL: vand_vx:
-; VLOPT: # %bb.0:
-; VLOPT-NEXT: vsetvli zero, a1, e32, m2, ta, ma
-; VLOPT-NEXT: vand.vx v10, v8, a0
-; VLOPT-NEXT: vadd.vv v8, v10, v8
-; VLOPT-NEXT: ret
+; CHECK-LABEL: vand_vx:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
+; CHECK-NEXT: vand.vx v10, v8, a0
+; CHECK-NEXT: vadd.vv v8, v10, v8
+; CHECK-NEXT: ret
%1 = call <vscale x 4 x i32> @llvm.riscv.vand.nxv4i32.i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %a, i32 %b, iXLen -1)
%2 = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %a, iXLen %vl)
ret <vscale x 4 x i32> %2
}
define <vscale x 4 x i32> @vor_vi(<vscale x 4 x i32> %a, iXLen %vl) {
-; NOVLOPT-LABEL: vor_vi:
-; NOVLOPT: # %bb.0:
-; NOVLOPT-NEXT: vsetvli a1, zero, e32, m2, ta, ma
-; NOVLOPT-NEXT: vor.vi v10, v8, 5
-; NOVLOPT-NEXT: vsetvli zero, a0, e32, m2, ta, ma
-; NOVLOPT-NEXT: vadd.vv v8, v10, v8
-; NOVLOPT-NEXT: ret
-;
-; VLOPT-LABEL: vor_vi:
-; VLOPT: # %bb.0:
-; VLOPT-NEXT: vsetvli zero, a0, e32, m2, ta, ma
-; VLOPT-NEXT: vor.vi v10, v8, 5
-; VLOPT-NEXT: vadd.vv v8, v10, v8
-; VLOPT-NEXT: ret
+; CHECK-LABEL: vor_vi:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT: vor.vi v10, v8, 5
+; CHECK-NEXT: vadd.vv v8, v10, v8
+; CHECK-NEXT: ret
%1 = call <vscale x 4 x i32> @llvm.riscv.vor.nxv4i32.i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %a, i32 5, iXLen -1)
%2 = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %a, iXLen %vl)
ret <vscale x 4 x i32> %2
}
define <vscale x 4 x i32> @vor_vv(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, iXLen %vl) {
-; NOVLOPT-LABEL: vor_vv:
-; NOVLOPT: # %bb.0:
-; NOVLOPT-NEXT: vsetvli a1, zero, e32, m2, ta, ma
-; NOVLOPT-NEXT: vor.vv v8, v8, v10
-; NOVLOPT-NEXT: vsetvli zero, a0, e32, m2, ta, ma
-; NOVLOPT-NEXT: vadd.vv v8, v8, v10
-; NOVLOPT-NEXT: ret
-;
-; VLOPT-LABEL: vor_vv:
-; VLOPT: # %bb.0:
-; VLOPT-NEXT: vsetvli zero, a0, e32, m2, ta, ma
-; VLOPT-NEXT: vor.vv v8, v8, v10
-; VLOPT-NEXT: vadd.vv v8, v8, v10
-; VLOPT-NEXT: ret
+; CHECK-LABEL: vor_vv:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT: vor.vv v8, v8, v10
+; CHECK-NEXT: vadd.vv v8, v8, v10
+; CHECK-NEXT: ret
%1 = call <vscale x 4 x i32> @llvm.riscv.vor.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b, iXLen -1)
%2 = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %b, iXLen %vl)
ret <vscale x 4 x i32> %2
}
define <vscale x 4 x i32> @vor_vx(<vscale x 4 x i32> %a, i32 %b, iXLen %vl) {
-; NOVLOPT-LABEL: vor_vx:
-; NOVLOPT: # %bb.0:
-; NOVLOPT-NEXT: vsetvli a2, zero, e32, m2, ta, ma
-; NOVLOPT-NEXT: vor.vx v10, v8, a0
-; NOVLOPT-NEXT: vsetvli zero, a1, e32, m2, ta, ma
-; NOVLOPT-NEXT: vadd.vv v8, v10, v8
-; NOVLOPT-NEXT: ret
-;
-; VLOPT-LABEL: vor_vx:
-; VLOPT: # %bb.0:
-; VLOPT-NEXT: vsetvli zero, a1, e32, m2, ta, ma
-; VLOPT-NEXT: vor.vx v10, v8, a0
-; VLOPT-NEXT: vadd.vv v8, v10, v8
-; VLOPT-NEXT: ret
+; CHECK-LABEL: vor_vx:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
+; CHECK-NEXT: vor.vx v10, v8, a0
+; CHECK-NEXT: vadd.vv v8, v10, v8
+; CHECK-NEXT: ret
%1 = call <vscale x 4 x i32> @llvm.riscv.vor.nxv4i32.i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %a, i32 %b, iXLen -1)
%2 = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %a, iXLen %vl)
ret <vscale x 4 x i32> %2
}
define <vscale x 4 x i32> @vxor_vi(<vscale x 4 x i32> %a, iXLen %vl) {
-; NOVLOPT-LABEL: vxor_vi:
-; NOVLOPT: # %bb.0:
-; NOVLOPT-NEXT: vsetvli a1, zero, e32, m2, ta, ma
-; NOVLOPT-NEXT: vxor.vi v10, v8, 5
-; NOVLOPT-NEXT: vsetvli zero, a0, e32, m2, ta, ma
-; NOVLOPT-NEXT: vadd.vv v8, v10, v8
-; NOVLOPT-NEXT: ret
-;
-; VLOPT-LABEL: vxor_vi:
-; VLOPT: # %bb.0:
-; VLOPT-NEXT: vsetvli zero, a0, e32, m2, ta, ma
-; VLOPT-NEXT: vxor.vi v10, v8, 5
-; VLOPT-NEXT: vadd.vv v8, v10, v8
-; VLOPT-NEXT: ret
+; CHECK-LABEL: vxor_vi:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT: vxor.vi v10, v8, 5
+; CHECK-NEXT: vadd.vv v8, v10, v8
+; CHECK-NEXT: ret
%1 = call <vscale x 4 x i32> @llvm.riscv.vxor.nxv4i32.i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %a, i32 5, iXLen -1)
%2 = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %a, iXLen %vl)
ret <vscale x 4 x i32> %2
}
define <vscale x 4 x i32> @vxor_vv(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, iXLen %vl) {
-; NOVLOPT-LABEL: vxor_vv:
-; NOVLOPT: # %bb.0:
-; NOVLOPT-NEXT: vsetvli a1, zero, e32, m2, ta, ma
-; NOVLOPT-NEXT: vxor.vv v8, v8, v10
-; NOVLOPT-NEXT: vsetvli zero, a0, e32, m2, ta, ma
-; NOVLOPT-NEXT: vadd.vv v8, v8, v10
-; NOVLOPT-NEXT: ret
-;
-; VLOPT-LABEL: vxor_vv:
-; VLOPT: # %bb.0:
-; VLOPT-NEXT: vsetvli zero, a0, e32, m2, ta, ma
-; VLOPT-NEXT: vxor.vv v8, v8, v10
-; VLOPT-NEXT: vadd.vv v8, v8, v10
-; VLOPT-NEXT: ret
+; CHECK-LABEL: vxor_vv:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT: vxor.vv v8, v8, v10
+; CHECK-NEXT: vadd.vv v8, v8, v10
+; CHECK-NEXT: ret
%1 = call <vscale x 4 x i32> @llvm.riscv.vxor.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b, iXLen -1)
%2 = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %b, iXLen %vl)
ret <vscale x 4 x i32> %2
}
define <vscale x 4 x i32> @vxor_vx(<vscale x 4 x i32> %a, i32 %b, iXLen %vl) {
-; NOVLOPT-LABEL: vxor_vx:
-; NOVLOPT: # %bb.0:
-; NOVLOPT-NEXT: vsetvli a2, zero, e32, m2, ta, ma
-; NOVLOPT-NEXT: vxor.vx v10, v8, a0
-; NOVLOPT-NEXT: vsetvli zero, a1, e32, m2, ta, ma
-; NOVLOPT-NEXT: vadd.vv v8, v10, v8
-; NOVLOPT-NEXT: ret
-;
-; VLOPT-LABEL: vxor_vx:
-; VLOPT: # %bb.0:
-; VLOPT-NEXT: vsetvli zero, a1, e32, m2, ta, ma
-; VLOPT-NEXT: vxor.vx v10, v8, a0
-; VLOPT-NEXT: vadd.vv v8, v10, v8
-; VLOPT-NEXT: ret
+; CHECK-LABEL: vxor_vx:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
+; CHECK-NEXT: vxor.vx v10, v8, a0
+; CHECK-NEXT: vadd.vv v8, v10, v8
+; CHECK-NEXT: ret
%1 = call <vscale x 4 x i32> @llvm.riscv.vxor.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %a, i32 %b, iXLen -1)
%2 = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %a, iXLen %vl)
ret <vscale x 4 x i32> %2
}
define <vscale x 4 x i32> @vsll_vi(<vscale x 4 x i32> %a, iXLen %vl) {
-; NOVLOPT-LABEL: vsll_vi:
-; NOVLOPT: # %bb.0:
-; NOVLOPT-NEXT: vsetvli a1, zero, e32, m2, ta, ma
-; NOVLOPT-NEXT: vsll.vi v10, v8, 5
-; NOVLOPT-NEXT: vsetvli zero, a0, e32, m2, ta, ma
-; NOVLOPT-NEXT: vadd.vv v8, v10, v8
-; NOVLOPT-NEXT: ret
-;
-; VLOPT-LABEL: vsll_vi:
-; VLOPT: # %bb.0:
-; VLOPT-NEXT: vsetvli zero, a0, e32, m2, ta, ma
-; VLOPT-NEXT: vsll.vi v10, v8, 5
-; VLOPT-NEXT: vadd.vv v8, v10, v8
-; VLOPT-NEXT: ret
+; CHECK-LABEL: vsll_vi:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT: vsll.vi v10, v8, 5
+; CHECK-NEXT: vadd.vv v8, v10, v8
+; CHECK-NEXT: ret
%1 = call <vscale x 4 x i32> @llvm.ri...
[truncated]
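Since the NOVLOPT and VLOPT prefixes collapse into a single CHECK prefix, the assertions above were presumably regenerated with the script named in the test header, along the lines of `llvm/utils/update_llc_test_checks.py llvm/test/CodeGen/RISCV/rvv/vl-opt-instrs.ll`.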
LGTM!
LGTM.
LGTM
LLVM Buildbot has detected a new failure on a builder. Full details are available at: https://lab.llvm.org/buildbot/#/builders/162/builds/27551
LLVM Buildbot has detected a new failure on a builder. Full details are available at: https://lab.llvm.org/buildbot/#/builders/11/builds/20333
LLVM Buildbot has detected a new failure on a builder. Full details are available at: https://lab.llvm.org/buildbot/#/builders/88/builds/14317