//===-- llvm/CodeGen/GlobalISel/CombinerHelper.h --------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===--------------------------------------------------------------------===//
//
/// This contains common combine transformations that may be used in a combine
/// pass, or by the target elsewhere.
/// Targets can pick individual opcode transformations from the helper or use
/// tryCombine which invokes all transformations. All of the transformations
/// return true if the MachineInstr changed and false otherwise.
//
//===--------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_GLOBALISEL_COMBINER_HELPER_H
#define LLVM_CODEGEN_GLOBALISEL_COMBINER_HELPER_H

#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/LowLevelType.h"
#include "llvm/CodeGen/Register.h"
#include "llvm/Support/Alignment.h"
#include <functional>

namespace llvm {

class GISelChangeObserver;
class MachineIRBuilder;
class MachineInstrBuilder;
class MachineRegisterInfo;
class MachineInstr;
class MachineOperand;
class GISelKnownBits;
class MachineDominatorTree;
class LegalizerInfo;
struct LegalityQuery;
class TargetLowering;

struct PreferredTuple {
  LLT Ty;                // The result type of the extend.
  unsigned ExtendOpcode; // G_ANYEXT/G_SEXT/G_ZEXT
  MachineInstr *MI;
};

struct IndexedLoadStoreMatchInfo {
  Register Addr;
  Register Base;
  Register Offset;
  bool IsPre;
};

struct PtrAddChain {
  int64_t Imm;
  Register Base;
};

struct RegisterImmPair {
  Register Reg;
  int64_t Imm;
};

struct ShiftOfShiftedLogic {
  MachineInstr *Logic;
  MachineInstr *Shift2;
  Register LogicNonShiftReg;
  uint64_t ValSum;
};

using OperandBuildSteps =
    SmallVector<std::function<void(MachineInstrBuilder &)>, 4>;
struct InstructionBuildSteps {
  unsigned Opcode = 0;          /// The opcode for the produced instruction.
  OperandBuildSteps OperandFns; /// Operands to be added to the instruction.
  InstructionBuildSteps() = default;
  InstructionBuildSteps(unsigned Opcode, const OperandBuildSteps &OperandFns)
      : Opcode(Opcode), OperandFns(OperandFns) {}
};

struct InstructionStepsMatchInfo {
  /// Describes instructions to be built during a combine.
  SmallVector<InstructionBuildSteps, 2> InstrsToBuild;
  InstructionStepsMatchInfo() = default;
  InstructionStepsMatchInfo(
      std::initializer_list<InstructionBuildSteps> InstrsToBuild)
      : InstrsToBuild(InstrsToBuild) {}
};

class CombinerHelper {
protected:
  MachineIRBuilder &Builder;
  MachineRegisterInfo &MRI;
  GISelChangeObserver &Observer;
  GISelKnownBits *KB;
  MachineDominatorTree *MDT;
  const LegalizerInfo *LI;

public:
  CombinerHelper(GISelChangeObserver &Observer, MachineIRBuilder &B,
                 GISelKnownBits *KB = nullptr,
                 MachineDominatorTree *MDT = nullptr,
                 const LegalizerInfo *LI = nullptr);

  GISelKnownBits *getKnownBits() const {
    return KB;
  }

  const TargetLowering &getTargetLowering() const;

  /// \return true if the combine is running prior to legalization, or if \p
  /// Query is legal on the target.
  bool isLegalOrBeforeLegalizer(const LegalityQuery &Query) const;

  /// Call MachineRegisterInfo::replaceRegWith() and inform the observer of the
  /// changes.
  void replaceRegWith(MachineRegisterInfo &MRI, Register FromReg,
                      Register ToReg) const;

  /// Replace a single register operand with a new register and inform the
  /// observer of the changes.
  void replaceRegOpWith(MachineRegisterInfo &MRI, MachineOperand &FromRegOp,
                        Register ToReg) const;

  /// If \p MI is COPY, try to combine it.
  /// Returns true if MI changed.
  bool tryCombineCopy(MachineInstr &MI);
  bool matchCombineCopy(MachineInstr &MI);
  void applyCombineCopy(MachineInstr &MI);

  /// Returns true if \p DefMI precedes \p UseMI or they are the same
  /// instruction. Both must be in the same basic block.
  bool isPredecessor(const MachineInstr &DefMI, const MachineInstr &UseMI);

  /// Returns true if \p DefMI dominates \p UseMI. By definition an
  /// instruction dominates itself.
  ///
  /// If we haven't been provided with a MachineDominatorTree during
  /// construction, this function returns a conservative result that tracks
  /// just a single basic block.
  bool dominates(const MachineInstr &DefMI, const MachineInstr &UseMI);

  /// If \p MI is an extend that consumes the result of a load, try to combine
  /// it. Returns true if MI changed.
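  ///
  /// A sketch of the intended fold (register names and types are
  /// illustrative):
  ///   %v:_(s8)  = G_LOAD %p(p0)
  ///   %e:_(s32) = G_ZEXT %v(s8)
  /// -->
  ///   %e:_(s32) = G_ZEXTLOAD %p(p0)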
  bool tryCombineExtendingLoads(MachineInstr &MI);
  bool matchCombineExtendingLoads(MachineInstr &MI, PreferredTuple &MatchInfo);
  void applyCombineExtendingLoads(MachineInstr &MI, PreferredTuple &MatchInfo);

  /// Combine \p MI into a pre-indexed or post-indexed load/store operation if
  /// legal and the surrounding code makes it useful.
  ///
  /// For example (pre-indexed):
  ///
  /// $addr = G_PTR_ADD $base, $offset
  /// [...]
  /// $val = G_LOAD $addr
  /// [...]
  /// $whatever = COPY $addr
  ///
  /// -->
  ///
  /// $val, $addr = G_INDEXED_LOAD $base, $offset, 1 (IsPre)
  /// [...]
  /// $whatever = COPY $addr
  ///
  /// or (post-indexed):
  ///
  /// G_STORE $val, $base
  /// [...]
  /// $addr = G_PTR_ADD $base, $offset
  /// [...]
  /// $whatever = COPY $addr
  ///
  /// -->
  ///
  /// $addr = G_INDEXED_STORE $val, $base, $offset
  /// [...]
  /// $whatever = COPY $addr
  bool tryCombineIndexedLoadStore(MachineInstr &MI);
  bool matchCombineIndexedLoadStore(MachineInstr &MI,
                                    IndexedLoadStoreMatchInfo &MatchInfo);
  void applyCombineIndexedLoadStore(MachineInstr &MI,
                                    IndexedLoadStoreMatchInfo &MatchInfo);

  bool matchSextTruncSextLoad(MachineInstr &MI);
  bool applySextTruncSextLoad(MachineInstr &MI);

  /// Match sext_inreg(load p), imm -> sextload p
  bool matchSextInRegOfLoad(MachineInstr &MI,
                            std::tuple<Register, unsigned> &MatchInfo);
  bool applySextInRegOfLoad(MachineInstr &MI,
                            std::tuple<Register, unsigned> &MatchInfo);

  /// If a brcond's true block is not the fallthrough, make it so by inverting
  /// the condition and swapping operands.
  bool matchOptBrCondByInvertingCond(MachineInstr &MI);
  void applyOptBrCondByInvertingCond(MachineInstr &MI);

  /// If \p MI is G_CONCAT_VECTORS, try to combine it.
  /// Returns true if MI changed.
  /// Right now, we support:
  /// - concat_vector(undef, undef) => undef
  /// - concat_vector(build_vector(A, B), build_vector(C, D)) =>
  ///   build_vector(A, B, C, D)
  ///
  /// \pre MI.getOpcode() == G_CONCAT_VECTORS.
  bool tryCombineConcatVectors(MachineInstr &MI);
  /// Check if the G_CONCAT_VECTORS \p MI is undef or if it
  /// can be flattened into a build_vector.
  /// In the first case \p IsUndef will be true.
  /// In the second case \p Ops will contain the operands needed
  /// to produce the flattened build_vector.
  ///
  /// \pre MI.getOpcode() == G_CONCAT_VECTORS.
  bool matchCombineConcatVectors(MachineInstr &MI, bool &IsUndef,
                                 SmallVectorImpl<Register> &Ops);
  /// Replace \p MI with a flattened build_vector with \p Ops or an
  /// implicit_def if IsUndef is true.
  void applyCombineConcatVectors(MachineInstr &MI, bool IsUndef,
                                 const ArrayRef<Register> Ops);

  /// Try to combine G_SHUFFLE_VECTOR into G_CONCAT_VECTORS.
  /// Returns true if MI changed.
  ///
  /// \pre MI.getOpcode() == G_SHUFFLE_VECTOR.
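  ///
  /// A sketch of one handled case (types and mask are illustrative):
  ///   %z:_(<4 x s32>) = G_SHUFFLE_VECTOR %x:_(<2 x s32>), %y:_(<2 x s32>),
  ///                                      shufflemask(0, 1, 2, 3)
  /// -->
  ///   %z:_(<4 x s32>) = G_CONCAT_VECTORS %x:_(<2 x s32>), %y:_(<2 x s32>)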
  bool tryCombineShuffleVector(MachineInstr &MI);
  /// Check if the G_SHUFFLE_VECTOR \p MI can be replaced by a
  /// concat_vectors.
  /// \p Ops will contain the operands needed to produce the flattened
  /// concat_vectors.
  ///
  /// \pre MI.getOpcode() == G_SHUFFLE_VECTOR.
  bool matchCombineShuffleVector(MachineInstr &MI,
                                 SmallVectorImpl<Register> &Ops);
  /// Replace \p MI with a concat_vectors with \p Ops.
  void applyCombineShuffleVector(MachineInstr &MI,
                                 const ArrayRef<Register> Ops);

  /// Optimize memcpy intrinsics et al., e.g. constant length calls.
  /// \p MaxLen, if non-zero, specifies the max length of a mem libcall to
  /// inline.
  bool tryCombineMemCpyFamily(MachineInstr &MI, unsigned MaxLen = 0);

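  /// Fold a chain of constant-offset G_PTR_ADDs into a single G_PTR_ADD with
  /// the combined offset. A sketch of the intended fold (register names are
  /// illustrative):
  ///   %c1:_(s64) = G_CONSTANT i64 4
  ///   %c2:_(s64) = G_CONSTANT i64 8
  ///   %p1:_(p0)  = G_PTR_ADD %base, %c1(s64)
  ///   %p2:_(p0)  = G_PTR_ADD %p1, %c2(s64)
  /// -->
  ///   %c3:_(s64) = G_CONSTANT i64 12
  ///   %p2:_(p0)  = G_PTR_ADD %base, %c3(s64)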
  bool matchPtrAddImmedChain(MachineInstr &MI, PtrAddChain &MatchInfo);
  bool applyPtrAddImmedChain(MachineInstr &MI, PtrAddChain &MatchInfo);

  /// Fold (shift (shift base, x), y) -> (shift base, (x + y))
  bool matchShiftImmedChain(MachineInstr &MI, RegisterImmPair &MatchInfo);
  bool applyShiftImmedChain(MachineInstr &MI, RegisterImmPair &MatchInfo);

  /// If we have a shift-by-constant of a bitwise logic op that itself has a
  /// shift-by-constant operand with identical opcode, we may be able to
  /// convert that into 2 independent shifts followed by the logic op.
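  ///
  /// For example (a sketch; shl/and stand in for any shift/logic pair):
  ///   shl (and (shl X, C0), Y), C1 --> and (shl X, C0 + C1), (shl Y, C1)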
  bool matchShiftOfShiftedLogic(MachineInstr &MI,
                                ShiftOfShiftedLogic &MatchInfo);
  bool applyShiftOfShiftedLogic(MachineInstr &MI,
                                ShiftOfShiftedLogic &MatchInfo);

  /// Transform a multiply by a power-of-2 value to a left shift.
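  /// For example (a sketch; the shift amount is log2 of the constant):
  ///   %k:_(s32) = G_CONSTANT i32 8
  ///   %r:_(s32) = G_MUL %x, %k
  /// -->
  ///   %c:_(s32) = G_CONSTANT i32 3
  ///   %r:_(s32) = G_SHL %x, %c(s32)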
  bool matchCombineMulToShl(MachineInstr &MI, unsigned &ShiftVal);
  bool applyCombineMulToShl(MachineInstr &MI, unsigned &ShiftVal);

  // Transform a G_SHL with an extended source into a narrower shift if
  // possible.
  bool matchCombineShlOfExtend(MachineInstr &MI, RegisterImmPair &MatchData);
  bool applyCombineShlOfExtend(MachineInstr &MI,
                               const RegisterImmPair &MatchData);

  /// Reduce a shift by a constant to an unmerge and a shift on a half sized
  /// type. This will not produce a shift smaller than \p TargetShiftSize.
  bool matchCombineShiftToUnmerge(MachineInstr &MI, unsigned TargetShiftSize,
                                  unsigned &ShiftVal);
  bool applyCombineShiftToUnmerge(MachineInstr &MI, const unsigned &ShiftVal);
  bool tryCombineShiftToUnmerge(MachineInstr &MI, unsigned TargetShiftAmount);

  /// Transform <ty,...> G_UNMERGE(G_MERGE ty X, Y, Z) -> ty X, Y, Z.
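  ///
  /// A sketch (register names and types are illustrative):
  ///   %m:_(s64) = G_MERGE_VALUES %x:_(s32), %y:_(s32)
  ///   %a:_(s32), %b:_(s32) = G_UNMERGE_VALUES %m(s64)
  /// -->
  ///   uses of %a and %b are rewritten to use %x and %y directly.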
  bool
  matchCombineUnmergeMergeToPlainValues(MachineInstr &MI,
                                        SmallVectorImpl<Register> &Operands);
  bool
  applyCombineUnmergeMergeToPlainValues(MachineInstr &MI,
                                        SmallVectorImpl<Register> &Operands);

  /// Transform G_UNMERGE Constant -> Constant1, Constant2, ...
  bool matchCombineUnmergeConstant(MachineInstr &MI,
                                   SmallVectorImpl<APInt> &Csts);
  bool applyCombineUnmergeConstant(MachineInstr &MI,
                                   SmallVectorImpl<APInt> &Csts);

  /// Transform X, Y<dead> = G_UNMERGE Z -> X = G_TRUNC Z.
  bool matchCombineUnmergeWithDeadLanesToTrunc(MachineInstr &MI);
  bool applyCombineUnmergeWithDeadLanesToTrunc(MachineInstr &MI);

  /// Transform X, Y = G_UNMERGE(G_ZEXT(Z)) -> X = G_ZEXT(Z); Y = G_CONSTANT 0
  bool matchCombineUnmergeZExtToZExt(MachineInstr &MI);
  bool applyCombineUnmergeZExtToZExt(MachineInstr &MI);

  /// Transform fp_instr(cst) to constant result of the fp operation.
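  /// For example (a sketch, assuming the unary FP opcode is one this combine
  /// handles):
  ///   %c:_(s32) = G_FCONSTANT float 2.0
  ///   %r:_(s32) = G_FNEG %c
  /// -->
  ///   %r:_(s32) = G_FCONSTANT float -2.0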
  bool matchCombineConstantFoldFpUnary(MachineInstr &MI,
                                       Optional<APFloat> &Cst);
  bool applyCombineConstantFoldFpUnary(MachineInstr &MI,
                                       Optional<APFloat> &Cst);

  /// Transform IntToPtr(PtrToInt(x)) to x if cast is in the same address space.
  bool matchCombineI2PToP2I(MachineInstr &MI, Register &Reg);
  bool applyCombineI2PToP2I(MachineInstr &MI, Register &Reg);

  /// Transform PtrToInt(IntToPtr(x)) to x.
  bool matchCombineP2IToI2P(MachineInstr &MI, Register &Reg);
  bool applyCombineP2IToI2P(MachineInstr &MI, Register &Reg);

  /// Transform G_ADD (G_PTRTOINT x), y -> G_PTRTOINT (G_PTR_ADD x, y)
  /// Transform G_ADD y, (G_PTRTOINT x) -> G_PTRTOINT (G_PTR_ADD x, y)
  bool matchCombineAddP2IToPtrAdd(MachineInstr &MI,
                                  std::pair<Register, bool> &PtrRegAndCommute);
  bool applyCombineAddP2IToPtrAdd(MachineInstr &MI,
                                  std::pair<Register, bool> &PtrRegAndCommute);

  // Transform G_PTR_ADD (G_PTRTOINT C1), C2 -> C1 + C2
  bool matchCombineConstPtrAddToI2P(MachineInstr &MI, int64_t &NewCst);
  bool applyCombineConstPtrAddToI2P(MachineInstr &MI, int64_t &NewCst);

  /// Transform anyext(trunc(x)) to x.
  bool matchCombineAnyExtTrunc(MachineInstr &MI, Register &Reg);
  bool applyCombineAnyExtTrunc(MachineInstr &MI, Register &Reg);

  /// Transform [asz]ext([asz]ext(x)) to [asz]ext x.
  bool matchCombineExtOfExt(MachineInstr &MI,
                            std::tuple<Register, unsigned> &MatchInfo);
  bool applyCombineExtOfExt(MachineInstr &MI,
                            std::tuple<Register, unsigned> &MatchInfo);

  /// Transform fneg(fneg(x)) to x.
  bool matchCombineFNegOfFNeg(MachineInstr &MI, Register &Reg);

  /// Match fabs(fabs(x)) to fabs(x).
  bool matchCombineFAbsOfFAbs(MachineInstr &MI, Register &Src);
  bool applyCombineFAbsOfFAbs(MachineInstr &MI, Register &Src);

  /// Transform trunc ([asz]ext x) to x or ([asz]ext x) or (trunc x).
  bool matchCombineTruncOfExt(MachineInstr &MI,
                              std::pair<Register, unsigned> &MatchInfo);
  bool applyCombineTruncOfExt(MachineInstr &MI,
                              std::pair<Register, unsigned> &MatchInfo);

  /// Transform trunc (shl x, K) to shl (trunc x), K, provided
  /// K < VT.getScalarSizeInBits().
  bool matchCombineTruncOfShl(MachineInstr &MI,
                              std::pair<Register, Register> &MatchInfo);
  bool applyCombineTruncOfShl(MachineInstr &MI,
                              std::pair<Register, Register> &MatchInfo);

  /// Transform G_MUL(x, -1) to G_SUB(0, x)
  bool applyCombineMulByNegativeOne(MachineInstr &MI);

  /// Return true if any explicit use operand on \p MI is defined by a
  /// G_IMPLICIT_DEF.
  bool matchAnyExplicitUseIsUndef(MachineInstr &MI);

  /// Return true if all explicit register use operands on \p MI are defined
  /// by a G_IMPLICIT_DEF.
  bool matchAllExplicitUsesAreUndef(MachineInstr &MI);

  /// Return true if a G_SHUFFLE_VECTOR instruction \p MI has an undef mask.
  bool matchUndefShuffleVectorMask(MachineInstr &MI);

  /// Return true if a G_STORE instruction \p MI is storing an undef value.
  bool matchUndefStore(MachineInstr &MI);

  /// Return true if a G_SELECT instruction \p MI has an undef comparison.
  bool matchUndefSelectCmp(MachineInstr &MI);

  /// Return true if a G_SELECT instruction \p MI has a constant comparison. If
  /// true, \p OpIdx will store the operand index of the known selected value.
  bool matchConstantSelectCmp(MachineInstr &MI, unsigned &OpIdx);

  /// Replace an instruction with a G_FCONSTANT with value \p C.
  bool replaceInstWithFConstant(MachineInstr &MI, double C);

  /// Replace an instruction with a G_CONSTANT with value \p C.
  bool replaceInstWithConstant(MachineInstr &MI, int64_t C);

  /// Replace an instruction with a G_IMPLICIT_DEF.
  bool replaceInstWithUndef(MachineInstr &MI);

  /// Delete \p MI and replace all of its uses with its \p OpIdx-th operand.
  bool replaceSingleDefInstWithOperand(MachineInstr &MI, unsigned OpIdx);

  /// Delete \p MI and replace all of its uses with \p Replacement.
  bool replaceSingleDefInstWithReg(MachineInstr &MI, Register Replacement);

  /// Return true if \p MOP1 and \p MOP2 are register operands that are defined
  /// by equivalent instructions.
  bool matchEqualDefs(const MachineOperand &MOP1, const MachineOperand &MOP2);

  /// Return true if \p MOP is defined by a G_CONSTANT with a value equal to
  /// \p C.
  bool matchConstantOp(const MachineOperand &MOP, int64_t C);

  /// Optimize (cond ? x : x) -> x
  bool matchSelectSameVal(MachineInstr &MI);

  /// Optimize (x op x) -> x
  bool matchBinOpSameVal(MachineInstr &MI);

  /// Check if operand \p OpIdx is zero.
  bool matchOperandIsZero(MachineInstr &MI, unsigned OpIdx);

  /// Check if operand \p OpIdx is undef.
  bool matchOperandIsUndef(MachineInstr &MI, unsigned OpIdx);

  /// Check if operand \p OpIdx is known to be a power of 2.
  bool matchOperandIsKnownToBeAPowerOfTwo(MachineInstr &MI, unsigned OpIdx);

  /// Erase \p MI
  bool eraseInst(MachineInstr &MI);

  /// Return true if MI is a G_ADD which can be simplified to a G_SUB.
  bool matchSimplifyAddToSub(MachineInstr &MI,
                             std::tuple<Register, Register> &MatchInfo);
  bool applySimplifyAddToSub(MachineInstr &MI,
                             std::tuple<Register, Register> &MatchInfo);

  /// Match (logic_op (op x...), (op y...)) -> (op (logic_op x, y))
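  ///
  /// For example (a sketch; zext/and stand in for the op/logic_op pair):
  ///   %ax:_(s64) = G_ZEXT %a:_(s32)
  ///   %bx:_(s64) = G_ZEXT %b:_(s32)
  ///   %r:_(s64) = G_AND %ax, %bx
  /// -->
  ///   %n:_(s32) = G_AND %a, %b
  ///   %r:_(s64) = G_ZEXT %n(s32)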
  bool
  matchHoistLogicOpWithSameOpcodeHands(MachineInstr &MI,
                                       InstructionStepsMatchInfo &MatchInfo);

  /// Replace \p MI with a series of instructions described in \p MatchInfo.
  bool applyBuildInstructionSteps(MachineInstr &MI,
                                  InstructionStepsMatchInfo &MatchInfo);

  /// Match ashr (shl x, C), C -> sext_inreg x, (bitwidth(x) - C)
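  /// For example, on s32 (a sketch):
  ///   %c:_(s32) = G_CONSTANT i32 24
  ///   %t:_(s32) = G_SHL %x, %c(s32)
  ///   %r:_(s32) = G_ASHR %t, %c(s32)
  /// -->
  ///   %r:_(s32) = G_SEXT_INREG %x, 8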
  bool matchAshrShlToSextInreg(MachineInstr &MI,
                               std::tuple<Register, int64_t> &MatchInfo);
  bool applyAshShlToSextInreg(MachineInstr &MI,
                              std::tuple<Register, int64_t> &MatchInfo);
  /// \return true if \p MI is a G_AND instruction whose operands are x and y
  /// where x & y == x or x & y == y. (E.g., one of the operands is an all-ones
  /// value.)
  ///
  /// \param [in] MI - The G_AND instruction.
  /// \param [out] Replacement - A register the G_AND should be replaced with on
  /// success.
  bool matchRedundantAnd(MachineInstr &MI, Register &Replacement);

  /// \return true if \p MI is a G_OR instruction whose operands are x and y
  /// where x | y == x or x | y == y. (E.g., one of the operands is an
  /// all-zeros value.)
  ///
  /// \param [in] MI - The G_OR instruction.
  /// \param [out] Replacement - A register the G_OR should be replaced with on
  /// success.
  bool matchRedundantOr(MachineInstr &MI, Register &Replacement);

  /// \return true if \p MI is a G_SEXT_INREG that can be erased.
  bool matchRedundantSExtInReg(MachineInstr &MI);

  /// Combine the inversion of a compare's result into a compare with the
  /// opposite condition code.
  bool matchNotCmp(MachineInstr &MI, SmallVectorImpl<Register> &RegsToNegate);
  bool applyNotCmp(MachineInstr &MI, SmallVectorImpl<Register> &RegsToNegate);

  /// Fold (xor (and x, y), y) -> (and (not x), y)
  ///{
  bool matchXorOfAndWithSameReg(MachineInstr &MI,
                                std::pair<Register, Register> &MatchInfo);
  bool applyXorOfAndWithSameReg(MachineInstr &MI,
                                std::pair<Register, Register> &MatchInfo);
  ///}

  /// Combine G_PTR_ADD with nullptr to G_INTTOPTR
  bool matchPtrAddZero(MachineInstr &MI);
  bool applyPtrAddZero(MachineInstr &MI);

  /// Combine G_UREM x, (known power of 2) to an add and bitmasking.
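  /// For example (a sketch; the mask is the power of two minus one):
  ///   %k:_(s32) = G_CONSTANT i32 8
  ///   %r:_(s32) = G_UREM %x, %k
  /// -->
  ///   %m:_(s32) = G_CONSTANT i32 7
  ///   %r:_(s32) = G_AND %x, %m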
  bool applySimplifyURemByPow2(MachineInstr &MI);

  bool matchCombineInsertVecElts(MachineInstr &MI,
                                 SmallVectorImpl<Register> &MatchInfo);

  bool applyCombineInsertVecElts(MachineInstr &MI,
                                 SmallVectorImpl<Register> &MatchInfo);

  /// Try to transform \p MI by using all of the above
  /// combine functions. Returns true if changed.
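  ///
  /// A hypothetical use from a target combiner might look like the following
  /// sketch (the surrounding variable names are illustrative):
  /// \code
  ///   CombinerHelper Helper(Observer, B, KB, MDT, LInfo);
  ///   if (Helper.tryCombine(MI))
  ///     return true; // MI was combined; report the change.
  /// \endcode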
  bool tryCombine(MachineInstr &MI);

private:
  // Memcpy family optimization helpers.
  bool optimizeMemcpy(MachineInstr &MI, Register Dst, Register Src,
                      unsigned KnownLen, Align DstAlign, Align SrcAlign,
                      bool IsVolatile);
  bool optimizeMemmove(MachineInstr &MI, Register Dst, Register Src,
                       unsigned KnownLen, Align DstAlign, Align SrcAlign,
                       bool IsVolatile);
  bool optimizeMemset(MachineInstr &MI, Register Dst, Register Val,
                      unsigned KnownLen, Align DstAlign, bool IsVolatile);

  /// Given a non-indexed load or store instruction \p MI, find an offset that
  /// can be usefully and legally folded into it as a post-indexing operation.
  ///
  /// \returns true if a candidate is found.
  bool findPostIndexCandidate(MachineInstr &MI, Register &Addr, Register &Base,
                              Register &Offset);

  /// Given a non-indexed load or store instruction \p MI, find an offset that
  /// can be usefully and legally folded into it as a pre-indexing operation.
  ///
  /// \returns true if a candidate is found.
  bool findPreIndexCandidate(MachineInstr &MI, Register &Addr, Register &Base,
                             Register &Offset);
};
} // namespace llvm

#endif