//===- Intrinsics.td - Defines all LLVM intrinsics ---------*- tablegen -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines properties of all LLVM intrinsics.
//
//===----------------------------------------------------------------------===//

include "llvm/CodeGen/ValueTypes.td"
include "llvm/CodeGen/SDNodeProperties.td"

//===----------------------------------------------------------------------===//
// Properties we keep track of for intrinsics.
//===----------------------------------------------------------------------===//

class IntrinsicProperty;

// Intr*Mem - Memory properties. If no property is set, the worst case
// is assumed (it may read and write any memory it can get access to and it may
// have other side effects).

// IntrNoMem - The intrinsic does not access memory or have any other side
// effects. It may be CSE'd, deleted if dead, etc.
def IntrNoMem : IntrinsicProperty;

// IntrReadMem - This intrinsic only reads from memory. It does not write to
// memory and has no other side effects. Therefore, it cannot be moved across
// potentially aliasing stores. However, it can be reordered otherwise and can
// be deleted if dead.
def IntrReadMem : IntrinsicProperty;

// IntrWriteMem - This intrinsic only writes to memory, but does not read from
// memory, and has no other side effects. This means dead stores before calls
// to this intrinsic may be removed.
def IntrWriteMem : IntrinsicProperty;

// IntrArgMemOnly - This intrinsic only accesses memory that its pointer-typed
// argument(s) point to, but may access an unspecified amount. Other than
// reads from and (possibly volatile) writes to memory, it has no side effects.
def IntrArgMemOnly : IntrinsicProperty;

// IntrInaccessibleMemOnly -- This intrinsic only accesses memory that is not
// accessible by the module being compiled. This is a weaker form of IntrNoMem.
def IntrInaccessibleMemOnly : IntrinsicProperty;

// IntrInaccessibleMemOrArgMemOnly -- This intrinsic only accesses memory that
// its pointer-typed arguments point to or memory that is not accessible
// by the module being compiled. This is a weaker form of IntrArgMemOnly.
def IntrInaccessibleMemOrArgMemOnly : IntrinsicProperty;

// Commutative - This intrinsic is commutative: X op Y == Y op X.
def Commutative : IntrinsicProperty;

// Throws - This intrinsic can throw.
def Throws : IntrinsicProperty;

// NoCapture - The specified argument pointer is not captured by the intrinsic.
class NoCapture<int argNo> : IntrinsicProperty {
  int ArgNo = argNo;
}

// Returned - The specified argument is always the return value of the
// intrinsic.
class Returned<int argNo> : IntrinsicProperty {
  int ArgNo = argNo;
}

// ReadOnly - The specified argument pointer is not written to through the
// pointer by the intrinsic.
class ReadOnly<int argNo> : IntrinsicProperty {
  int ArgNo = argNo;
}

// WriteOnly - The intrinsic does not read memory through the specified
// argument pointer.
class WriteOnly<int argNo> : IntrinsicProperty {
  int ArgNo = argNo;
}

// ReadNone - The specified argument pointer is not dereferenced by the
// intrinsic.
class ReadNone<int argNo> : IntrinsicProperty {
  int ArgNo = argNo;
}

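// As an illustration only (these classes are attached to real definitions
// later in this file, e.g. int_memcpy), the argument number is a zero-based
// index into the intrinsic's parameter list, so a pointer parameter that is
// only written through and never captured would carry properties such as:
//   [IntrArgMemOnly, WriteOnly<0>, NoCapture<0>]
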
def IntrNoReturn : IntrinsicProperty;

// IntrNoDuplicate - Calls to this intrinsic cannot be duplicated.
// Parallels the noduplicate attribute on LLVM IR functions.
def IntrNoDuplicate : IntrinsicProperty;

// IntrConvergent - Calls to this intrinsic are convergent and may not be made
// control-dependent on any additional values.
// Parallels the convergent attribute on LLVM IR functions.
def IntrConvergent : IntrinsicProperty;

// This property indicates that the intrinsic is safe to speculate.
def IntrSpeculatable : IntrinsicProperty;

// This property can be used to override the 'has no other side effects'
// language of the IntrNoMem, IntrReadMem, IntrWriteMem, and IntrArgMemOnly
// intrinsic properties. By default, intrinsics are assumed to have side
// effects, so this property is only necessary if you have defined one of
// the memory properties listed above.
// For this property, 'side effects' has the same meaning as 'side effects'
// defined by the hasSideEffects property of the TableGen Instruction class.
def IntrHasSideEffects : IntrinsicProperty;

//===----------------------------------------------------------------------===//
// Types used by intrinsics.
//===----------------------------------------------------------------------===//

class LLVMType<ValueType vt> {
  ValueType VT = vt;
}

class LLVMQualPointerType<LLVMType elty, int addrspace>
  : LLVMType<iPTR>{
  LLVMType ElTy = elty;
  int AddrSpace = addrspace;
}

class LLVMPointerType<LLVMType elty>
  : LLVMQualPointerType<elty, 0>;

class LLVMAnyPointerType<LLVMType elty>
  : LLVMType<iPTRAny>{
  LLVMType ElTy = elty;
}

// Match the type of another intrinsic parameter. Number is an index into the
// list of overloaded types for the intrinsic, excluding all the fixed types.
// The Number value must refer to a previously listed type. For example:
//   Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_anyfloat_ty, LLVMMatchType<0>]>
// has two overloaded types, the 2nd and 3rd arguments. LLVMMatchType<0>
// refers to the first overloaded type, which is the 2nd argument.
class LLVMMatchType<int num>
  : LLVMType<OtherVT>{
  int Number = num;
}

// Match the type of another intrinsic parameter that is expected to be based on
// an integral type (i.e. either iN or <N x iM>), but change the scalar size to
// be twice as wide or half as wide as the other type. This is only useful when
// the intrinsic is overloaded, so the matched type should be declared as iAny.
class LLVMExtendedType<int num> : LLVMMatchType<num>;
class LLVMTruncatedType<int num> : LLVMMatchType<num>;
class LLVMVectorSameWidth<int num, LLVMType elty>
  : LLVMMatchType<num> {
  ValueType ElTy = elty.VT;
}
class LLVMPointerTo<int num> : LLVMMatchType<num>;
class LLVMPointerToElt<int num> : LLVMMatchType<num>;
class LLVMVectorOfAnyPointersToElt<int num> : LLVMMatchType<num>;

// Match the type of another intrinsic parameter that is expected to be a
// vector type, but change the element count to be half as many.
class LLVMHalfElementsVectorType<int num> : LLVMMatchType<num>;

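// For example (cross-referencing definitions later in this file),
// int_masked_load uses LLVMAnyPointerType<LLVMMatchType<0>> for its pointer
// operand and LLVMVectorSameWidth<0, llvm_i1_ty> for its mask, i.e. a vector
// of i1 with the same element count as the loaded vector type.
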
def llvm_void_ty : LLVMType<isVoid>;
def llvm_any_ty : LLVMType<Any>;
def llvm_anyint_ty : LLVMType<iAny>;
def llvm_anyfloat_ty : LLVMType<fAny>;
def llvm_anyvector_ty : LLVMType<vAny>;
def llvm_i1_ty : LLVMType<i1>;
def llvm_i8_ty : LLVMType<i8>;
def llvm_i16_ty : LLVMType<i16>;
def llvm_i32_ty : LLVMType<i32>;
def llvm_i64_ty : LLVMType<i64>;
def llvm_half_ty : LLVMType<f16>;
def llvm_float_ty : LLVMType<f32>;
def llvm_double_ty : LLVMType<f64>;
def llvm_f80_ty : LLVMType<f80>;
def llvm_f128_ty : LLVMType<f128>;
def llvm_ppcf128_ty : LLVMType<ppcf128>;
def llvm_ptr_ty : LLVMPointerType<llvm_i8_ty>;             // i8*
def llvm_ptrptr_ty : LLVMPointerType<llvm_ptr_ty>;         // i8**
def llvm_anyptr_ty : LLVMAnyPointerType<llvm_i8_ty>;       // (space)i8*
def llvm_empty_ty : LLVMType<OtherVT>;                     // { }
def llvm_descriptor_ty : LLVMPointerType<llvm_empty_ty>;   // { }*
def llvm_metadata_ty : LLVMType<MetadataVT>;               // !{...}
def llvm_token_ty : LLVMType<token>;                       // token

def llvm_x86mmx_ty : LLVMType<x86mmx>;
def llvm_ptrx86mmx_ty : LLVMPointerType<llvm_x86mmx_ty>;   // <1 x i64>*

def llvm_v2i1_ty : LLVMType<v2i1>;         //   2 x i1
def llvm_v4i1_ty : LLVMType<v4i1>;         //   4 x i1
def llvm_v8i1_ty : LLVMType<v8i1>;         //   8 x i1
def llvm_v16i1_ty : LLVMType<v16i1>;       //  16 x i1
def llvm_v32i1_ty : LLVMType<v32i1>;       //  32 x i1
def llvm_v64i1_ty : LLVMType<v64i1>;       //  64 x i1
def llvm_v512i1_ty : LLVMType<v512i1>;     // 512 x i1
def llvm_v1024i1_ty : LLVMType<v1024i1>;   //1024 x i1

def llvm_v1i8_ty : LLVMType<v1i8>;         //   1 x i8
def llvm_v2i8_ty : LLVMType<v2i8>;         //   2 x i8
def llvm_v4i8_ty : LLVMType<v4i8>;         //   4 x i8
def llvm_v8i8_ty : LLVMType<v8i8>;         //   8 x i8
def llvm_v16i8_ty : LLVMType<v16i8>;       //  16 x i8
def llvm_v32i8_ty : LLVMType<v32i8>;       //  32 x i8
def llvm_v64i8_ty : LLVMType<v64i8>;       //  64 x i8
def llvm_v128i8_ty : LLVMType<v128i8>;     // 128 x i8
def llvm_v256i8_ty : LLVMType<v256i8>;     // 256 x i8

def llvm_v1i16_ty : LLVMType<v1i16>;       //   1 x i16
def llvm_v2i16_ty : LLVMType<v2i16>;       //   2 x i16
def llvm_v4i16_ty : LLVMType<v4i16>;       //   4 x i16
def llvm_v8i16_ty : LLVMType<v8i16>;       //   8 x i16
def llvm_v16i16_ty : LLVMType<v16i16>;     //  16 x i16
def llvm_v32i16_ty : LLVMType<v32i16>;     //  32 x i16
def llvm_v64i16_ty : LLVMType<v64i16>;     //  64 x i16
def llvm_v128i16_ty : LLVMType<v128i16>;   // 128 x i16

def llvm_v1i32_ty : LLVMType<v1i32>;       //   1 x i32
def llvm_v2i32_ty : LLVMType<v2i32>;       //   2 x i32
def llvm_v4i32_ty : LLVMType<v4i32>;       //   4 x i32
def llvm_v8i32_ty : LLVMType<v8i32>;       //   8 x i32
def llvm_v16i32_ty : LLVMType<v16i32>;     //  16 x i32
def llvm_v32i32_ty : LLVMType<v32i32>;     //  32 x i32
def llvm_v64i32_ty : LLVMType<v64i32>;     //  64 x i32

def llvm_v1i64_ty : LLVMType<v1i64>;       //   1 x i64
def llvm_v2i64_ty : LLVMType<v2i64>;       //   2 x i64
def llvm_v4i64_ty : LLVMType<v4i64>;       //   4 x i64
def llvm_v8i64_ty : LLVMType<v8i64>;       //   8 x i64
def llvm_v16i64_ty : LLVMType<v16i64>;     //  16 x i64
def llvm_v32i64_ty : LLVMType<v32i64>;     //  32 x i64

def llvm_v1i128_ty : LLVMType<v1i128>;     //   1 x i128

def llvm_v2f16_ty : LLVMType<v2f16>;       //  2 x half (__fp16)
def llvm_v4f16_ty : LLVMType<v4f16>;       //  4 x half (__fp16)
def llvm_v8f16_ty : LLVMType<v8f16>;       //  8 x half (__fp16)
def llvm_v1f32_ty : LLVMType<v1f32>;       //  1 x float
def llvm_v2f32_ty : LLVMType<v2f32>;       //  2 x float
def llvm_v4f32_ty : LLVMType<v4f32>;       //  4 x float
def llvm_v8f32_ty : LLVMType<v8f32>;       //  8 x float
def llvm_v16f32_ty : LLVMType<v16f32>;     // 16 x float
def llvm_v1f64_ty : LLVMType<v1f64>;       //  1 x double
def llvm_v2f64_ty : LLVMType<v2f64>;       //  2 x double
def llvm_v4f64_ty : LLVMType<v4f64>;       //  4 x double
def llvm_v8f64_ty : LLVMType<v8f64>;       //  8 x double

def llvm_vararg_ty : LLVMType<isVoid>;     // this means vararg here


//===----------------------------------------------------------------------===//
// Intrinsic Definitions.
//===----------------------------------------------------------------------===//

// Intrinsic class - This is used to define one LLVM intrinsic. The name of the
// intrinsic definition should start with "int_", then match the LLVM intrinsic
// name with the "llvm." prefix removed, and all "."s turned into "_"s. For
// example, llvm.bswap.i16 -> int_bswap_i16.
//
// * RetTypes is a list containing the return types expected for the
//   intrinsic.
// * ParamTypes is a list containing the parameter types expected for the
//   intrinsic.
// * Properties can be set to describe the behavior of the intrinsic.
//
class Intrinsic<list<LLVMType> ret_types,
                list<LLVMType> param_types = [],
                list<IntrinsicProperty> intr_properties = [],
                string name = "",
                list<SDNodeProperty> sd_properties = []> : SDPatternOperator {
  string LLVMName = name;
  string TargetPrefix = "";   // Set to a prefix for target-specific intrinsics.
  list<LLVMType> RetTypes = ret_types;
  list<LLVMType> ParamTypes = param_types;
  list<IntrinsicProperty> IntrProperties = intr_properties;
  let Properties = sd_properties;

  bit isTarget = 0;
}

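// As an illustrative sketch only (not one of the intrinsics defined in this
// file), a definition tying the pieces above together might look like:
//   def int_example_op : Intrinsic<[llvm_anyint_ty],                 // result
//                                  [LLVMMatchType<0>, llvm_i32_ty],  // params
//                                  [IntrNoMem, IntrSpeculatable]>;   // properties
// which would declare an overloaded llvm.example.op.* intrinsic.
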
/// GCCBuiltin - If this intrinsic exactly corresponds to a GCC builtin, this
/// specifies the name of the builtin. This provides automatic CBE and CFE
/// support.
class GCCBuiltin<string name> {
  string GCCBuiltinName = name;
}

class MSBuiltin<string name> {
  string MSBuiltinName = name;
}


//===--------------- Variable Argument Handling Intrinsics ----------------===//
//

def int_vastart : Intrinsic<[], [llvm_ptr_ty], [], "llvm.va_start">;
def int_vacopy : Intrinsic<[], [llvm_ptr_ty, llvm_ptr_ty], [],
                           "llvm.va_copy">;
def int_vaend : Intrinsic<[], [llvm_ptr_ty], [], "llvm.va_end">;

//===------------------- Garbage Collection Intrinsics --------------------===//
//
def int_gcroot : Intrinsic<[],
                           [llvm_ptrptr_ty, llvm_ptr_ty]>;
def int_gcread : Intrinsic<[llvm_ptr_ty],
                           [llvm_ptr_ty, llvm_ptrptr_ty],
                           [IntrReadMem, IntrArgMemOnly]>;
def int_gcwrite : Intrinsic<[],
                            [llvm_ptr_ty, llvm_ptr_ty, llvm_ptrptr_ty],
                            [IntrArgMemOnly, NoCapture<1>, NoCapture<2>]>;

//===--------------------- Code Generator Intrinsics ----------------------===//
//
def int_returnaddress : Intrinsic<[llvm_ptr_ty], [llvm_i32_ty], [IntrNoMem]>;
def int_addressofreturnaddress : Intrinsic<[llvm_ptr_ty], [], [IntrNoMem]>;
def int_frameaddress : Intrinsic<[llvm_ptr_ty], [llvm_i32_ty], [IntrNoMem]>;
def int_read_register : Intrinsic<[llvm_anyint_ty], [llvm_metadata_ty],
                                  [IntrReadMem], "llvm.read_register">;
def int_write_register : Intrinsic<[], [llvm_metadata_ty, llvm_anyint_ty],
                                   [], "llvm.write_register">;

// Gets the address of the local variable area. This is typically a copy of the
// stack, frame, or base pointer depending on the type of prologue.
def int_localaddress : Intrinsic<[llvm_ptr_ty], [], [IntrNoMem]>;

// Escapes local variables to allow access from other functions.
def int_localescape : Intrinsic<[], [llvm_vararg_ty]>;

// Given a function and the localaddress of a parent frame, returns a pointer
// to an escaped allocation indicated by the index.
def int_localrecover : Intrinsic<[llvm_ptr_ty],
                                 [llvm_ptr_ty, llvm_ptr_ty, llvm_i32_ty],
                                 [IntrNoMem]>;
// Note: we treat stacksave/stackrestore as writemem because we don't otherwise
// model their dependencies on allocas.
def int_stacksave : Intrinsic<[llvm_ptr_ty]>,
                    GCCBuiltin<"__builtin_stack_save">;
def int_stackrestore : Intrinsic<[], [llvm_ptr_ty]>,
                       GCCBuiltin<"__builtin_stack_restore">;

def int_get_dynamic_area_offset : Intrinsic<[llvm_anyint_ty]>;

def int_thread_pointer : Intrinsic<[llvm_ptr_ty], [], [IntrNoMem]>,
                         GCCBuiltin<"__builtin_thread_pointer">;

// IntrInaccessibleMemOrArgMemOnly is a little more pessimistic than strictly
// necessary for prefetch, but it conveniently prevents the prefetch from being
// reordered too aggressively with respect to nearby accesses to the same
// memory, while not impeding optimization.
def int_prefetch
    : Intrinsic<[], [ llvm_ptr_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty ],
                [ IntrInaccessibleMemOrArgMemOnly, ReadOnly<0>, NoCapture<0> ]>;
def int_pcmarker : Intrinsic<[], [llvm_i32_ty]>;

def int_readcyclecounter : Intrinsic<[llvm_i64_ty]>;

// The assume intrinsic is marked as arbitrarily writing so that proper
// control dependencies will be maintained.
def int_assume : Intrinsic<[], [llvm_i1_ty], []>;

// Stack Protector Intrinsic - The stackprotector intrinsic writes the stack
// guard to the correct place on the stack frame.
def int_stackprotector : Intrinsic<[], [llvm_ptr_ty, llvm_ptrptr_ty], []>;
def int_stackguard : Intrinsic<[llvm_ptr_ty], [], []>;

// A counter increment for instrumentation based profiling.
def int_instrprof_increment : Intrinsic<[],
                                        [llvm_ptr_ty, llvm_i64_ty,
                                         llvm_i32_ty, llvm_i32_ty],
                                        []>;

// A counter increment with step for instrumentation based profiling.
def int_instrprof_increment_step : Intrinsic<[],
                                             [llvm_ptr_ty, llvm_i64_ty,
                                              llvm_i32_ty, llvm_i32_ty,
                                              llvm_i64_ty],
                                             []>;

// A call to profile runtime for value profiling of target expressions
// through instrumentation based profiling.
def int_instrprof_value_profile : Intrinsic<[],
                                            [llvm_ptr_ty, llvm_i64_ty,
                                             llvm_i64_ty, llvm_i32_ty,
                                             llvm_i32_ty],
                                            []>;

//===------------------- Standard C Library Intrinsics --------------------===//
//

def int_memcpy : Intrinsic<[],
                           [llvm_anyptr_ty, llvm_anyptr_ty, llvm_anyint_ty,
                            llvm_i1_ty],
                           [IntrArgMemOnly, NoCapture<0>, NoCapture<1>,
                            WriteOnly<0>, ReadOnly<1>]>;
def int_memmove : Intrinsic<[],
                            [llvm_anyptr_ty, llvm_anyptr_ty, llvm_anyint_ty,
                             llvm_i1_ty],
                            [IntrArgMemOnly, NoCapture<0>, NoCapture<1>,
                             ReadOnly<1>]>;
def int_memset : Intrinsic<[],
                           [llvm_anyptr_ty, llvm_i8_ty, llvm_anyint_ty,
                            llvm_i1_ty],
                           [IntrArgMemOnly, NoCapture<0>, WriteOnly<0>]>;

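// For reference, an IR-level call to the memcpy intrinsic above looks like
// (illustrative; the name suffix follows the overloaded pointer and length
// types, and the trailing i1 is the isvolatile flag):
//   call void @llvm.memcpy.p0i8.p0i8.i64(i8* %dst, i8* %src, i64 %n, i1 false)
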
// FIXME: Add versions of these floating point intrinsics which allow
// non-default rounding modes and FP exception handling.

let IntrProperties = [IntrNoMem, IntrSpeculatable] in {
  def int_fma : Intrinsic<[llvm_anyfloat_ty],
                          [LLVMMatchType<0>, LLVMMatchType<0>,
                           LLVMMatchType<0>]>;
  def int_fmuladd : Intrinsic<[llvm_anyfloat_ty],
                              [LLVMMatchType<0>, LLVMMatchType<0>,
                               LLVMMatchType<0>]>;

  // These functions do not read memory, but are sensitive to the
  // rounding mode. LLVM purposely does not model changes to the FP
  // environment so they can be treated as readnone.
  def int_sqrt : Intrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>]>;
  def int_powi : Intrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>, llvm_i32_ty]>;
  def int_sin : Intrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>]>;
  def int_cos : Intrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>]>;
  def int_pow : Intrinsic<[llvm_anyfloat_ty],
                          [LLVMMatchType<0>, LLVMMatchType<0>]>;
  def int_log : Intrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>]>;
  def int_log10: Intrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>]>;
  def int_log2 : Intrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>]>;
  def int_exp : Intrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>]>;
  def int_exp2 : Intrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>]>;
  def int_fabs : Intrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>]>;
  def int_copysign : Intrinsic<[llvm_anyfloat_ty],
                               [LLVMMatchType<0>, LLVMMatchType<0>]>;
  def int_floor : Intrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>]>;
  def int_ceil : Intrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>]>;
  def int_trunc : Intrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>]>;
  def int_rint : Intrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>]>;
  def int_nearbyint : Intrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>]>;
  def int_round : Intrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>]>;
  def int_canonicalize : Intrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>],
                                   [IntrNoMem]>;
}

def int_minnum : Intrinsic<[llvm_anyfloat_ty],
  [LLVMMatchType<0>, LLVMMatchType<0>],
  [IntrNoMem, IntrSpeculatable, Commutative]
>;
def int_maxnum : Intrinsic<[llvm_anyfloat_ty],
  [LLVMMatchType<0>, LLVMMatchType<0>],
  [IntrNoMem, IntrSpeculatable, Commutative]
>;

// NOTE: these are internal interfaces.
def int_setjmp : Intrinsic<[llvm_i32_ty], [llvm_ptr_ty]>;
def int_longjmp : Intrinsic<[], [llvm_ptr_ty, llvm_i32_ty], [IntrNoReturn]>;
def int_sigsetjmp : Intrinsic<[llvm_i32_ty], [llvm_ptr_ty, llvm_i32_ty]>;
def int_siglongjmp : Intrinsic<[], [llvm_ptr_ty, llvm_i32_ty], [IntrNoReturn]>;

// Internal interface for object size checking
def int_objectsize : Intrinsic<[llvm_anyint_ty],
                               [llvm_anyptr_ty, llvm_i1_ty, llvm_i1_ty],
                               [IntrNoMem, IntrSpeculatable]>,
                     GCCBuiltin<"__builtin_object_size">;

//===--------------- Constrained Floating Point Intrinsics ----------------===//
//

let IntrProperties = [IntrInaccessibleMemOnly] in {
  def int_experimental_constrained_fadd : Intrinsic<[ llvm_anyfloat_ty ],
                                                    [ LLVMMatchType<0>,
                                                      LLVMMatchType<0>,
                                                      llvm_metadata_ty,
                                                      llvm_metadata_ty ]>;
  def int_experimental_constrained_fsub : Intrinsic<[ llvm_anyfloat_ty ],
                                                    [ LLVMMatchType<0>,
                                                      LLVMMatchType<0>,
                                                      llvm_metadata_ty,
                                                      llvm_metadata_ty ]>;
  def int_experimental_constrained_fmul : Intrinsic<[ llvm_anyfloat_ty ],
                                                    [ LLVMMatchType<0>,
                                                      LLVMMatchType<0>,
                                                      llvm_metadata_ty,
                                                      llvm_metadata_ty ]>;
  def int_experimental_constrained_fdiv : Intrinsic<[ llvm_anyfloat_ty ],
                                                    [ LLVMMatchType<0>,
                                                      LLVMMatchType<0>,
                                                      llvm_metadata_ty,
                                                      llvm_metadata_ty ]>;
  def int_experimental_constrained_frem : Intrinsic<[ llvm_anyfloat_ty ],
                                                    [ LLVMMatchType<0>,
                                                      LLVMMatchType<0>,
                                                      llvm_metadata_ty,
                                                      llvm_metadata_ty ]>;

  def int_experimental_constrained_fma : Intrinsic<[ llvm_anyfloat_ty ],
                                                   [ LLVMMatchType<0>,
                                                     LLVMMatchType<0>,
                                                     LLVMMatchType<0>,
                                                     llvm_metadata_ty,
                                                     llvm_metadata_ty ]>;

  // These intrinsics are sensitive to the rounding mode so we need constrained
  // versions of each of them. When strict rounding and exception control are
  // not required the non-constrained versions of these intrinsics should be
  // used.
  def int_experimental_constrained_sqrt : Intrinsic<[ llvm_anyfloat_ty ],
                                                    [ LLVMMatchType<0>,
                                                      llvm_metadata_ty,
                                                      llvm_metadata_ty ]>;
  def int_experimental_constrained_powi : Intrinsic<[ llvm_anyfloat_ty ],
                                                    [ LLVMMatchType<0>,
                                                      llvm_i32_ty,
                                                      llvm_metadata_ty,
                                                      llvm_metadata_ty ]>;
  def int_experimental_constrained_sin : Intrinsic<[ llvm_anyfloat_ty ],
                                                   [ LLVMMatchType<0>,
                                                     llvm_metadata_ty,
                                                     llvm_metadata_ty ]>;
  def int_experimental_constrained_cos : Intrinsic<[ llvm_anyfloat_ty ],
                                                   [ LLVMMatchType<0>,
                                                     llvm_metadata_ty,
                                                     llvm_metadata_ty ]>;
  def int_experimental_constrained_pow : Intrinsic<[ llvm_anyfloat_ty ],
                                                   [ LLVMMatchType<0>,
                                                     LLVMMatchType<0>,
                                                     llvm_metadata_ty,
                                                     llvm_metadata_ty ]>;
  def int_experimental_constrained_log : Intrinsic<[ llvm_anyfloat_ty ],
                                                   [ LLVMMatchType<0>,
                                                     llvm_metadata_ty,
                                                     llvm_metadata_ty ]>;
  def int_experimental_constrained_log10: Intrinsic<[ llvm_anyfloat_ty ],
                                                    [ LLVMMatchType<0>,
                                                      llvm_metadata_ty,
                                                      llvm_metadata_ty ]>;
  def int_experimental_constrained_log2 : Intrinsic<[ llvm_anyfloat_ty ],
                                                    [ LLVMMatchType<0>,
                                                      llvm_metadata_ty,
                                                      llvm_metadata_ty ]>;
  def int_experimental_constrained_exp : Intrinsic<[ llvm_anyfloat_ty ],
                                                   [ LLVMMatchType<0>,
                                                     llvm_metadata_ty,
                                                     llvm_metadata_ty ]>;
  def int_experimental_constrained_exp2 : Intrinsic<[ llvm_anyfloat_ty ],
                                                    [ LLVMMatchType<0>,
                                                      llvm_metadata_ty,
                                                      llvm_metadata_ty ]>;
  def int_experimental_constrained_rint : Intrinsic<[ llvm_anyfloat_ty ],
                                                    [ LLVMMatchType<0>,
                                                      llvm_metadata_ty,
                                                      llvm_metadata_ty ]>;
  def int_experimental_constrained_nearbyint : Intrinsic<[ llvm_anyfloat_ty ],
                                                         [ LLVMMatchType<0>,
                                                           llvm_metadata_ty,
                                                           llvm_metadata_ty ]>;
}
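
// For reference, the two trailing metadata operands of the constrained
// intrinsics above carry the rounding mode and the exception behavior, e.g.
// (IR-level illustration):
//   call double @llvm.experimental.constrained.fadd.f64(double %a, double %b,
//            metadata !"round.dynamic", metadata !"fpexcept.strict")
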
// FIXME: Add intrinsics for fcmp, fptrunc, fpext, fptoui and fptosi.
// FIXME: Add intrinsics for fabs, copysign, floor, ceil, trunc and round?


//===------------------------- Expect Intrinsics --------------------------===//
//
def int_expect : Intrinsic<[llvm_anyint_ty], [LLVMMatchType<0>,
                                              LLVMMatchType<0>], [IntrNoMem]>;

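// IR-level illustration: the result of llvm.expect is its first operand, and
// the second operand is the value it is expected to equal (as produced by,
// e.g., __builtin_expect):
//   %pred = call i64 @llvm.expect.i64(i64 %cond, i64 1)
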
//===-------------------- Bit Manipulation Intrinsics ---------------------===//
//

// None of these intrinsics accesses memory at all.
let IntrProperties = [IntrNoMem, IntrSpeculatable] in {
  def int_bswap: Intrinsic<[llvm_anyint_ty], [LLVMMatchType<0>]>;
  def int_ctpop: Intrinsic<[llvm_anyint_ty], [LLVMMatchType<0>]>;
  def int_ctlz : Intrinsic<[llvm_anyint_ty], [LLVMMatchType<0>, llvm_i1_ty]>;
  def int_cttz : Intrinsic<[llvm_anyint_ty], [LLVMMatchType<0>, llvm_i1_ty]>;
  def int_bitreverse : Intrinsic<[llvm_anyint_ty], [LLVMMatchType<0>]>;
}

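// Note (per the LangRef): the trailing llvm_i1_ty operand of int_ctlz and
// int_cttz above is the is_zero_undef flag; when it is true, the result is
// undefined for a zero input. IR-level illustration:
//   %n = call i32 @llvm.ctlz.i32(i32 %x, i1 true)
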
//===------------------------ Debugger Intrinsics -------------------------===//
//

// None of these intrinsics accesses memory at all...but that doesn't
// mean the optimizers can change them aggressively. Special handling is
// needed in a few places. These synthetic intrinsics have no
// side-effects and just mark information about their operands.
let IntrProperties = [IntrNoMem, IntrSpeculatable] in {
  def int_dbg_declare : Intrinsic<[],
                                  [llvm_metadata_ty,
                                   llvm_metadata_ty,
                                   llvm_metadata_ty]>;
  def int_dbg_value : Intrinsic<[],
                                [llvm_metadata_ty,
                                 llvm_metadata_ty,
                                 llvm_metadata_ty]>;
  def int_dbg_addr : Intrinsic<[],
                               [llvm_metadata_ty,
                                llvm_metadata_ty,
                                llvm_metadata_ty]>;
}

//===------------------ Exception Handling Intrinsics----------------------===//
//

// The result of eh.typeid.for depends on the enclosing function, but inside a
// given function it is 'const' and may be CSE'd etc.
def int_eh_typeid_for : Intrinsic<[llvm_i32_ty], [llvm_ptr_ty], [IntrNoMem]>;

def int_eh_return_i32 : Intrinsic<[], [llvm_i32_ty, llvm_ptr_ty]>;
def int_eh_return_i64 : Intrinsic<[], [llvm_i64_ty, llvm_ptr_ty]>;

// eh.exceptionpointer returns the pointer to the exception caught by
// the given `catchpad`.
def int_eh_exceptionpointer : Intrinsic<[llvm_anyptr_ty], [llvm_token_ty],
                                        [IntrNoMem]>;

// Gets the exception code from a catchpad token. Only used on some platforms.
def int_eh_exceptioncode : Intrinsic<[llvm_i32_ty], [llvm_token_ty], [IntrNoMem]>;

// __builtin_unwind_init is an undocumented GCC intrinsic that causes all
// callee-saved registers to be saved and restored (regardless of whether they
// are used) in the calling function. It is used by libgcc_eh.
def int_eh_unwind_init: Intrinsic<[]>,
                        GCCBuiltin<"__builtin_unwind_init">;

def int_eh_dwarf_cfa : Intrinsic<[llvm_ptr_ty], [llvm_i32_ty]>;

let IntrProperties = [IntrNoMem] in {
  def int_eh_sjlj_lsda : Intrinsic<[llvm_ptr_ty]>;
  def int_eh_sjlj_callsite : Intrinsic<[], [llvm_i32_ty]>;
}
def int_eh_sjlj_functioncontext : Intrinsic<[], [llvm_ptr_ty]>;
def int_eh_sjlj_setjmp : Intrinsic<[llvm_i32_ty], [llvm_ptr_ty]>;
def int_eh_sjlj_longjmp : Intrinsic<[], [llvm_ptr_ty], [IntrNoReturn]>;
def int_eh_sjlj_setup_dispatch : Intrinsic<[], []>;

//===---------------- Generic Variable Attribute Intrinsics----------------===//
//
def int_var_annotation : Intrinsic<[],
                                   [llvm_ptr_ty, llvm_ptr_ty,
                                    llvm_ptr_ty, llvm_i32_ty],
                                   [], "llvm.var.annotation">;
def int_ptr_annotation : Intrinsic<[LLVMAnyPointerType<llvm_anyint_ty>],
                                   [LLVMMatchType<0>, llvm_ptr_ty, llvm_ptr_ty,
                                    llvm_i32_ty],
                                   [], "llvm.ptr.annotation">;
def int_annotation : Intrinsic<[llvm_anyint_ty],
                               [LLVMMatchType<0>, llvm_ptr_ty,
                                llvm_ptr_ty, llvm_i32_ty],
                               [], "llvm.annotation">;

// Annotates the current program point with metadata strings which are emitted
// as CodeView debug info records. This is expensive, as it disables inlining
// and is modelled as having side effects.
def int_codeview_annotation : Intrinsic<[], [llvm_metadata_ty],
                                        [IntrInaccessibleMemOnly, IntrNoDuplicate],
                                        "llvm.codeview.annotation">;

//===------------------------ Trampoline Intrinsics -----------------------===//
//
def int_init_trampoline : Intrinsic<[],
                                    [llvm_ptr_ty, llvm_ptr_ty, llvm_ptr_ty],
                                    [IntrArgMemOnly, NoCapture<0>]>,
                          GCCBuiltin<"__builtin_init_trampoline">;

def int_adjust_trampoline : Intrinsic<[llvm_ptr_ty], [llvm_ptr_ty],
                                      [IntrReadMem, IntrArgMemOnly]>,
                            GCCBuiltin<"__builtin_adjust_trampoline">;

//===------------------------ Overflow Intrinsics -------------------------===//
//

// Expose the carry flag from add operations on two integers.
def int_sadd_with_overflow : Intrinsic<[llvm_anyint_ty, llvm_i1_ty],
                                       [LLVMMatchType<0>, LLVMMatchType<0>],
                                       [IntrNoMem, IntrSpeculatable]>;
def int_uadd_with_overflow : Intrinsic<[llvm_anyint_ty, llvm_i1_ty],
                                       [LLVMMatchType<0>, LLVMMatchType<0>],
                                       [IntrNoMem, IntrSpeculatable]>;

def int_ssub_with_overflow : Intrinsic<[llvm_anyint_ty, llvm_i1_ty],
                                       [LLVMMatchType<0>, LLVMMatchType<0>],
                                       [IntrNoMem, IntrSpeculatable]>;
def int_usub_with_overflow : Intrinsic<[llvm_anyint_ty, llvm_i1_ty],
                                       [LLVMMatchType<0>, LLVMMatchType<0>],
                                       [IntrNoMem, IntrSpeculatable]>;

def int_smul_with_overflow : Intrinsic<[llvm_anyint_ty, llvm_i1_ty],
                                       [LLVMMatchType<0>, LLVMMatchType<0>],
                                       [IntrNoMem, IntrSpeculatable]>;
def int_umul_with_overflow : Intrinsic<[llvm_anyint_ty, llvm_i1_ty],
                                       [LLVMMatchType<0>, LLVMMatchType<0>],
                                       [IntrNoMem, IntrSpeculatable]>;

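// IR-level illustration: each of the *.with.overflow intrinsics returns a
// {result, overflow-bit} pair, e.g.:
//   %pair = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 %a, i32 %b)
//   %sum  = extractvalue { i32, i1 } %pair, 0
//   %ovf  = extractvalue { i32, i1 } %pair, 1
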
//===------------------------- Memory Use Markers -------------------------===//
//
def int_lifetime_start : Intrinsic<[],
                                   [llvm_i64_ty, llvm_anyptr_ty],
                                   [IntrArgMemOnly, NoCapture<1>]>;
def int_lifetime_end : Intrinsic<[],
                                 [llvm_i64_ty, llvm_anyptr_ty],
                                 [IntrArgMemOnly, NoCapture<1>]>;
def int_invariant_start : Intrinsic<[llvm_descriptor_ty],
                                    [llvm_i64_ty, llvm_anyptr_ty],
                                    [IntrArgMemOnly, NoCapture<1>]>;
def int_invariant_end : Intrinsic<[],
                                  [llvm_descriptor_ty, llvm_i64_ty,
                                   llvm_anyptr_ty],
                                  [IntrArgMemOnly, NoCapture<2>]>;

// invariant.group.barrier can't be marked with 'readnone' (IntrNoMem),
// because that would allow CSE of two barriers with the same argument.
// Readonly and argmemonly say that the barrier only reads its argument, so
// two barrier calls can be CSE'd only if memory did not change between them,
// which is valid.
// The argument also can't be marked with the 'returned' attribute, because
// that would allow the barrier to be removed.
def int_invariant_group_barrier : Intrinsic<[llvm_anyptr_ty],
                                            [LLVMMatchType<0>],
                                            [IntrReadMem, IntrArgMemOnly]>;

//===------------------------ Stackmap Intrinsics -------------------------===//
//
def int_experimental_stackmap : Intrinsic<[],
                                          [llvm_i64_ty, llvm_i32_ty,
                                           llvm_vararg_ty],
                                          [Throws]>;
def int_experimental_patchpoint_void : Intrinsic<[],
                                                 [llvm_i64_ty, llvm_i32_ty,
                                                  llvm_ptr_ty, llvm_i32_ty,
                                                  llvm_vararg_ty],
                                                 [Throws]>;
def int_experimental_patchpoint_i64 : Intrinsic<[llvm_i64_ty],
                                                [llvm_i64_ty, llvm_i32_ty,
                                                 llvm_ptr_ty, llvm_i32_ty,
                                                 llvm_vararg_ty],
                                                [Throws]>;


//===------------------------ Garbage Collection Intrinsics ---------------===//
// These are documented in docs/Statepoint.rst

def int_experimental_gc_statepoint : Intrinsic<[llvm_token_ty],
                                               [llvm_i64_ty, llvm_i32_ty,
                                                llvm_anyptr_ty, llvm_i32_ty,
                                                llvm_i32_ty, llvm_vararg_ty],
                                               [Throws]>;

def int_experimental_gc_result : Intrinsic<[llvm_any_ty], [llvm_token_ty],
                                           [IntrReadMem]>;
def int_experimental_gc_relocate : Intrinsic<[llvm_any_ty],
                                             [llvm_token_ty, llvm_i32_ty,
                                              llvm_i32_ty],
                                             [IntrReadMem]>;

//===------------------------ Coroutine Intrinsics ---------------===//
// These are documented in docs/Coroutines.rst

// Coroutine Structure Intrinsics.

def int_coro_id : Intrinsic<[llvm_token_ty], [llvm_i32_ty, llvm_ptr_ty,
                                              llvm_ptr_ty, llvm_ptr_ty],
                            [IntrArgMemOnly, IntrReadMem,
                             ReadNone<1>, ReadOnly<2>, NoCapture<2>]>;
def int_coro_alloc : Intrinsic<[llvm_i1_ty], [llvm_token_ty], []>;
def int_coro_begin : Intrinsic<[llvm_ptr_ty], [llvm_token_ty, llvm_ptr_ty],
                               [WriteOnly<1>]>;

def int_coro_free : Intrinsic<[llvm_ptr_ty], [llvm_token_ty, llvm_ptr_ty],
                              [IntrReadMem, IntrArgMemOnly, ReadOnly<1>,
                               NoCapture<1>]>;
def int_coro_end : Intrinsic<[llvm_i1_ty], [llvm_ptr_ty, llvm_i1_ty], []>;

def int_coro_frame : Intrinsic<[llvm_ptr_ty], [], [IntrNoMem]>;
def int_coro_size : Intrinsic<[llvm_anyint_ty], [], [IntrNoMem]>;

def int_coro_save : Intrinsic<[llvm_token_ty], [llvm_ptr_ty], []>;
def int_coro_suspend : Intrinsic<[llvm_i8_ty], [llvm_token_ty, llvm_i1_ty], []>;

def int_coro_param : Intrinsic<[llvm_i1_ty], [llvm_ptr_ty, llvm_ptr_ty],
                               [IntrNoMem, ReadNone<0>, ReadNone<1>]>;

// Coroutine Manipulation Intrinsics.

def int_coro_resume : Intrinsic<[], [llvm_ptr_ty], [Throws]>;
def int_coro_destroy : Intrinsic<[], [llvm_ptr_ty], [Throws]>;
def int_coro_done : Intrinsic<[llvm_i1_ty], [llvm_ptr_ty],
                              [IntrArgMemOnly, ReadOnly<0>, NoCapture<0>]>;
def int_coro_promise : Intrinsic<[llvm_ptr_ty],
                                 [llvm_ptr_ty, llvm_i32_ty, llvm_i1_ty],
                                 [IntrNoMem, NoCapture<0>]>;

// Coroutine Lowering Intrinsics. Used internally by coroutine passes.

def int_coro_subfn_addr : Intrinsic<[llvm_ptr_ty], [llvm_ptr_ty, llvm_i8_ty],
                                    [IntrReadMem, IntrArgMemOnly, ReadOnly<0>,
                                     NoCapture<0>]>;

//===--------------------------- Other Intrinsics --------------------------===//
//
def int_flt_rounds : Intrinsic<[llvm_i32_ty]>,
                     GCCBuiltin<"__builtin_flt_rounds">;
def int_trap : Intrinsic<[], [], [IntrNoReturn]>,
               GCCBuiltin<"__builtin_trap">;
def int_debugtrap : Intrinsic<[]>,
                    GCCBuiltin<"__builtin_debugtrap">;

// Support for dynamic deoptimization (or de-specialization)
def int_experimental_deoptimize : Intrinsic<[llvm_any_ty], [llvm_vararg_ty],
                                            [Throws]>;

// Support for speculative runtime guards
def int_experimental_guard : Intrinsic<[], [llvm_i1_ty, llvm_vararg_ty],
                                       [Throws]>;

// NOP: calls/invokes to this intrinsic are removed by codegen
def int_donothing : Intrinsic<[], [], [IntrNoMem]>;

// This instruction has no actual effect, though it is treated by the optimizer
// as having opaque side effects. This may be inserted into loops to ensure
// that they are not removed even if they turn out to be empty, for languages
// which specify that infinite loops must be preserved.
def int_sideeffect : Intrinsic<[], [], [IntrInaccessibleMemOnly]>;

// Intrinsics to support the half precision floating point format
let IntrProperties = [IntrNoMem] in {
  def int_convert_to_fp16 : Intrinsic<[llvm_i16_ty], [llvm_anyfloat_ty]>;
  def int_convert_from_fp16 : Intrinsic<[llvm_anyfloat_ty], [llvm_i16_ty]>;
}

// Clear cache intrinsic, default to ignore (i.e. emit nothing);
// maps to void __clear_cache() on supporting platforms
def int_clear_cache : Intrinsic<[], [llvm_ptr_ty, llvm_ptr_ty],
                                [], "llvm.clear_cache">;

//===-------------------------- Masked Intrinsics -------------------------===//
//
def int_masked_store : Intrinsic<[], [llvm_anyvector_ty,
                                      LLVMAnyPointerType<LLVMMatchType<0>>,
                                      llvm_i32_ty,
                                      LLVMVectorSameWidth<0, llvm_i1_ty>],
                                 [IntrArgMemOnly]>;

def int_masked_load : Intrinsic<[llvm_anyvector_ty],
                                [LLVMAnyPointerType<LLVMMatchType<0>>, llvm_i32_ty,
                                 LLVMVectorSameWidth<0, llvm_i1_ty>, LLVMMatchType<0>],
                                [IntrReadMem, IntrArgMemOnly]>;

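// IR-level illustration of a masked load (illustrative; the name suffix
// follows the overloaded vector and pointer types, and the i32 operand is the
// alignment):
//   %v = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %p, i32 4,
//                           <4 x i1> %mask, <4 x i32> %passthru)
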
def int_masked_gather: Intrinsic<[llvm_anyvector_ty],
                                 [LLVMVectorOfAnyPointersToElt<0>, llvm_i32_ty,
                                  LLVMVectorSameWidth<0, llvm_i1_ty>,
                                  LLVMMatchType<0>],
                                 [IntrReadMem]>;

def int_masked_scatter: Intrinsic<[],
                                  [llvm_anyvector_ty,
                                   LLVMVectorOfAnyPointersToElt<0>, llvm_i32_ty,
                                   LLVMVectorSameWidth<0, llvm_i1_ty>]>;

def int_masked_expandload: Intrinsic<[llvm_anyvector_ty],
                                     [LLVMPointerToElt<0>,
                                      LLVMVectorSameWidth<0, llvm_i1_ty>,
                                      LLVMMatchType<0>],
                                     [IntrReadMem]>;

def int_masked_compressstore: Intrinsic<[],
                                        [llvm_anyvector_ty,
                                         LLVMPointerToElt<0>,
                                         LLVMVectorSameWidth<0, llvm_i1_ty>],
                                        [IntrArgMemOnly]>;

// Test whether a pointer is associated with a type metadata identifier.
def int_type_test : Intrinsic<[llvm_i1_ty], [llvm_ptr_ty, llvm_metadata_ty],
                              [IntrNoMem]>;

// Safely loads a function pointer from a virtual table pointer using type metadata.
def int_type_checked_load : Intrinsic<[llvm_ptr_ty, llvm_i1_ty],
                                      [llvm_ptr_ty, llvm_i32_ty, llvm_metadata_ty],
                                      [IntrNoMem]>;

// Create a branch funnel that implements an indirect call to a limited set of
// callees. This needs to be a musttail call.
def int_icall_branch_funnel : Intrinsic<[], [llvm_vararg_ty], []>;

def int_load_relative: Intrinsic<[llvm_ptr_ty], [llvm_ptr_ty, llvm_anyint_ty],
                                 [IntrReadMem, IntrArgMemOnly]>;

// Xray intrinsics
//===----------------------------------------------------------------------===//
// Custom event logging for x-ray.
// Takes a pointer to a string and the length of the string.
def int_xray_customevent : Intrinsic<[], [llvm_ptr_ty, llvm_i32_ty],
                                     [NoCapture<0>, ReadOnly<0>, IntrWriteMem]>;
//===----------------------------------------------------------------------===//

//===------ Memory intrinsics with element-wise atomicity guarantees ------===//
//

// @llvm.memcpy.element.unordered.atomic.*(dest, src, length, elementsize)
def int_memcpy_element_unordered_atomic
    : Intrinsic<[],
                [
                  llvm_anyptr_ty, llvm_anyptr_ty, llvm_anyint_ty, llvm_i32_ty
                ],
                [
                  IntrArgMemOnly, NoCapture<0>, NoCapture<1>, WriteOnly<0>,
                  ReadOnly<1>
                ]>;

// @llvm.memmove.element.unordered.atomic.*(dest, src, length, elementsize)
def int_memmove_element_unordered_atomic
    : Intrinsic<[],
                [
                  llvm_anyptr_ty, llvm_anyptr_ty, llvm_anyint_ty, llvm_i32_ty
                ],
                [
                  IntrArgMemOnly, NoCapture<0>, NoCapture<1>, WriteOnly<0>,
                  ReadOnly<1>
                ]>;

// @llvm.memset.element.unordered.atomic.*(dest, value, length, elementsize)
def int_memset_element_unordered_atomic
    : Intrinsic<[], [ llvm_anyptr_ty, llvm_i8_ty, llvm_anyint_ty, llvm_i32_ty ],
                [ IntrArgMemOnly, NoCapture<0>, WriteOnly<0> ]>;

//===------------------------ Reduction Intrinsics ------------------------===//
//
def int_experimental_vector_reduce_fadd : Intrinsic<[llvm_anyfloat_ty],
                                                    [llvm_anyfloat_ty,
                                                     llvm_anyvector_ty],
                                                    [IntrNoMem]>;
def int_experimental_vector_reduce_fmul : Intrinsic<[llvm_anyfloat_ty],
                                                    [llvm_anyfloat_ty,
                                                     llvm_anyvector_ty],
                                                    [IntrNoMem]>;
def int_experimental_vector_reduce_add : Intrinsic<[llvm_anyint_ty],
                                                   [llvm_anyvector_ty],
                                                   [IntrNoMem]>;
def int_experimental_vector_reduce_mul : Intrinsic<[llvm_anyint_ty],
                                                   [llvm_anyvector_ty],
                                                   [IntrNoMem]>;
def int_experimental_vector_reduce_and : Intrinsic<[llvm_anyint_ty],
                                                   [llvm_anyvector_ty],
                                                   [IntrNoMem]>;
def int_experimental_vector_reduce_or : Intrinsic<[llvm_anyint_ty],
                                                  [llvm_anyvector_ty],
                                                  [IntrNoMem]>;
def int_experimental_vector_reduce_xor : Intrinsic<[llvm_anyint_ty],
                                                   [llvm_anyvector_ty],
                                                   [IntrNoMem]>;
def int_experimental_vector_reduce_smax : Intrinsic<[llvm_anyint_ty],
                                                    [llvm_anyvector_ty],
                                                    [IntrNoMem]>;
def int_experimental_vector_reduce_smin : Intrinsic<[llvm_anyint_ty],
                                                    [llvm_anyvector_ty],
                                                    [IntrNoMem]>;
def int_experimental_vector_reduce_umax : Intrinsic<[llvm_anyint_ty],
                                                    [llvm_anyvector_ty],
                                                    [IntrNoMem]>;
def int_experimental_vector_reduce_umin : Intrinsic<[llvm_anyint_ty],
                                                    [llvm_anyvector_ty],
                                                    [IntrNoMem]>;
def int_experimental_vector_reduce_fmax : Intrinsic<[llvm_anyfloat_ty],
                                                    [llvm_anyvector_ty],
                                                    [IntrNoMem]>;
def int_experimental_vector_reduce_fmin : Intrinsic<[llvm_anyfloat_ty],
                                                    [llvm_anyvector_ty],
                                                    [IntrNoMem]>;

//===----- Intrinsics that are used to provide predicate information -----===//

def int_ssa_copy : Intrinsic<[llvm_any_ty], [LLVMMatchType<0>],
                             [IntrNoMem, Returned<0>]>;
//===----------------------------------------------------------------------===//
// Target-specific intrinsics
//===----------------------------------------------------------------------===//

include "llvm/IR/IntrinsicsPowerPC.td"
include "llvm/IR/IntrinsicsX86.td"
include "llvm/IR/IntrinsicsARM.td"
include "llvm/IR/IntrinsicsAArch64.td"
include "llvm/IR/IntrinsicsXCore.td"
include "llvm/IR/IntrinsicsHexagon.td"
include "llvm/IR/IntrinsicsNVVM.td"
include "llvm/IR/IntrinsicsMips.td"
include "llvm/IR/IntrinsicsAMDGPU.td"
include "llvm/IR/IntrinsicsBPF.td"
include "llvm/IR/IntrinsicsSystemZ.td"
include "llvm/IR/IntrinsicsWebAssembly.td"