//===-- IR/VPIntrinsics.def - Describes llvm.vp.* Intrinsics -*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains descriptions of the various Vector Predication intrinsics.
// This is used as a central place for enumerating the different instructions
// and should eventually be the place to put comments about the instructions.
//
//===----------------------------------------------------------------------===//
// NOTE: NO INCLUDE GUARD DESIRED!
// Provide definitions of macros so that users of this file do not have to
// define everything to use it...
//
// Register a VP intrinsic and begin its property scope.
// All VP intrinsic scopes are top level, i.e., it is illegal to place a
// BEGIN_REGISTER_VP_INTRINSIC within a VP intrinsic scope.
// \p VPID The VP intrinsic id.
// \p MASKPOS The mask operand position.
// \p EVLPOS The explicit vector length operand position.
#ifndef BEGIN_REGISTER_VP_INTRINSIC
#define BEGIN_REGISTER_VP_INTRINSIC(VPID, MASKPOS, EVLPOS)
#endif
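//
// Operand positions are zero-based indices into the intrinsic's argument
// list; eg for llvm.vp.add(%x, %y, %mask, %evl) the mask is operand 2 and the
// explicit vector length (EVL) is operand 3.
//
// A rough, hypothetical sketch of how a consumer uses this file (names and
// error handling are illustrative, and the llvm namespace is assumed to be in
// scope): define the macro so that it emits one switch case per VP intrinsic,
// then include this file.
//
//   static unsigned getMaskParamPosFor(Intrinsic::ID ID) {
//     switch (ID) {
//     default:
//       llvm_unreachable("not a VP intrinsic");
//   #define BEGIN_REGISTER_VP_INTRINSIC(VPID, MASKPOS, EVLPOS) case Intrinsic::VPID: return MASKPOS;
//   #include "llvm/IR/VPIntrinsics.def"
//     }
//   }
//
// Macros the consumer leaves undefined default to no-ops (see the #ifndef
// blocks here), and every macro is #undef'd again at the end of this file.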
// End the property scope of a VP intrinsic.
#ifndef END_REGISTER_VP_INTRINSIC
#define END_REGISTER_VP_INTRINSIC(VPID)
#endif
// Register a new VP SDNode and begin its property scope.
// When the SDNode scope is nested within a VP intrinsic scope, it is
// implicitly registered as the canonical SDNode for this VP intrinsic.
// \p VPSD The SelectionDAG Node id (eg VP_ADD).
// \p LEGALPOS The operand position of the SDNode that is used for legalizing.
// If LEGALPOS < 0, then the return type given by
// TheNode->getValueType(-1-LEGALPOS) is used.
// \p TDNAME The name of the TableGen definition of this SDNode.
// \p MASKPOS The mask operand position.
// \p EVLPOS The explicit vector length operand position.
#ifndef BEGIN_REGISTER_VP_SDNODE
#define BEGIN_REGISTER_VP_SDNODE(VPSD, LEGALPOS, TDNAME, MASKPOS, EVLPOS)
#endif
// End the property scope of a new VP SDNode.
#ifndef END_REGISTER_VP_SDNODE
#define END_REGISTER_VP_SDNODE(VPSD)
#endif
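//
// For reference, the VP ISD opcodes themselves can be enumerated from this
// file; a sketch in the spirit of the SelectionDAG opcode enum (the exact
// in-tree code may differ):
//
//   enum NodeType {
//     // ... other opcodes ...
//   #define BEGIN_REGISTER_VP_SDNODE(VPSD, LEGALPOS, TDNAME, MASKPOS, EVLPOS) VPSD,
//   #include "llvm/IR/VPIntrinsics.def"
//     // ... remaining opcodes ...
//   };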
// Helper macro to set up the mapping from VP intrinsic to ISD opcode.
// Note: More than one VP intrinsic may map to one ISD opcode.
#ifndef HELPER_MAP_VPID_TO_VPSD
#define HELPER_MAP_VPID_TO_VPSD(VPID, VPSD)
#endif
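//
// A hypothetical sketch (illustrative name, llvm namespace assumed in scope)
// of turning this mapping into code, eg when lowering a VP intrinsic call to
// its SelectionDAG node:
//
//   static unsigned getSDOpcodeFor(Intrinsic::ID ID) {
//     switch (ID) {
//     default:
//       llvm_unreachable("not a VP intrinsic");
//   #define HELPER_MAP_VPID_TO_VPSD(VPID, VPSD) case Intrinsic::VPID: return ISD::VPSD;
//   #include "llvm/IR/VPIntrinsics.def"
//     }
//   }
//
// Because several intrinsics may share one SDNode (eg vp_fcmp and vp_icmp
// both map to VP_SETCC below), the reverse mapping is not unique.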
// Helper macros for the common "1:1 - Intrinsic : SDNode" case.
//
// There is one VP intrinsic that maps directly to one SDNode that goes by the
// same name. Since the operands are also the same, we open the property
// scopes for both the VPIntrinsic and the SDNode at once.
//
// \p VPID The canonical name (eg `vp_add`), which is used both as the name
// of the intrinsic and as the TableGen def of the SDNode.
// \p MASKPOS The mask operand position.
// \p EVLPOS The explicit vector length operand position.
// \p VPSD The SelectionDAG Node id (eg VP_ADD).
// \p LEGALPOS The operand position of the SDNode that is used for legalizing
// this SDNode. This can be `-1`, in which case the return type of
// the SDNode is used.
#define BEGIN_REGISTER_VP(VPID, MASKPOS, EVLPOS, VPSD, LEGALPOS) \
  BEGIN_REGISTER_VP_INTRINSIC(VPID, MASKPOS, EVLPOS) \
  BEGIN_REGISTER_VP_SDNODE(VPSD, LEGALPOS, VPID, MASKPOS, EVLPOS) \
  HELPER_MAP_VPID_TO_VPSD(VPID, VPSD)
#define END_REGISTER_VP(VPID, VPSD) \
  END_REGISTER_VP_INTRINSIC(VPID) \
  END_REGISTER_VP_SDNODE(VPSD)
// The following macros attach properties to the scope they are placed in. This
// assigns the property to the VP Intrinsic and/or SDNode that belongs to the
// scope.
//
// Property Macros {
// The intrinsic and/or SDNode has the same function as this LLVM IR Opcode.
// \p OPC The opcode of the instruction with the same function.
#ifndef VP_PROPERTY_FUNCTIONAL_OPC
#define VP_PROPERTY_FUNCTIONAL_OPC(OPC)
#endif
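//
// Property macros expand between the BEGIN_*/END_* macros of their scope, so
// a consumer can combine them. A hedged sketch (illustrative name and
// fallback, llvm namespace assumed in scope) that maps a VP intrinsic to its
// unpredicated IR opcode:
//
//   static unsigned getFunctionalOpcodeFor(Intrinsic::ID ID) {
//     switch (ID) {
//     default:
//       break;
//   #define BEGIN_REGISTER_VP_INTRINSIC(VPID, MASKPOS, EVLPOS) case Intrinsic::VPID:
//   #define VP_PROPERTY_FUNCTIONAL_OPC(OPC) return Instruction::OPC;
//   #define END_REGISTER_VP_INTRINSIC(VPID) break;
//   #include "llvm/IR/VPIntrinsics.def"
//     }
//     return Instruction::Call; // no functional opcode registered
//   }
//
// The 'break;' emitted for END_REGISTER_VP_INTRINSIC keeps intrinsics that
// lack this property from falling through into the next case.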
// Whether the intrinsic may have a rounding mode or exception behavior operand
// bundle.
// \p HASROUND '1' if the intrinsic can have a rounding mode operand bundle,
// '0' otherwise.
// \p HASEXCEPT '1' if the intrinsic can have an exception behavior operand
// bundle, '0' otherwise.
// \p INTRINID The constrained fp intrinsic this VP intrinsic corresponds to.
#ifndef VP_PROPERTY_CONSTRAINEDFP
#define VP_PROPERTY_CONSTRAINEDFP(HASROUND, HASEXCEPT, INTRINID)
#endif
// Map this VP intrinsic to its canonical functional intrinsic.
// \p INTRIN The non-VP intrinsic with the same function.
#ifndef VP_PROPERTY_FUNCTIONAL_INTRINSIC
#define VP_PROPERTY_FUNCTIONAL_INTRINSIC(INTRIN)
#endif
// This VP intrinsic is a memory operation.
// The pointer arg is at POINTERPOS and the data arg is at DATAPOS.
#ifndef VP_PROPERTY_MEMOP
#define VP_PROPERTY_MEMOP(POINTERPOS, DATAPOS)
#endif
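//
// In the same style as the earlier sketches, a consumer can recover the
// operand positions from this property. A hypothetical example; note the
// Optional-style return type, since loads and gathers below pass 'None' for
// DATAPOS:
//
//   static llvm::Optional<unsigned> getMemDataParamPosFor(Intrinsic::ID ID) {
//     using llvm::None; // vp.load/vp.gather entries spell DATAPOS as 'None'
//     switch (ID) {
//     default:
//       break;
//   #define BEGIN_REGISTER_VP_INTRINSIC(VPID, MASKPOS, EVLPOS) case Intrinsic::VPID:
//   #define VP_PROPERTY_MEMOP(POINTERPOS, DATAPOS) return DATAPOS;
//   #define END_REGISTER_VP_INTRINSIC(VPID) break;
//   #include "llvm/IR/VPIntrinsics.def"
//     }
//     return llvm::None;
//   }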
// Map this VP reduction intrinsic to its reduction operand positions.
#ifndef VP_PROPERTY_REDUCTION
#define VP_PROPERTY_REDUCTION(STARTPOS, VECTORPOS)
#endif
// A property to infer VP binary-op SDNode opcodes automatically.
#ifndef VP_PROPERTY_BINARYOP
#define VP_PROPERTY_BINARYOP
#endif
// A property to infer VP type casts automatically.
#ifndef VP_PROPERTY_CASTOP
#define VP_PROPERTY_CASTOP
#endif
// This VP intrinsic is a comparison operation.
// The condition code arg is at CCPOS and accepts floating-point condition
// codes if ISFP is set, else it accepts integer condition codes.
#ifndef VP_PROPERTY_CMP
#define VP_PROPERTY_CMP(CCPOS, ISFP)
#endif
/// } Property Macros
///// Integer Arithmetic {
// Specialized helper macro for integer binary operators (%x, %y, %mask, %evl).
#ifdef HELPER_REGISTER_BINARY_INT_VP
#error \
"The internal helper macro HELPER_REGISTER_BINARY_INT_VP is already defined!"
#endif
#define HELPER_REGISTER_BINARY_INT_VP(VPID, VPSD, IROPC) \
  BEGIN_REGISTER_VP(VPID, 2, 3, VPSD, -1) \
  VP_PROPERTY_FUNCTIONAL_OPC(IROPC) \
  VP_PROPERTY_BINARYOP \
  END_REGISTER_VP(VPID, VPSD)
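// For illustration, the first invocation below,
//   HELPER_REGISTER_BINARY_INT_VP(vp_add, VP_ADD, Add)
// expands via BEGIN_REGISTER_VP/END_REGISTER_VP above to:
//   BEGIN_REGISTER_VP_INTRINSIC(vp_add, 2, 3)
//   BEGIN_REGISTER_VP_SDNODE(VP_ADD, -1, vp_add, 2, 3)
//   HELPER_MAP_VPID_TO_VPSD(vp_add, VP_ADD)
//   VP_PROPERTY_FUNCTIONAL_OPC(Add)
//   VP_PROPERTY_BINARYOP
//   END_REGISTER_VP_INTRINSIC(vp_add)
//   END_REGISTER_VP_SDNODE(VP_ADD)
// ie the mask is argument 2, the EVL is argument 3, and legalization is based
// on the return type (LEGALPOS == -1).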
// llvm.vp.add(x,y,mask,vlen)
HELPER_REGISTER_BINARY_INT_VP(vp_add, VP_ADD, Add)
// llvm.vp.and(x,y,mask,vlen)
HELPER_REGISTER_BINARY_INT_VP(vp_and, VP_AND, And)
// llvm.vp.ashr(x,y,mask,vlen)
HELPER_REGISTER_BINARY_INT_VP(vp_ashr, VP_ASHR, AShr)
// llvm.vp.lshr(x,y,mask,vlen)
HELPER_REGISTER_BINARY_INT_VP(vp_lshr, VP_LSHR, LShr)
// llvm.vp.mul(x,y,mask,vlen)
HELPER_REGISTER_BINARY_INT_VP(vp_mul, VP_MUL, Mul)
// llvm.vp.or(x,y,mask,vlen)
HELPER_REGISTER_BINARY_INT_VP(vp_or, VP_OR, Or)
// llvm.vp.sdiv(x,y,mask,vlen)
HELPER_REGISTER_BINARY_INT_VP(vp_sdiv, VP_SDIV, SDiv)
// llvm.vp.shl(x,y,mask,vlen)
HELPER_REGISTER_BINARY_INT_VP(vp_shl, VP_SHL, Shl)
// llvm.vp.srem(x,y,mask,vlen)
HELPER_REGISTER_BINARY_INT_VP(vp_srem, VP_SREM, SRem)
// llvm.vp.sub(x,y,mask,vlen)
HELPER_REGISTER_BINARY_INT_VP(vp_sub, VP_SUB, Sub)
// llvm.vp.udiv(x,y,mask,vlen)
HELPER_REGISTER_BINARY_INT_VP(vp_udiv, VP_UDIV, UDiv)
// llvm.vp.urem(x,y,mask,vlen)
HELPER_REGISTER_BINARY_INT_VP(vp_urem, VP_UREM, URem)
// llvm.vp.xor(x,y,mask,vlen)
HELPER_REGISTER_BINARY_INT_VP(vp_xor, VP_XOR, Xor)
#undef HELPER_REGISTER_BINARY_INT_VP
///// } Integer Arithmetic
///// Floating-Point Arithmetic {
// Specialized helper macro for floating-point binary operators
// <operation>(%x, %y, %mask, %evl).
#ifdef HELPER_REGISTER_BINARY_FP_VP
#error \
"The internal helper macro HELPER_REGISTER_BINARY_FP_VP is already defined!"
#endif
#define HELPER_REGISTER_BINARY_FP_VP(OPSUFFIX, VPSD, IROPC) \
  BEGIN_REGISTER_VP(vp_##OPSUFFIX, 2, 3, VPSD, -1) \
  VP_PROPERTY_FUNCTIONAL_OPC(IROPC) \
  VP_PROPERTY_CONSTRAINEDFP(1, 1, experimental_constrained_##OPSUFFIX) \
  VP_PROPERTY_BINARYOP \
  END_REGISTER_VP(vp_##OPSUFFIX, VPSD)
// llvm.vp.fadd(x,y,mask,vlen)
HELPER_REGISTER_BINARY_FP_VP(fadd, VP_FADD, FAdd)
// llvm.vp.fsub(x,y,mask,vlen)
HELPER_REGISTER_BINARY_FP_VP(fsub, VP_FSUB, FSub)
// llvm.vp.fmul(x,y,mask,vlen)
HELPER_REGISTER_BINARY_FP_VP(fmul, VP_FMUL, FMul)
// llvm.vp.fdiv(x,y,mask,vlen)
HELPER_REGISTER_BINARY_FP_VP(fdiv, VP_FDIV, FDiv)
// llvm.vp.frem(x,y,mask,vlen)
HELPER_REGISTER_BINARY_FP_VP(frem, VP_FREM, FRem)
#undef HELPER_REGISTER_BINARY_FP_VP
// llvm.vp.fneg(x,mask,vlen)
BEGIN_REGISTER_VP(vp_fneg, 1, 2, VP_FNEG, -1)
VP_PROPERTY_FUNCTIONAL_OPC(FNeg)
END_REGISTER_VP(vp_fneg, VP_FNEG)
// llvm.vp.fma(x,y,z,mask,vlen)
BEGIN_REGISTER_VP(vp_fma, 3, 4, VP_FMA, -1)
VP_PROPERTY_CONSTRAINEDFP(1, 1, experimental_constrained_fma)
END_REGISTER_VP(vp_fma, VP_FMA)
///// } Floating-Point Arithmetic
///// Type Casts {
// Specialized helper macro for floating-point type conversions.
// <operation>(%x, %mask, %evl).
#ifdef HELPER_REGISTER_FP_CAST_VP
#error \
"The internal helper macro HELPER_REGISTER_FP_CAST_VP is already defined!"
#endif
#define HELPER_REGISTER_FP_CAST_VP(OPSUFFIX, VPSD, IROPC, HASROUND) \
  BEGIN_REGISTER_VP(vp_##OPSUFFIX, 1, 2, VPSD, -1) \
  VP_PROPERTY_FUNCTIONAL_OPC(IROPC) \
  VP_PROPERTY_CONSTRAINEDFP(HASROUND, 1, experimental_constrained_##OPSUFFIX) \
  VP_PROPERTY_CASTOP \
  END_REGISTER_VP(vp_##OPSUFFIX, VPSD)
// llvm.vp.fptoui(x,mask,vlen)
HELPER_REGISTER_FP_CAST_VP(fptoui, VP_FPTOUI, FPToUI, 0)
// llvm.vp.fptosi(x,mask,vlen)
HELPER_REGISTER_FP_CAST_VP(fptosi, VP_FPTOSI, FPToSI, 0)
// llvm.vp.uitofp(x,mask,vlen)
HELPER_REGISTER_FP_CAST_VP(uitofp, VP_UITOFP, UIToFP, 1)
// llvm.vp.sitofp(x,mask,vlen)
HELPER_REGISTER_FP_CAST_VP(sitofp, VP_SITOFP, SIToFP, 1)
// llvm.vp.fptrunc(x,mask,vlen)
HELPER_REGISTER_FP_CAST_VP(fptrunc, VP_FP_ROUND, FPTrunc, 1)
// llvm.vp.fpext(x,mask,vlen)
HELPER_REGISTER_FP_CAST_VP(fpext, VP_FP_EXTEND, FPExt, 0)
#undef HELPER_REGISTER_FP_CAST_VP
// Specialized helper macro for integer type conversions.
// <operation>(%x, %mask, %evl).
#ifdef HELPER_REGISTER_INT_CAST_VP
#error \
"The internal helper macro HELPER_REGISTER_INT_CAST_VP is already defined!"
#endif
#define HELPER_REGISTER_INT_CAST_VP(OPSUFFIX, VPSD, IROPC) \
  BEGIN_REGISTER_VP(vp_##OPSUFFIX, 1, 2, VPSD, -1) \
  VP_PROPERTY_FUNCTIONAL_OPC(IROPC) \
  VP_PROPERTY_CASTOP \
  END_REGISTER_VP(vp_##OPSUFFIX, VPSD)
// llvm.vp.trunc(x,mask,vlen)
HELPER_REGISTER_INT_CAST_VP(trunc, VP_TRUNCATE, Trunc)
// llvm.vp.zext(x,mask,vlen)
HELPER_REGISTER_INT_CAST_VP(zext, VP_ZERO_EXTEND, ZExt)
// llvm.vp.sext(x,mask,vlen)
HELPER_REGISTER_INT_CAST_VP(sext, VP_SIGN_EXTEND, SExt)
// llvm.vp.ptrtoint(x,mask,vlen)
HELPER_REGISTER_INT_CAST_VP(ptrtoint, VP_PTRTOINT, PtrToInt)
// llvm.vp.inttoptr(x,mask,vlen)
HELPER_REGISTER_INT_CAST_VP(inttoptr, VP_INTTOPTR, IntToPtr)
#undef HELPER_REGISTER_INT_CAST_VP
///// } Type Casts
///// Comparisons {
// VP_SETCC (ISel only)
BEGIN_REGISTER_VP_SDNODE(VP_SETCC, 0, vp_setcc, 3, 4)
END_REGISTER_VP_SDNODE(VP_SETCC)
// llvm.vp.fcmp(x,y,cc,mask,vlen)
BEGIN_REGISTER_VP_INTRINSIC(vp_fcmp, 3, 4)
HELPER_MAP_VPID_TO_VPSD(vp_fcmp, VP_SETCC)
VP_PROPERTY_FUNCTIONAL_OPC(FCmp)
VP_PROPERTY_CMP(2, true)
VP_PROPERTY_CONSTRAINEDFP(0, 1, experimental_constrained_fcmp)
END_REGISTER_VP_INTRINSIC(vp_fcmp)
// llvm.vp.icmp(x,y,cc,mask,vlen)
BEGIN_REGISTER_VP_INTRINSIC(vp_icmp, 3, 4)
HELPER_MAP_VPID_TO_VPSD(vp_icmp, VP_SETCC)
VP_PROPERTY_FUNCTIONAL_OPC(ICmp)
VP_PROPERTY_CMP(2, false)
END_REGISTER_VP_INTRINSIC(vp_icmp)
///// } Comparisons
///// Memory Operations {
// llvm.vp.store(val,ptr,mask,vlen)
BEGIN_REGISTER_VP_INTRINSIC(vp_store, 2, 3)
// chain = VP_STORE chain,val,base,offset,mask,evl
BEGIN_REGISTER_VP_SDNODE(VP_STORE, 0, vp_store, 4, 5)
HELPER_MAP_VPID_TO_VPSD(vp_store, VP_STORE)
VP_PROPERTY_FUNCTIONAL_OPC(Store)
VP_PROPERTY_FUNCTIONAL_INTRINSIC(masked_store)
VP_PROPERTY_MEMOP(1, 0)
END_REGISTER_VP(vp_store, VP_STORE)
// llvm.experimental.vp.strided.store(val,ptr,stride,mask,vlen)
BEGIN_REGISTER_VP_INTRINSIC(experimental_vp_strided_store, 3, 4)
// chain = EXPERIMENTAL_VP_STRIDED_STORE chain,val,base,offset,stride,mask,evl
BEGIN_REGISTER_VP_SDNODE(EXPERIMENTAL_VP_STRIDED_STORE, 0, experimental_vp_strided_store, 5, 6)
HELPER_MAP_VPID_TO_VPSD(experimental_vp_strided_store, EXPERIMENTAL_VP_STRIDED_STORE)
VP_PROPERTY_MEMOP(1, 0)
END_REGISTER_VP(experimental_vp_strided_store, EXPERIMENTAL_VP_STRIDED_STORE)
// llvm.vp.scatter(ptr,val,mask,vlen)
BEGIN_REGISTER_VP_INTRINSIC(vp_scatter, 2, 3)
// chain = VP_SCATTER chain,val,base,indices,scale,mask,evl
BEGIN_REGISTER_VP_SDNODE(VP_SCATTER, -1, vp_scatter, 5, 6)
HELPER_MAP_VPID_TO_VPSD(vp_scatter, VP_SCATTER)
VP_PROPERTY_FUNCTIONAL_INTRINSIC(masked_scatter)
VP_PROPERTY_MEMOP(1, 0)
END_REGISTER_VP(vp_scatter, VP_SCATTER)
// llvm.vp.load(ptr,mask,vlen)
BEGIN_REGISTER_VP_INTRINSIC(vp_load, 1, 2)
// val,chain = VP_LOAD chain,base,offset,mask,evl
BEGIN_REGISTER_VP_SDNODE(VP_LOAD, -1, vp_load, 3, 4)
HELPER_MAP_VPID_TO_VPSD(vp_load, VP_LOAD)
VP_PROPERTY_FUNCTIONAL_OPC(Load)
VP_PROPERTY_FUNCTIONAL_INTRINSIC(masked_load)
VP_PROPERTY_MEMOP(0, None)
END_REGISTER_VP(vp_load, VP_LOAD)
// llvm.experimental.vp.strided.load(ptr,stride,mask,vlen)
BEGIN_REGISTER_VP_INTRINSIC(experimental_vp_strided_load, 2, 3)
// val,chain = EXPERIMENTAL_VP_STRIDED_LOAD chain,base,offset,stride,mask,evl
BEGIN_REGISTER_VP_SDNODE(EXPERIMENTAL_VP_STRIDED_LOAD, -1, experimental_vp_strided_load, 4, 5)
HELPER_MAP_VPID_TO_VPSD(experimental_vp_strided_load, EXPERIMENTAL_VP_STRIDED_LOAD)
VP_PROPERTY_MEMOP(0, None)
END_REGISTER_VP(experimental_vp_strided_load, EXPERIMENTAL_VP_STRIDED_LOAD)
// llvm.vp.gather(ptr,mask,vlen)
BEGIN_REGISTER_VP_INTRINSIC(vp_gather, 1, 2)
// val,chain = VP_GATHER chain,base,indices,scale,mask,evl
BEGIN_REGISTER_VP_SDNODE(VP_GATHER, -1, vp_gather, 4, 5)
HELPER_MAP_VPID_TO_VPSD(vp_gather, VP_GATHER)
VP_PROPERTY_FUNCTIONAL_INTRINSIC(masked_gather)
VP_PROPERTY_MEMOP(0, None)
END_REGISTER_VP(vp_gather, VP_GATHER)
///// } Memory Operations
///// Reductions {
// Specialized helper macro for VP reductions (%start, %x, %mask, %evl).
#ifdef HELPER_REGISTER_REDUCTION_VP
#error \
"The internal helper macro HELPER_REGISTER_REDUCTION_VP is already defined!"
#endif
#define HELPER_REGISTER_REDUCTION_VP(VPID, VPSD, INTRIN) \
  BEGIN_REGISTER_VP(VPID, 2, 3, VPSD, -1) \
  VP_PROPERTY_FUNCTIONAL_INTRINSIC(INTRIN) \
  VP_PROPERTY_REDUCTION(0, 1) \
  END_REGISTER_VP(VPID, VPSD)
// llvm.vp.reduce.add(start,x,mask,vlen)
HELPER_REGISTER_REDUCTION_VP(vp_reduce_add, VP_REDUCE_ADD,
                             experimental_vector_reduce_add)
// llvm.vp.reduce.mul(start,x,mask,vlen)
HELPER_REGISTER_REDUCTION_VP(vp_reduce_mul, VP_REDUCE_MUL,
                             experimental_vector_reduce_mul)
// llvm.vp.reduce.and(start,x,mask,vlen)
HELPER_REGISTER_REDUCTION_VP(vp_reduce_and, VP_REDUCE_AND,
                             experimental_vector_reduce_and)
// llvm.vp.reduce.or(start,x,mask,vlen)
HELPER_REGISTER_REDUCTION_VP(vp_reduce_or, VP_REDUCE_OR,
                             experimental_vector_reduce_or)
// llvm.vp.reduce.xor(start,x,mask,vlen)
HELPER_REGISTER_REDUCTION_VP(vp_reduce_xor, VP_REDUCE_XOR,
                             experimental_vector_reduce_xor)
// llvm.vp.reduce.smax(start,x,mask,vlen)
HELPER_REGISTER_REDUCTION_VP(vp_reduce_smax, VP_REDUCE_SMAX,
                             experimental_vector_reduce_smax)
// llvm.vp.reduce.smin(start,x,mask,vlen)
HELPER_REGISTER_REDUCTION_VP(vp_reduce_smin, VP_REDUCE_SMIN,
                             experimental_vector_reduce_smin)
// llvm.vp.reduce.umax(start,x,mask,vlen)
HELPER_REGISTER_REDUCTION_VP(vp_reduce_umax, VP_REDUCE_UMAX,
                             experimental_vector_reduce_umax)
// llvm.vp.reduce.umin(start,x,mask,vlen)
HELPER_REGISTER_REDUCTION_VP(vp_reduce_umin, VP_REDUCE_UMIN,
                             experimental_vector_reduce_umin)
// llvm.vp.reduce.fmax(start,x,mask,vlen)
HELPER_REGISTER_REDUCTION_VP(vp_reduce_fmax, VP_REDUCE_FMAX,
                             experimental_vector_reduce_fmax)
// llvm.vp.reduce.fmin(start,x,mask,vlen)
HELPER_REGISTER_REDUCTION_VP(vp_reduce_fmin, VP_REDUCE_FMIN,
                             experimental_vector_reduce_fmin)
#undef HELPER_REGISTER_REDUCTION_VP
// Specialized helper macro for VP reductions as above but with two forms:
// sequential and reassociative. These manifest as the presence of 'reassoc'
// fast-math flags in the IR and as two distinct ISD opcodes in the
// SelectionDAG.
// Note that, by default, the VP intrinsic is mapped to the SEQ ISD opcode,
// which can then be relaxed to the non-SEQ ISD opcode if the 'reassoc' flag
// is set.
#ifdef HELPER_REGISTER_REDUCTION_SEQ_VP
#error \
"The internal helper macro HELPER_REGISTER_REDUCTION_SEQ_VP is already defined!"
#endif
#define HELPER_REGISTER_REDUCTION_SEQ_VP(VPID, VPSD, SEQ_VPSD, INTRIN) \
  BEGIN_REGISTER_VP_INTRINSIC(VPID, 2, 3) \
  BEGIN_REGISTER_VP_SDNODE(VPSD, -1, VPID, 2, 3) \
  VP_PROPERTY_REDUCTION(0, 1) \
  END_REGISTER_VP_SDNODE(VPSD) \
  BEGIN_REGISTER_VP_SDNODE(SEQ_VPSD, -1, VPID, 2, 3) \
  HELPER_MAP_VPID_TO_VPSD(VPID, SEQ_VPSD) \
  VP_PROPERTY_REDUCTION(0, 1) \
  END_REGISTER_VP_SDNODE(SEQ_VPSD) \
  VP_PROPERTY_FUNCTIONAL_INTRINSIC(INTRIN) \
  END_REGISTER_VP_INTRINSIC(VPID)
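// For illustration, the first invocation below expands to:
//   BEGIN_REGISTER_VP_INTRINSIC(vp_reduce_fadd, 2, 3)
//   BEGIN_REGISTER_VP_SDNODE(VP_REDUCE_FADD, -1, vp_reduce_fadd, 2, 3)
//   VP_PROPERTY_REDUCTION(0, 1)
//   END_REGISTER_VP_SDNODE(VP_REDUCE_FADD)
//   BEGIN_REGISTER_VP_SDNODE(VP_REDUCE_SEQ_FADD, -1, vp_reduce_fadd, 2, 3)
//   HELPER_MAP_VPID_TO_VPSD(vp_reduce_fadd, VP_REDUCE_SEQ_FADD)
//   VP_PROPERTY_REDUCTION(0, 1)
//   END_REGISTER_VP_SDNODE(VP_REDUCE_SEQ_FADD)
//   VP_PROPERTY_FUNCTIONAL_INTRINSIC(experimental_vector_reduce_fadd)
//   END_REGISTER_VP_INTRINSIC(vp_reduce_fadd)
// ie both ISD opcodes are registered inside the single intrinsic scope, and
// only the SEQ opcode is installed as the intrinsic's canonical mapping.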
// llvm.vp.reduce.fadd(start,x,mask,vlen)
HELPER_REGISTER_REDUCTION_SEQ_VP(vp_reduce_fadd, VP_REDUCE_FADD,
                                 VP_REDUCE_SEQ_FADD,
                                 experimental_vector_reduce_fadd)
// llvm.vp.reduce.fmul(start,x,mask,vlen)
HELPER_REGISTER_REDUCTION_SEQ_VP(vp_reduce_fmul, VP_REDUCE_FMUL,
                                 VP_REDUCE_SEQ_FMUL,
                                 experimental_vector_reduce_fmul)
#undef HELPER_REGISTER_REDUCTION_SEQ_VP
///// } Reductions
///// Shuffles {
// The mask 'cond' operands of llvm.vp.select and llvm.vp.merge are not reported
// as masks with the BEGIN_REGISTER_VP_* macros. This is because, unlike other
// VP intrinsics, these two have a defined result on lanes where the mask is
// false.
//
// llvm.vp.select(cond,on_true,on_false,vlen)
BEGIN_REGISTER_VP(vp_select, None, 3, VP_SELECT, -1)
VP_PROPERTY_FUNCTIONAL_OPC(Select)
END_REGISTER_VP(vp_select, VP_SELECT)
// llvm.vp.merge(cond,on_true,on_false,pivot)
BEGIN_REGISTER_VP(vp_merge, None, 3, VP_MERGE, -1)
END_REGISTER_VP(vp_merge, VP_MERGE)
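// llvm.experimental.vp.splice(x,y,imm,mask,vlen1,vlen2)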
BEGIN_REGISTER_VP(experimental_vp_splice, 3, 5, EXPERIMENTAL_VP_SPLICE, -1)
END_REGISTER_VP(experimental_vp_splice, EXPERIMENTAL_VP_SPLICE)
///// } Shuffles
#undef BEGIN_REGISTER_VP
#undef BEGIN_REGISTER_VP_INTRINSIC
#undef BEGIN_REGISTER_VP_SDNODE
#undef END_REGISTER_VP
#undef END_REGISTER_VP_INTRINSIC
#undef END_REGISTER_VP_SDNODE
#undef HELPER_MAP_VPID_TO_VPSD
#undef VP_PROPERTY_BINARYOP
#undef VP_PROPERTY_CASTOP
#undef VP_PROPERTY_CMP
#undef VP_PROPERTY_CONSTRAINEDFP
#undef VP_PROPERTY_FUNCTIONAL_INTRINSIC
#undef VP_PROPERTY_FUNCTIONAL_OPC
#undef VP_PROPERTY_MEMOP
#undef VP_PROPERTY_REDUCTION