diff --git a/lib/CodeGen/CGBuiltin.cpp b/lib/CodeGen/CGBuiltin.cpp
index ca7b4691ff5f..3ab814a37868 100644
--- a/lib/CodeGen/CGBuiltin.cpp
+++ b/lib/CodeGen/CGBuiltin.cpp
@@ -11152,6 +11152,31 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
     return EmitX86MaskedCompareResult(*this, Fpclass, NumElts, MaskIn);
   }
 
+  case X86::BI__builtin_ia32_vpshufbitqmb128_mask:
+  case X86::BI__builtin_ia32_vpshufbitqmb256_mask:
+  case X86::BI__builtin_ia32_vpshufbitqmb512_mask: {
+    unsigned NumElts = Ops[0]->getType()->getVectorNumElements();
+    Value *MaskIn = Ops[2];
+    Ops.erase(&Ops[2]);
+
+    Intrinsic::ID ID;
+    switch (BuiltinID) {
+    default: llvm_unreachable("Unsupported intrinsic!");
+    case X86::BI__builtin_ia32_vpshufbitqmb128_mask:
+      ID = Intrinsic::x86_avx512_vpshufbitqmb_128;
+      break;
+    case X86::BI__builtin_ia32_vpshufbitqmb256_mask:
+      ID = Intrinsic::x86_avx512_vpshufbitqmb_256;
+      break;
+    case X86::BI__builtin_ia32_vpshufbitqmb512_mask:
+      ID = Intrinsic::x86_avx512_vpshufbitqmb_512;
+      break;
+    }
+
+    Value *Fpclass = Builder.CreateCall(CGM.getIntrinsic(ID), Ops);
+    return EmitX86MaskedCompareResult(*this, Fpclass, NumElts, MaskIn);
+  }
+
   // packed comparison intrinsics
   case X86::BI__builtin_ia32_cmpeqps:
   case X86::BI__builtin_ia32_cmpeqpd:
diff --git a/test/CodeGen/avx512bitalg-builtins.c b/test/CodeGen/avx512bitalg-builtins.c
index 5770c662f07e..b289c237cfc0 100644
--- a/test/CodeGen/avx512bitalg-builtins.c
+++ b/test/CodeGen/avx512bitalg-builtins.c
@@ -42,13 +42,14 @@ __m512i test_mm512_maskz_popcnt_epi8(__mmask64 __U, __m512i __B) {
 
 __mmask64 test_mm512_mask_bitshuffle_epi64_mask(__mmask64 __U, __m512i __A, __m512i __B) {
   // CHECK-LABEL: @test_mm512_mask_bitshuffle_epi64_mask
-  // CHECK: @llvm.x86.avx512.mask.vpshufbitqmb.512
+  // CHECK: @llvm.x86.avx512.vpshufbitqmb.512
+  // CHECK: and <64 x i1> %{{.*}}, %{{.*}}
   return _mm512_mask_bitshuffle_epi64_mask(__U, __A, __B);
 }
 
 __mmask64 test_mm512_bitshuffle_epi64_mask(__m512i __A, __m512i __B) {
   // CHECK-LABEL: @test_mm512_bitshuffle_epi64_mask
-  // CHECK: @llvm.x86.avx512.mask.vpshufbitqmb.512
+  // CHECK: @llvm.x86.avx512.vpshufbitqmb.512
   return _mm512_bitshuffle_epi64_mask(__A, __B);
 }
 
diff --git a/test/CodeGen/avx512vlbitalg-builtins.c b/test/CodeGen/avx512vlbitalg-builtins.c
index 3dd5b68fd463..d54a607f7178 100644
--- a/test/CodeGen/avx512vlbitalg-builtins.c
+++ b/test/CodeGen/avx512vlbitalg-builtins.c
@@ -80,25 +80,27 @@ __m128i test_mm_maskz_popcnt_epi8(__mmask16 __U, __m128i __B) {
 
 __mmask32 test_mm256_mask_bitshuffle_epi64_mask(__mmask32 __U, __m256i __A, __m256i __B) {
   // CHECK-LABEL: @test_mm256_mask_bitshuffle_epi64_mask
-  // CHECK: @llvm.x86.avx512.mask.vpshufbitqmb.256
+  // CHECK: @llvm.x86.avx512.vpshufbitqmb.256
+  // CHECK: and <32 x i1> %{{.*}}, %{{.*}}
   return _mm256_mask_bitshuffle_epi64_mask(__U, __A, __B);
 }
 
 __mmask32 test_mm256_bitshuffle_epi64_mask(__m256i __A, __m256i __B) {
   // CHECK-LABEL: @test_mm256_bitshuffle_epi64_mask
-  // CHECK: @llvm.x86.avx512.mask.vpshufbitqmb.256
+  // CHECK: @llvm.x86.avx512.vpshufbitqmb.256
   return _mm256_bitshuffle_epi64_mask(__A, __B);
 }
 
 __mmask16 test_mm_mask_bitshuffle_epi64_mask(__mmask16 __U, __m128i __A, __m128i __B) {
   // CHECK-LABEL: @test_mm_mask_bitshuffle_epi64_mask
-  // CHECK: @llvm.x86.avx512.mask.vpshufbitqmb.128
+  // CHECK: @llvm.x86.avx512.vpshufbitqmb.128
+  // CHECK: and <16 x i1> %{{.*}}, %{{.*}}
   return _mm_mask_bitshuffle_epi64_mask(__U, __A, __B);
 }
 
 __mmask16 test_mm_bitshuffle_epi64_mask(__m128i __A, __m128i __B) {
   // CHECK-LABEL: @test_mm_bitshuffle_epi64_mask
-  // CHECK: @llvm.x86.avx512.mask.vpshufbitqmb.128
+  // CHECK: @llvm.x86.avx512.vpshufbitqmb.128
   return _mm_bitshuffle_epi64_mask(__A, __B);
 }
 