Warning: That file was not part of the compilation database. It may have many parsing errors.

1/* Copyright (C) 2013-2017 Free Software Foundation, Inc.
2
3 This file is part of GCC.
4
5 GCC is free software; you can redistribute it and/or modify
6 it under the terms of the GNU General Public License as published by
7 the Free Software Foundation; either version 3, or (at your option)
8 any later version.
9
10 GCC is distributed in the hope that it will be useful,
11 but WITHOUT ANY WARRANTY; without even the implied warranty of
12 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 GNU General Public License for more details.
14
15 Under Section 7 of GPL version 3, you are granted additional
16 permissions described in the GCC Runtime Library Exception, version
17 3.1, as published by the Free Software Foundation.
18
19 You should have received a copy of the GNU General Public License and
20 a copy of the GCC Runtime Library Exception along with this program;
21 see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
22 <http://www.gnu.org/licenses/>. */
23
24#ifndef _IMMINTRIN_H_INCLUDED
25#error "Never use <avx512vbmi2vlintrin.h> directly; include <immintrin.h> instead."
26#endif
27
28#ifndef _AVX512VBMI2VLINTRIN_H_INCLUDED
29#define _AVX512VBMI2VLINTRIN_H_INCLUDED
30
31#if !defined(__AVX512VL__) || !defined(__AVX512VBMI2__)
32#pragma GCC push_options
33#pragma GCC target("avx512vbmi2,avx512vl")
34#define __DISABLE_AVX512VBMI2VL__
#endif /* __AVX512VBMI2VL__ */
36
/* VPCOMPRESSB, merge masking: bytes of __C selected by mask __B are
   packed contiguously toward the low end of the result; the remaining
   result bytes are copied from __A.  */
extern __inline __m128i
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_mask_compress_epi8 (__m128i __A, __mmask16 __B, __m128i __C)
{
  return (__m128i) __builtin_ia32_compressqi128_mask ((__v16qi)__C,
						(__v16qi)__A, (__mmask16)__B);
}

/* VPCOMPRESSB, zero masking: as above, but unselected result bytes
   are zeroed (second builtin operand is the zero vector).  */
extern __inline __m128i
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_maskz_compress_epi8 (__mmask16 __A, __m128i __B)
{
  return (__m128i) __builtin_ia32_compressqi128_mask ((__v16qi) __B,
				(__v16qi) _mm_setzero_si128 (), (__mmask16) __A);
}
52
53
/* VPCOMPRESSW to memory: store the 16-bit elements of __C selected by
   mask __B contiguously at unaligned address __A.  */
extern __inline void
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm256_mask_compressstoreu_epi16 (void * __A, __mmask16 __B, __m256i __C)
{
  __builtin_ia32_compressstoreuhi256_mask ((__v16hi *) __A, (__v16hi) __C,
					   (__mmask16) __B);
}
61
/* VPCOMPRESSW, merge masking (128-bit): selected words of __C are
   packed toward the low end; remaining words come from __A.  */
extern __inline __m128i
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_mask_compress_epi16 (__m128i __A, __mmask8 __B, __m128i __C)
{
  return (__m128i) __builtin_ia32_compresshi128_mask ((__v8hi)__C, (__v8hi)__A,
						      (__mmask8)__B);
}

/* VPCOMPRESSW, zero masking (128-bit).  */
extern __inline __m128i
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_maskz_compress_epi16 (__mmask8 __A, __m128i __B)
{
  return (__m128i) __builtin_ia32_compresshi128_mask ((__v8hi) __B,
				(__v8hi) _mm_setzero_si128 (), (__mmask8) __A);
}

/* VPCOMPRESSW, merge masking (256-bit).  */
extern __inline __m256i
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm256_mask_compress_epi16 (__m256i __A, __mmask16 __B, __m256i __C)
{
  return (__m256i) __builtin_ia32_compresshi256_mask ((__v16hi)__C,
						(__v16hi)__A, (__mmask16)__B);
}

/* VPCOMPRESSW, zero masking (256-bit).  */
extern __inline __m256i
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm256_maskz_compress_epi16 (__mmask16 __A, __m256i __B)
{
  return (__m256i) __builtin_ia32_compresshi256_mask ((__v16hi) __B,
			(__v16hi) _mm256_setzero_si256 (), (__mmask16) __A);
}
93
/* VPCOMPRESSB to memory: store the bytes of __C selected by mask __B
   contiguously at unaligned address __A.  */
extern __inline void
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_mask_compressstoreu_epi8 (void * __A, __mmask16 __B, __m128i __C)
{
  __builtin_ia32_compressstoreuqi128_mask ((__v16qi *) __A, (__v16qi) __C,
					   (__mmask16) __B);
}

/* VPCOMPRESSW to memory (128-bit): store the words of __C selected by
   mask __B contiguously at unaligned address __A.  */
extern __inline void
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_mask_compressstoreu_epi16 (void * __A, __mmask8 __B, __m128i __C)
{
  __builtin_ia32_compressstoreuhi128_mask ((__v8hi *) __A, (__v8hi) __C,
					   (__mmask8) __B);
}
109
/* VPEXPANDB, merge masking: low-order bytes of __C are scattered to the
   result positions selected by mask __B; other positions keep __A.  */
extern __inline __m128i
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_mask_expand_epi8 (__m128i __A, __mmask16 __B, __m128i __C)
{
  return (__m128i) __builtin_ia32_expandqi128_mask ((__v16qi) __C,
						    (__v16qi) __A,
						    (__mmask16) __B);
}

/* VPEXPANDB, zero masking.  */
extern __inline __m128i
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_maskz_expand_epi8 (__mmask16 __A, __m128i __B)
{
  return (__m128i) __builtin_ia32_expandqi128_maskz ((__v16qi) __B,
				(__v16qi) _mm_setzero_si128 (), (__mmask16) __A);
}

/* VPEXPANDB from memory, merge masking: expand bytes loaded from
   unaligned address __C into the positions selected by __B.  */
extern __inline __m128i
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_mask_expandloadu_epi8 (__m128i __A, __mmask16 __B, const void * __C)
{
  return (__m128i) __builtin_ia32_expandloadqi128_mask ((const __v16qi *) __C,
						(__v16qi) __A, (__mmask16) __B);
}

/* VPEXPANDB from memory, zero masking.  */
extern __inline __m128i
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_maskz_expandloadu_epi8 (__mmask16 __A, const void * __B)
{
  return (__m128i) __builtin_ia32_expandloadqi128_maskz ((const __v16qi *) __B,
				(__v16qi) _mm_setzero_si128 (), (__mmask16) __A);
}
142
/* VPEXPANDW, merge masking (128-bit): low-order words of __C are
   scattered to the positions selected by mask __B; others keep __A.  */
extern __inline __m128i
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_mask_expand_epi16 (__m128i __A, __mmask8 __B, __m128i __C)
{
  return (__m128i) __builtin_ia32_expandhi128_mask ((__v8hi) __C,
						    (__v8hi) __A,
						    (__mmask8) __B);
}

/* VPEXPANDW, zero masking (128-bit).  */
extern __inline __m128i
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_maskz_expand_epi16 (__mmask8 __A, __m128i __B)
{
  return (__m128i) __builtin_ia32_expandhi128_maskz ((__v8hi) __B,
				(__v8hi) _mm_setzero_si128 (), (__mmask8) __A);
}

/* VPEXPANDW from memory, merge masking (128-bit).  */
extern __inline __m128i
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_mask_expandloadu_epi16 (__m128i __A, __mmask8 __B, const void * __C)
{
  return (__m128i) __builtin_ia32_expandloadhi128_mask ((const __v8hi *) __C,
						(__v8hi) __A, (__mmask8) __B);
}

/* VPEXPANDW from memory, zero masking (128-bit).  */
extern __inline __m128i
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_maskz_expandloadu_epi16 (__mmask8 __A, const void * __B)
{
  return (__m128i) __builtin_ia32_expandloadhi128_maskz ((const __v8hi *) __B,
				(__v8hi) _mm_setzero_si128 (), (__mmask8) __A);
}
/* VPEXPANDW, merge masking (256-bit).  */
extern __inline __m256i
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm256_mask_expand_epi16 (__m256i __A, __mmask16 __B, __m256i __C)
{
  return (__m256i) __builtin_ia32_expandhi256_mask ((__v16hi) __C,
						    (__v16hi) __A,
						    (__mmask16) __B);
}

/* VPEXPANDW, zero masking (256-bit).  */
extern __inline __m256i
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm256_maskz_expand_epi16 (__mmask16 __A, __m256i __B)
{
  return (__m256i) __builtin_ia32_expandhi256_maskz ((__v16hi) __B,
			(__v16hi) _mm256_setzero_si256 (), (__mmask16) __A);
}

/* VPEXPANDW from memory, merge masking (256-bit).  */
extern __inline __m256i
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm256_mask_expandloadu_epi16 (__m256i __A, __mmask16 __B, const void * __C)
{
  return (__m256i) __builtin_ia32_expandloadhi256_mask ((const __v16hi *) __C,
					      (__v16hi) __A, (__mmask16) __B);
}

/* VPEXPANDW from memory, zero masking (256-bit).  */
extern __inline __m256i
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm256_maskz_expandloadu_epi16 (__mmask16 __A, const void * __B)
{
  return (__m256i) __builtin_ia32_expandloadhi256_maskz ((const __v16hi *) __B,
			(__v16hi) _mm256_setzero_si256 (), (__mmask16) __A);
}
207
208#ifdef __OPTIMIZE__
/* VPSHRD* immediate forms (inline functions, __OPTIMIZE__ only so the
   shift count folds to a constant).  Each lane of the result is taken
   from the double-width concatenation of the two source lanes shifted
   right by the immediate __C/__D/__E bits.  mask_ variants merge with
   __A under mask __B; maskz_ variants zero unselected lanes.  */
extern __inline __m256i
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm256_shrdi_epi16 (__m256i __A, __m256i __B, int __C)
{
  return (__m256i) __builtin_ia32_vpshrd_v16hi ((__v16hi)__A, (__v16hi) __B,
						__C);
}

/* VPSHRDW, 256-bit, merge masking.  */
extern __inline __m256i
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm256_mask_shrdi_epi16 (__m256i __A, __mmask16 __B, __m256i __C, __m256i __D,
			 int __E)
{
  return (__m256i)__builtin_ia32_vpshrd_v16hi_mask ((__v16hi)__C,
			(__v16hi) __D, __E, (__v16hi) __A, (__mmask16)__B);
}

/* VPSHRDW, 256-bit, zero masking.  */
extern __inline __m256i
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm256_maskz_shrdi_epi16 (__mmask16 __A, __m256i __B, __m256i __C, int __D)
{
  return (__m256i)__builtin_ia32_vpshrd_v16hi_mask ((__v16hi)__B,
	(__v16hi) __C, __D, (__v16hi) _mm256_setzero_si256 (), (__mmask16)__A);
}

/* VPSHRDD, 256-bit, merge masking.  */
extern __inline __m256i
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm256_mask_shrdi_epi32 (__m256i __A, __mmask8 __B, __m256i __C, __m256i __D,
			 int __E)
{
  return (__m256i)__builtin_ia32_vpshrd_v8si_mask ((__v8si)__C, (__v8si) __D,
				__E, (__v8si) __A, (__mmask8)__B);
}

/* VPSHRDD, 256-bit, zero masking.  */
extern __inline __m256i
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm256_maskz_shrdi_epi32 (__mmask8 __A, __m256i __B, __m256i __C, int __D)
{
  return (__m256i)__builtin_ia32_vpshrd_v8si_mask ((__v8si)__B, (__v8si) __C,
		__D, (__v8si) _mm256_setzero_si256 (), (__mmask8)__A);
}

/* VPSHRDD, 256-bit, unmasked.  */
extern __inline __m256i
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm256_shrdi_epi32 (__m256i __A, __m256i __B, int __C)
{
  return (__m256i) __builtin_ia32_vpshrd_v8si ((__v8si)__A, (__v8si) __B, __C);
}

/* VPSHRDQ, 256-bit, merge masking.  */
extern __inline __m256i
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm256_mask_shrdi_epi64 (__m256i __A, __mmask8 __B, __m256i __C, __m256i __D,
			 int __E)
{
  return (__m256i)__builtin_ia32_vpshrd_v4di_mask ((__v4di)__C, (__v4di) __D,
				__E, (__v4di) __A, (__mmask8)__B);
}

/* VPSHRDQ, 256-bit, zero masking.  */
extern __inline __m256i
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm256_maskz_shrdi_epi64 (__mmask8 __A, __m256i __B, __m256i __C, int __D)
{
  return (__m256i)__builtin_ia32_vpshrd_v4di_mask ((__v4di)__B, (__v4di) __C,
		__D, (__v4di) _mm256_setzero_si256 (), (__mmask8)__A);
}

/* VPSHRDQ, 256-bit, unmasked.  */
extern __inline __m256i
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm256_shrdi_epi64 (__m256i __A, __m256i __B, int __C)
{
  return (__m256i) __builtin_ia32_vpshrd_v4di ((__v4di)__A, (__v4di) __B, __C);
}

/* VPSHRDW, 128-bit, merge masking.  */
extern __inline __m128i
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_mask_shrdi_epi16 (__m128i __A, __mmask8 __B, __m128i __C, __m128i __D,
		      int __E)
{
  return (__m128i)__builtin_ia32_vpshrd_v8hi_mask ((__v8hi)__C, (__v8hi) __D,
				__E, (__v8hi) __A, (__mmask8)__B);
}

/* VPSHRDW, 128-bit, zero masking.  */
extern __inline __m128i
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_maskz_shrdi_epi16 (__mmask8 __A, __m128i __B, __m128i __C, int __D)
{
  return (__m128i)__builtin_ia32_vpshrd_v8hi_mask ((__v8hi)__B, (__v8hi) __C,
		__D, (__v8hi) _mm_setzero_si128 (), (__mmask8)__A);
}

/* VPSHRDW, 128-bit, unmasked.  */
extern __inline __m128i
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_shrdi_epi16 (__m128i __A, __m128i __B, int __C)
{
  return (__m128i) __builtin_ia32_vpshrd_v8hi ((__v8hi)__A, (__v8hi) __B, __C);
}

/* VPSHRDD, 128-bit, merge masking.  */
extern __inline __m128i
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_mask_shrdi_epi32 (__m128i __A, __mmask8 __B, __m128i __C, __m128i __D,
		      int __E)
{
  return (__m128i)__builtin_ia32_vpshrd_v4si_mask ((__v4si)__C, (__v4si) __D,
				__E, (__v4si) __A, (__mmask8)__B);
}

/* VPSHRDD, 128-bit, zero masking.  */
extern __inline __m128i
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_maskz_shrdi_epi32 (__mmask8 __A, __m128i __B, __m128i __C, int __D)
{
  return (__m128i)__builtin_ia32_vpshrd_v4si_mask ((__v4si)__B, (__v4si) __C,
		__D, (__v4si) _mm_setzero_si128 (), (__mmask8)__A);
}

/* VPSHRDD, 128-bit, unmasked.  */
extern __inline __m128i
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_shrdi_epi32 (__m128i __A, __m128i __B, int __C)
{
  return (__m128i) __builtin_ia32_vpshrd_v4si ((__v4si)__A, (__v4si) __B, __C);
}

/* VPSHRDQ, 128-bit, merge masking.  */
extern __inline __m128i
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_mask_shrdi_epi64 (__m128i __A, __mmask8 __B, __m128i __C, __m128i __D,
		      int __E)
{
  return (__m128i)__builtin_ia32_vpshrd_v2di_mask ((__v2di)__C, (__v2di) __D,
				__E, (__v2di) __A, (__mmask8)__B);
}

/* VPSHRDQ, 128-bit, zero masking.  */
extern __inline __m128i
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_maskz_shrdi_epi64 (__mmask8 __A, __m128i __B, __m128i __C, int __D)
{
  return (__m128i)__builtin_ia32_vpshrd_v2di_mask ((__v2di)__B, (__v2di) __C,
		__D, (__v2di) _mm_setzero_si128 (), (__mmask8)__A);
}

/* VPSHRDQ, 128-bit, unmasked.  */
extern __inline __m128i
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_shrdi_epi64 (__m128i __A, __m128i __B, int __C)
{
  return (__m128i) __builtin_ia32_vpshrd_v2di ((__v2di)__A, (__v2di) __B, __C);
}
353
/* VPSHLD* immediate forms: left-shift counterparts of the VPSHRD*
   intrinsics above.  Same masking conventions: mask_ merges with __A
   under mask __B, maskz_ zeroes unselected lanes.  */
extern __inline __m256i
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm256_shldi_epi16 (__m256i __A, __m256i __B, int __C)
{
  return (__m256i) __builtin_ia32_vpshld_v16hi ((__v16hi)__A, (__v16hi) __B,
						__C);
}

/* VPSHLDW, 256-bit, merge masking.  */
extern __inline __m256i
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm256_mask_shldi_epi16 (__m256i __A, __mmask16 __B, __m256i __C, __m256i __D,
			 int __E)
{
  return (__m256i)__builtin_ia32_vpshld_v16hi_mask ((__v16hi)__C,
			(__v16hi) __D, __E, (__v16hi) __A, (__mmask16)__B);
}

/* VPSHLDW, 256-bit, zero masking.  */
extern __inline __m256i
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm256_maskz_shldi_epi16 (__mmask16 __A, __m256i __B, __m256i __C, int __D)
{
  return (__m256i)__builtin_ia32_vpshld_v16hi_mask ((__v16hi)__B,
	(__v16hi) __C, __D, (__v16hi) _mm256_setzero_si256 (), (__mmask16)__A);
}

/* VPSHLDD, 256-bit, merge masking.  */
extern __inline __m256i
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm256_mask_shldi_epi32 (__m256i __A, __mmask8 __B, __m256i __C, __m256i __D,
			 int __E)
{
  return (__m256i)__builtin_ia32_vpshld_v8si_mask ((__v8si)__C, (__v8si) __D,
				__E, (__v8si) __A, (__mmask8)__B);
}

/* VPSHLDD, 256-bit, zero masking.  */
extern __inline __m256i
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm256_maskz_shldi_epi32 (__mmask8 __A, __m256i __B, __m256i __C, int __D)
{
  return (__m256i)__builtin_ia32_vpshld_v8si_mask ((__v8si)__B, (__v8si) __C,
		__D, (__v8si) _mm256_setzero_si256 (), (__mmask8)__A);
}

/* VPSHLDD, 256-bit, unmasked.  */
extern __inline __m256i
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm256_shldi_epi32 (__m256i __A, __m256i __B, int __C)
{
  return (__m256i) __builtin_ia32_vpshld_v8si ((__v8si)__A, (__v8si) __B, __C);
}

/* VPSHLDQ, 256-bit, merge masking.  */
extern __inline __m256i
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm256_mask_shldi_epi64 (__m256i __A, __mmask8 __B, __m256i __C, __m256i __D,
			 int __E)
{
  return (__m256i)__builtin_ia32_vpshld_v4di_mask ((__v4di)__C, (__v4di) __D,
				__E, (__v4di) __A, (__mmask8)__B);
}

/* VPSHLDQ, 256-bit, zero masking.  */
extern __inline __m256i
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm256_maskz_shldi_epi64 (__mmask8 __A, __m256i __B, __m256i __C, int __D)
{
  return (__m256i)__builtin_ia32_vpshld_v4di_mask ((__v4di)__B, (__v4di) __C,
		__D, (__v4di) _mm256_setzero_si256 (), (__mmask8)__A);
}

/* VPSHLDQ, 256-bit, unmasked.  */
extern __inline __m256i
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm256_shldi_epi64 (__m256i __A, __m256i __B, int __C)
{
  return (__m256i) __builtin_ia32_vpshld_v4di ((__v4di)__A, (__v4di) __B, __C);
}

/* VPSHLDW, 128-bit, merge masking.  */
extern __inline __m128i
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_mask_shldi_epi16 (__m128i __A, __mmask8 __B, __m128i __C, __m128i __D,
		      int __E)
{
  return (__m128i)__builtin_ia32_vpshld_v8hi_mask ((__v8hi)__C, (__v8hi) __D,
				__E, (__v8hi) __A, (__mmask8)__B);
}

/* VPSHLDW, 128-bit, zero masking.  */
extern __inline __m128i
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_maskz_shldi_epi16 (__mmask8 __A, __m128i __B, __m128i __C, int __D)
{
  return (__m128i)__builtin_ia32_vpshld_v8hi_mask ((__v8hi)__B, (__v8hi) __C,
		__D, (__v8hi) _mm_setzero_si128 (), (__mmask8)__A);
}

/* VPSHLDW, 128-bit, unmasked.  */
extern __inline __m128i
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_shldi_epi16 (__m128i __A, __m128i __B, int __C)
{
  return (__m128i) __builtin_ia32_vpshld_v8hi ((__v8hi)__A, (__v8hi) __B, __C);
}

/* VPSHLDD, 128-bit, merge masking.  */
extern __inline __m128i
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_mask_shldi_epi32 (__m128i __A, __mmask8 __B, __m128i __C, __m128i __D,
		      int __E)
{
  return (__m128i)__builtin_ia32_vpshld_v4si_mask ((__v4si)__C, (__v4si) __D,
				__E, (__v4si) __A, (__mmask8)__B);
}

/* VPSHLDD, 128-bit, zero masking.  */
extern __inline __m128i
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_maskz_shldi_epi32 (__mmask8 __A, __m128i __B, __m128i __C, int __D)
{
  return (__m128i)__builtin_ia32_vpshld_v4si_mask ((__v4si)__B, (__v4si) __C,
		__D, (__v4si) _mm_setzero_si128 (), (__mmask8)__A);
}

/* VPSHLDD, 128-bit, unmasked.  */
extern __inline __m128i
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_shldi_epi32 (__m128i __A, __m128i __B, int __C)
{
  return (__m128i) __builtin_ia32_vpshld_v4si ((__v4si)__A, (__v4si) __B, __C);
}

/* VPSHLDQ, 128-bit, merge masking.  */
extern __inline __m128i
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_mask_shldi_epi64 (__m128i __A, __mmask8 __B, __m128i __C, __m128i __D,
		      int __E)
{
  return (__m128i)__builtin_ia32_vpshld_v2di_mask ((__v2di)__C, (__v2di) __D,
				__E, (__v2di) __A, (__mmask8)__B);
}

/* VPSHLDQ, 128-bit, zero masking.  */
extern __inline __m128i
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_maskz_shldi_epi64 (__mmask8 __A, __m128i __B, __m128i __C, int __D)
{
  return (__m128i)__builtin_ia32_vpshld_v2di_mask ((__v2di)__B, (__v2di) __C,
		__D, (__v2di) _mm_setzero_si128 (), (__mmask8)__A);
}

/* VPSHLDQ, 128-bit, unmasked.  */
extern __inline __m128i
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_shldi_epi64 (__m128i __A, __m128i __B, int __C)
{
  return (__m128i) __builtin_ia32_vpshld_v2di ((__v2di)__A, (__v2di) __B, __C);
}
498#else
499#define _mm256_shrdi_epi16(A, B, C) \
500 ((__m256i) __builtin_ia32_vpshrd_v16hi ((__v16hi)(__m256i)(A), \
501 (__v16hi)(__m256i)(B),(int)(C))
502#define _mm256_mask_shrdi_epi16(A, B, C, D, E) \
503 ((__m256i) __builtin_ia32_vpshrd_v16hi_mask ((__v16hi)(__m256i)(C), \
504 (__v16hi)(__m256i)(D), (int)(E), (__v16hi)(__m256i)(A),(__mmask16)(B))
505#define _mm256_maskz_shrdi_epi16(A, B, C, D) \
506 ((__m256i) __builtin_ia32_vpshrd_v16hi_mask ((__v16hi)(__m256i)(B), \
507 (__v16hi)(__m256i)(C),(int)(D), \
508 (__v16hi)(__m256i)_mm256_setzero_si256 (), (__mmask16)(A))
509#define _mm256_shrdi_epi32(A, B, C) \
510 ((__m256i) __builtin_ia32_vpshrd_v8si ((__v8si)(__m256i)(A), \
511 (__v8si)(__m256i)(B),(int)(C))
512#define _mm256_mask_shrdi_epi32(A, B, C, D, E) \
513 ((__m256i) __builtin_ia32_vpshrd_v8si_mask ((__v8si)(__m256i)(C), \
514 (__v8si)(__m256i)(D), (int)(E), (__v8si)(__m256i)(A),(__mmask8)(B))
515#define _mm256_maskz_shrdi_epi32(A, B, C, D) \
516 ((__m256i) __builtin_ia32_vpshrd_v8si_mask ((__v8si)(__m256i)(B), \
517 (__v8si)(__m256i)(C),(int)(D), \
518 (__v8si)(__m256i)_mm256_setzero_si256 (), (__mmask8)(A))
519#define _mm256_shrdi_epi64(A, B, C) \
520 ((__m256i) __builtin_ia32_vpshrd_v4di ((__v4di)(__m256i)(A), \
521 (__v4di)(__m256i)(B),(int)(C))
522#define _mm256_mask_shrdi_epi64(A, B, C, D, E) \
523 ((__m256i) __builtin_ia32_vpshrd_v4di_mask ((__v4di)(__m256i)(C), \
524 (__v4di)(__m256i)(D), (int)(E), (__v4di)(__m256i)(A),(__mmask8)(B))
525#define _mm256_maskz_shrdi_epi64(A, B, C, D) \
526 ((__m256i) __builtin_ia32_vpshrd_v4di_mask ((__v4di)(__m256i)(B), \
527 (__v4di)(__m256i)(C),(int)(D), \
528 (__v4di)(__m256i)_mm256_setzero_si256 (), (__mmask8)(A))
529#define _mm_shrdi_epi16(A, B, C) \
530 ((__m128i) __builtin_ia32_vpshrd_v8hi ((__v8hi)(__m128i)(A), \
531 (__v8hi)(__m128i)(B),(int)(C))
532#define _mm_mask_shrdi_epi16(A, B, C, D, E) \
533 ((__m128i) __builtin_ia32_vpshrd_v8hi_mask ((__v8hi)(__m128i)(C), \
534 (__v8hi)(__m128i)(D), (int)(E), (__v8hi)(__m128i)(A),(__mmask8)(B))
535#define _mm_maskz_shrdi_epi16(A, B, C, D) \
536 ((__m128i) __builtin_ia32_vpshrd_v8hi_mask ((__v8hi)(__m128i)(B), \
537 (__v8hi)(__m128i)(C),(int)(D), \
538 (__v8hi)(__m128i)_mm_setzero_si128 (), (__mmask8)(A))
539#define _mm_shrdi_epi32(A, B, C) \
540 ((__m128i) __builtin_ia32_vpshrd_v4si ((__v4si)(__m128i)(A), \
541 (__v4si)(__m128i)(B),(int)(C))
542#define _mm_mask_shrdi_epi32(A, B, C, D, E) \
543 ((__m128i) __builtin_ia32_vpshrd_v4si_mask ((__v4si)(__m128i)(C), \
544 (__v4si)(__m128i)(D), (int)(E), (__v4si)(__m128i)(A),(__mmask16)(B))
545#define _mm_maskz_shrdi_epi32(A, B, C, D) \
546 ((__m128i) __builtin_ia32_vpshrd_v4si_mask ((__v4si)(__m128i)(B), \
547 (__v4si)(__m128i)(C),(int)(D), \
548 (__v4si)(__m128i)_mm_setzero_si128 (), (__mmask8)(A))
549#define _mm_shrdi_epi64(A, B, C) \
550 ((__m128i) __builtin_ia32_vpshrd_v2di ((__v2di)(__m128i)(A), \
551 (__v2di)(__m128i)(B),(int)(C))
552#define _mm_mask_shrdi_epi64(A, B, C, D, E) \
553 ((__m128i) __builtin_ia32_vpshrd_v2di_mask ((__v2di)(__m128i)(C), \
554 (__v2di)(__m128i)(D), (int)(E), (__v2di)(__m128i)(A),(__mmask8)(B))
555#define _mm_maskz_shrdi_epi64(A, B, C, D) \
556 ((__m128i) __builtin_ia32_vpshrd_v2di_mask ((__v2di)(__m128i)(B), \
557 (__v2di)(__m128i)(C),(int)(D), \
558 (__v2di)(__m128i)_mm_setzero_si128 (), (__mmask8)(A))
559#define _mm256_shldi_epi16(A, B, C) \
560 ((__m256i) __builtin_ia32_vpshld_v16hi ((__v16hi)(__m256i)(A), \
561 (__v16hi)(__m256i)(B),(int)(C))
562#define _mm256_mask_shldi_epi16(A, B, C, D, E) \
563 ((__m256i) __builtin_ia32_vpshld_v16hi_mask ((__v16hi)(__m256i)(C), \
564 (__v16hi)(__m256i)(D), (int)(E), (__v16hi)(__m256i)(A),(__mmask16)(B))
565#define _mm256_maskz_shldi_epi16(A, B, C, D) \
566 ((__m256i) __builtin_ia32_vpshld_v16hi_mask ((__v16hi)(__m256i)(B), \
567 (__v16hi)(__m256i)(C),(int)(D), \
568 (__v16hi)(__m256i)_mm256_setzero_si256 (), (__mmask16)(A))
569#define _mm256_shldi_epi32(A, B, C) \
570 ((__m256i) __builtin_ia32_vpshld_v8si ((__v8si)(__m256i)(A), \
571 (__v8si)(__m256i)(B),(int)(C))
572#define _mm256_mask_shldi_epi32(A, B, C, D, E) \
573 ((__m256i) __builtin_ia32_vpshld_v8si_mask ((__v8si)(__m256i)(C), \
574 (__v8si)(__m256i)(D), (int)(E), (__v8si)(__m256i)(A),(__mmask8)(B))
575#define _mm256_maskz_shldi_epi32(A, B, C, D) \
576 ((__m256i) __builtin_ia32_vpshld_v8si_mask ((__v8si)(__m256i)(B), \
577 (__v8si)(__m256i)(C),(int)(D), \
578 (__v8si)(__m256i)_mm256_setzero_si256 (), (__mmask8)(A))
579#define _mm256_shldi_epi64(A, B, C) \
580 ((__m256i) __builtin_ia32_vpshld_v4di ((__v4di)(__m256i)(A), \
581 (__v4di)(__m256i)(B),(int)(C))
582#define _mm256_mask_shldi_epi64(A, B, C, D, E) \
583 ((__m256i) __builtin_ia32_vpshld_v4di_mask ((__v4di)(__m256i)(C), \
584 (__v4di)(__m256i)(D), (int)(E), (__v4di)(__m256i)(A),(__mmask8)(B))
585#define _mm256_maskz_shldi_epi64(A, B, C, D) \
586 ((__m256i) __builtin_ia32_vpshld_v4di_mask ((__v4di)(__m256i)(B), \
587 (__v4di)(__m256i)(C),(int)(D), \
588 (__v4di)(__m256i)_mm256_setzero_si256 (), (__mmask8)(A))
589#define _mm_shldi_epi16(A, B, C) \
590 ((__m128i) __builtin_ia32_vpshld_v8hi ((__v8hi)(__m128i)(A), \
591 (__v8hi)(__m128i)(B),(int)(C))
592#define _mm_mask_shldi_epi16(A, B, C, D, E) \
593 ((__m128i) __builtin_ia32_vpshld_v8hi_mask ((__v8hi)(__m128i)(C), \
594 (__v8hi)(__m128i)(D), (int)(E), (__v8hi)(__m128i)(A),(__mmask8)(B))
595#define _mm_maskz_shldi_epi16(A, B, C, D) \
596 ((__m128i) __builtin_ia32_vpshld_v8hi_mask ((__v8hi)(__m128i)(B), \
597 (__v8hi)(__m128i)(C),(int)(D), \
598 (__v8hi)(__m128i)_mm_setzero_si128 (), (__mmask8)(A))
599#define _mm_shldi_epi32(A, B, C) \
600 ((__m128i) __builtin_ia32_vpshld_v4si ((__v4si)(__m128i)(A), \
601 (__v4si)(__m128i)(B),(int)(C))
602#define _mm_mask_shldi_epi32(A, B, C, D, E) \
603 ((__m128i) __builtin_ia32_vpshld_v4si_mask ((__v4si)(__m128i)(C), \
604 (__v4si)(__m128i)(D), (int)(E), (__v4si)(__m128i)(A),(__mmask16)(B))
605#define _mm_maskz_shldi_epi32(A, B, C, D) \
606 ((__m128i) __builtin_ia32_vpshld_v4si_mask ((__v4si)(__m128i)(B), \
607 (__v4si)(__m128i)(C),(int)(D), \
608 (__v4si)(__m128i)_mm_setzero_si128 (), (__mmask8)(A))
609#define _mm_shldi_epi64(A, B, C) \
610 ((__m128i) __builtin_ia32_vpshld_v2di ((__v2di)(__m128i)(A), \
611 (__v2di)(__m128i)(B),(int)(C))
612#define _mm_mask_shldi_epi64(A, B, C, D, E) \
613 ((__m128i) __builtin_ia32_vpshld_v2di_mask ((__v2di)(__m128i)(C), \
614 (__v2di)(__m128i)(D), (int)(E), (__v2di)(__m128i)(A),(__mmask8)(B))
615#define _mm_maskz_shldi_epi64(A, B, C, D) \
616 ((__m128i) __builtin_ia32_vpshld_v2di_mask ((__v2di)(__m128i)(B), \
617 (__v2di)(__m128i)(C),(int)(D), \
618 (__v2di)(__m128i)_mm_setzero_si128 (), (__mmask8)(A))
619#endif
620
/* VPSHRDV* variable-count forms: per-lane shift counts come from the
   last vector operand instead of an immediate.  mask_ variants merge
   into __A under mask __B; maskz_ variants zero unselected lanes.  */
extern __inline __m256i
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm256_shrdv_epi16 (__m256i __A, __m256i __B, __m256i __C)
{
  return (__m256i) __builtin_ia32_vpshrdv_v16hi ((__v16hi)__A, (__v16hi) __B,
						 (__v16hi) __C);
}

/* VPSHRDVW, 256-bit, merge masking.  */
extern __inline __m256i
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm256_mask_shrdv_epi16 (__m256i __A, __mmask16 __B, __m256i __C, __m256i __D)
{
  return (__m256i)__builtin_ia32_vpshrdv_v16hi_mask ((__v16hi)__A,
			(__v16hi) __C, (__v16hi) __D, (__mmask16)__B);
}

/* VPSHRDVW, 256-bit, zero masking.  */
extern __inline __m256i
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm256_maskz_shrdv_epi16 (__mmask16 __A, __m256i __B, __m256i __C, __m256i __D)
{
  return (__m256i)__builtin_ia32_vpshrdv_v16hi_maskz ((__v16hi)__B,
			(__v16hi) __C, (__v16hi) __D, (__mmask16)__A);
}

/* VPSHRDVD, 256-bit, unmasked.  */
extern __inline __m256i
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm256_shrdv_epi32 (__m256i __A, __m256i __B, __m256i __C)
{
  return (__m256i) __builtin_ia32_vpshrdv_v8si ((__v8si)__A, (__v8si) __B,
						(__v8si) __C);
}

/* VPSHRDVD, 256-bit, merge masking.  */
extern __inline __m256i
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm256_mask_shrdv_epi32 (__m256i __A, __mmask8 __B, __m256i __C, __m256i __D)
{
  return (__m256i)__builtin_ia32_vpshrdv_v8si_mask ((__v8si)__A, (__v8si) __C,
						    (__v8si) __D, (__mmask8)__B);
}

/* VPSHRDVD, 256-bit, zero masking.  */
extern __inline __m256i
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm256_maskz_shrdv_epi32 (__mmask8 __A, __m256i __B, __m256i __C, __m256i __D)
{
  return (__m256i)__builtin_ia32_vpshrdv_v8si_maskz ((__v8si)__B, (__v8si) __C,
						     (__v8si) __D, (__mmask8)__A);
}

/* VPSHRDVQ, 256-bit, unmasked.  */
extern __inline __m256i
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm256_shrdv_epi64 (__m256i __A, __m256i __B, __m256i __C)
{
  return (__m256i) __builtin_ia32_vpshrdv_v4di ((__v4di)__A, (__v4di) __B,
						(__v4di) __C);
}

/* VPSHRDVQ, 256-bit, merge masking.  */
extern __inline __m256i
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm256_mask_shrdv_epi64 (__m256i __A, __mmask8 __B, __m256i __C, __m256i __D)
{
  return (__m256i)__builtin_ia32_vpshrdv_v4di_mask ((__v4di)__A, (__v4di) __C,
						    (__v4di) __D, (__mmask8)__B);
}

/* VPSHRDVQ, 256-bit, zero masking.  */
extern __inline __m256i
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm256_maskz_shrdv_epi64 (__mmask8 __A, __m256i __B, __m256i __C, __m256i __D)
{
  return (__m256i)__builtin_ia32_vpshrdv_v4di_maskz ((__v4di)__B, (__v4di) __C,
						     (__v4di) __D, (__mmask8)__A);
}

/* VPSHRDVW, 128-bit, unmasked.  */
extern __inline __m128i
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_shrdv_epi16 (__m128i __A, __m128i __B, __m128i __C)
{
  return (__m128i) __builtin_ia32_vpshrdv_v8hi ((__v8hi)__A, (__v8hi) __B,
						(__v8hi) __C);
}

/* VPSHRDVW, 128-bit, merge masking.  */
extern __inline __m128i
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_mask_shrdv_epi16 (__m128i __A, __mmask8 __B, __m128i __C, __m128i __D)
{
  return (__m128i)__builtin_ia32_vpshrdv_v8hi_mask ((__v8hi)__A, (__v8hi) __C,
						    (__v8hi) __D, (__mmask8)__B);
}

/* VPSHRDVW, 128-bit, zero masking.  */
extern __inline __m128i
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_maskz_shrdv_epi16 (__mmask8 __A, __m128i __B, __m128i __C, __m128i __D)
{
  return (__m128i)__builtin_ia32_vpshrdv_v8hi_maskz ((__v8hi)__B, (__v8hi) __C,
						     (__v8hi) __D, (__mmask8)__A);
}

/* VPSHRDVD, 128-bit, unmasked.  */
extern __inline __m128i
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_shrdv_epi32 (__m128i __A, __m128i __B, __m128i __C)
{
  return (__m128i) __builtin_ia32_vpshrdv_v4si ((__v4si)__A, (__v4si) __B,
						(__v4si) __C);
}

/* VPSHRDVD, 128-bit, merge masking.  */
extern __inline __m128i
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_mask_shrdv_epi32 (__m128i __A, __mmask8 __B, __m128i __C, __m128i __D)
{
  return (__m128i)__builtin_ia32_vpshrdv_v4si_mask ((__v4si)__A, (__v4si) __C,
						    (__v4si) __D, (__mmask8)__B);
}

/* VPSHRDVD, 128-bit, zero masking.  */
extern __inline __m128i
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_maskz_shrdv_epi32 (__mmask8 __A, __m128i __B, __m128i __C, __m128i __D)
{
  return (__m128i)__builtin_ia32_vpshrdv_v4si_maskz ((__v4si)__B, (__v4si) __C,
						     (__v4si) __D, (__mmask8)__A);
}

/* VPSHRDVQ, 128-bit, unmasked.  */
extern __inline __m128i
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_shrdv_epi64 (__m128i __A, __m128i __B, __m128i __C)
{
  return (__m128i) __builtin_ia32_vpshrdv_v2di ((__v2di)__A, (__v2di) __B,
						(__v2di) __C);
}

/* VPSHRDVQ, 128-bit, merge masking.  */
extern __inline __m128i
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_mask_shrdv_epi64 (__m128i __A, __mmask8 __B, __m128i __C, __m128i __D)
{
  return (__m128i)__builtin_ia32_vpshrdv_v2di_mask ((__v2di)__A, (__v2di) __C,
						    (__v2di) __D, (__mmask8)__B);
}

/* VPSHRDVQ, 128-bit, zero masking.  */
extern __inline __m128i
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_maskz_shrdv_epi64 (__mmask8 __A, __m128i __B, __m128i __C, __m128i __D)
{
  return (__m128i)__builtin_ia32_vpshrdv_v2di_maskz ((__v2di)__B, (__v2di) __C,
						     (__v2di) __D, (__mmask8)__A);
}
764
765extern __inline __m256i
766__attribute__((__gnu_inline__, __always_inline__, __artificial__))
767_mm256_shldv_epi16 (__m256i __A, __m256i __B, __m256i __C)
768{
769 return (__m256i) __builtin_ia32_vpshldv_v16hi ((__v16hi)__A, (__v16hi) __B,
770 (__v16hi) __C);
771}
772
773extern __inline __m256i
774__attribute__((__gnu_inline__, __always_inline__, __artificial__))
775_mm256_mask_shldv_epi16 (__m256i __A, __mmask16 __B, __m256i __C, __m256i __D)
776{
777 return (__m256i)__builtin_ia32_vpshldv_v16hi_mask ((__v16hi)__A,
778 (__v16hi) __C, (__v16hi) __D, (__mmask16)__B);
779}
780
781extern __inline __m256i
782__attribute__((__gnu_inline__, __always_inline__, __artificial__))
783_mm256_maskz_shldv_epi16 (__mmask16 __A, __m256i __B, __m256i __C, __m256i __D)
784{
785 return (__m256i)__builtin_ia32_vpshldv_v16hi_maskz ((__v16hi)__B,
786 (__v16hi) __C, (__v16hi) __D, (__mmask16)__A);
787}
788
789extern __inline __m256i
790__attribute__((__gnu_inline__, __always_inline__, __artificial__))
791_mm256_shldv_epi32 (__m256i __A, __m256i __B, __m256i __C)
792{
793 return (__m256i) __builtin_ia32_vpshldv_v8si ((__v8si)__A, (__v8si) __B,
794 (__v8si) __C);
795}
796
797extern __inline __m256i
798__attribute__((__gnu_inline__, __always_inline__, __artificial__))
799_mm256_mask_shldv_epi32 (__m256i __A, __mmask8 __B, __m256i __C, __m256i __D)
800{
801 return (__m256i)__builtin_ia32_vpshldv_v8si_mask ((__v8si)__A, (__v8si) __C,
802 (__v8si) __D, (__mmask8)__B) ;
803}
804
/* VPSHLDVD (zero-masking): 32-bit funnel shift left of (__B:__C) by
   the counts in __D.  Lanes whose bit in mask __A is 0 are zeroed.  */
extern __inline __m256i
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm256_maskz_shldv_epi32 (__mmask8 __A, __m256i __B, __m256i __C, __m256i __D)
{
  return (__m256i)__builtin_ia32_vpshldv_v8si_maskz ((__v8si)__B, (__v8si) __C,
						     (__v8si) __D, (__mmask8)__A);
}
812
/* VPSHLDVQ: per-lane 64-bit funnel shift left.  Each lane of __A is
   concatenated above the matching lane of __B, the 128-bit value is
   shifted left by the count in __C, and the upper 64 bits are kept.  */
extern __inline __m256i
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm256_shldv_epi64 (__m256i __A, __m256i __B, __m256i __C)
{
  return (__m256i) __builtin_ia32_vpshldv_v4di ((__v4di)__A, (__v4di) __B,
						(__v4di) __C);
}
820
/* VPSHLDVQ (merge-masking): 64-bit funnel shift left of (__A:__C) by
   the counts in __D.  Result lanes are written only where the bit in
   mask __B is set; other lanes keep their value from __A.  */
extern __inline __m256i
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm256_mask_shldv_epi64 (__m256i __A, __mmask8 __B, __m256i __C, __m256i __D)
{
  return (__m256i)__builtin_ia32_vpshldv_v4di_mask ((__v4di)__A, (__v4di) __C,
						    (__v4di) __D, (__mmask8)__B);
}
828
/* VPSHLDVQ (zero-masking): 64-bit funnel shift left of (__B:__C) by
   the counts in __D.  Lanes whose bit in mask __A is 0 are zeroed.  */
extern __inline __m256i
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm256_maskz_shldv_epi64 (__mmask8 __A, __m256i __B, __m256i __C, __m256i __D)
{
  return (__m256i)__builtin_ia32_vpshldv_v4di_maskz ((__v4di)__B, (__v4di) __C,
						     (__v4di) __D, (__mmask8)__A);
}
836
/* VPSHLDVW (128-bit): per-lane 16-bit funnel shift left.  Each lane of
   __A is concatenated above the matching lane of __B, shifted left by
   the count in __C, and the upper 16 bits are kept.  */
extern __inline __m128i
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_shldv_epi16 (__m128i __A, __m128i __B, __m128i __C)
{
  return (__m128i) __builtin_ia32_vpshldv_v8hi ((__v8hi)__A, (__v8hi) __B,
						(__v8hi) __C);
}
844
/* VPSHLDVW (128-bit, merge-masking): 16-bit funnel shift left of
   (__A:__C) by the counts in __D.  Result lanes are written only where
   the bit in mask __B is set; other lanes keep their value from __A.  */
extern __inline __m128i
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_mask_shldv_epi16 (__m128i __A, __mmask8 __B, __m128i __C, __m128i __D)
{
  return (__m128i)__builtin_ia32_vpshldv_v8hi_mask ((__v8hi)__A, (__v8hi) __C,
						    (__v8hi) __D, (__mmask8)__B);
}
852
/* VPSHLDVW (128-bit, zero-masking): 16-bit funnel shift left of
   (__B:__C) by the counts in __D.  Lanes whose bit in mask __A is 0
   are zeroed.  */
extern __inline __m128i
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_maskz_shldv_epi16 (__mmask8 __A, __m128i __B, __m128i __C, __m128i __D)
{
  return (__m128i)__builtin_ia32_vpshldv_v8hi_maskz ((__v8hi)__B, (__v8hi) __C,
						     (__v8hi) __D, (__mmask8)__A);
}
860
/* VPSHLDVD (128-bit): per-lane 32-bit funnel shift left.  Each lane of
   __A is concatenated above the matching lane of __B, shifted left by
   the count in __C, and the upper 32 bits are kept.  */
extern __inline __m128i
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_shldv_epi32 (__m128i __A, __m128i __B, __m128i __C)
{
  return (__m128i) __builtin_ia32_vpshldv_v4si ((__v4si)__A, (__v4si) __B,
						(__v4si) __C);
}
868
/* VPSHLDVD (128-bit, merge-masking): 32-bit funnel shift left of
   (__A:__C) by the counts in __D.  Result lanes are written only where
   the bit in mask __B is set; other lanes keep their value from __A.  */
extern __inline __m128i
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_mask_shldv_epi32 (__m128i __A, __mmask8 __B, __m128i __C, __m128i __D)
{
  return (__m128i)__builtin_ia32_vpshldv_v4si_mask ((__v4si)__A, (__v4si) __C,
						    (__v4si) __D, (__mmask8)__B);
}
876
/* VPSHLDVD (128-bit, zero-masking): 32-bit funnel shift left of
   (__B:__C) by the counts in __D.  Lanes whose bit in mask __A is 0
   are zeroed.  */
extern __inline __m128i
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_maskz_shldv_epi32 (__mmask8 __A, __m128i __B, __m128i __C, __m128i __D)
{
  return (__m128i)__builtin_ia32_vpshldv_v4si_maskz ((__v4si)__B, (__v4si) __C,
						     (__v4si) __D, (__mmask8)__A);
}
884
/* VPSHLDVQ (128-bit): per-lane 64-bit funnel shift left.  Each lane of
   __A is concatenated above the matching lane of __B, shifted left by
   the count in __C, and the upper 64 bits are kept.  */
extern __inline __m128i
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_shldv_epi64 (__m128i __A, __m128i __B, __m128i __C)
{
  return (__m128i) __builtin_ia32_vpshldv_v2di ((__v2di)__A, (__v2di) __B,
						(__v2di) __C);
}
892
/* VPSHLDVQ (128-bit, merge-masking): 64-bit funnel shift left of
   (__A:__C) by the counts in __D.  Result lanes are written only where
   the bit in mask __B is set; other lanes keep their value from __A.  */
extern __inline __m128i
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_mask_shldv_epi64 (__m128i __A, __mmask8 __B, __m128i __C, __m128i __D)
{
  return (__m128i)__builtin_ia32_vpshldv_v2di_mask ((__v2di)__A, (__v2di) __C,
						    (__v2di) __D, (__mmask8)__B);
}
900
/* VPSHLDVQ (128-bit, zero-masking): 64-bit funnel shift left of
   (__B:__C) by the counts in __D.  Lanes whose bit in mask __A is 0
   are zeroed.  */
extern __inline __m128i
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_maskz_shldv_epi64 (__mmask8 __A, __m128i __B, __m128i __C, __m128i __D)
{
  return (__m128i)__builtin_ia32_vpshldv_v2di_maskz ((__v2di)__B, (__v2di) __C,
						     (__v2di) __D, (__mmask8)__A);
}
908
909
910
911
912#ifdef __DISABLE_AVX512VBMI2VL__
913#undef __DISABLE_AVX512VBMI2VL__
914#pragma GCC pop_options
#endif /* __DISABLE_AVX512VBMI2VL__ */
916
917#if !defined(__AVX512VL__) || !defined(__AVX512VBMI2__) || \
918 !defined(__AVX512BW__)
919#pragma GCC push_options
920#pragma GCC target("avx512vbmi2,avx512vl,avx512bw")
921#define __DISABLE_AVX512VBMI2VLBW__
#endif /* __AVX512VBMI2VLBW__ */
923
/* VPCOMPRESSB (merge-masking): pack the bytes of __C whose bit in mask
   __B is set contiguously into the low byte positions of the result;
   the remaining high positions keep their values from __A.  */
extern __inline __m256i
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm256_mask_compress_epi8 (__m256i __A, __mmask32 __B, __m256i __C)
{
  return (__m256i) __builtin_ia32_compressqi256_mask ((__v32qi)__C,
						      (__v32qi)__A, (__mmask32)__B);
}
931
/* VPCOMPRESSB (zero-masking): pack the bytes of __B whose bit in mask
   __A is set contiguously into the low byte positions of the result;
   the remaining positions are zeroed (zero vector as pass-through).  */
extern __inline __m256i
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm256_maskz_compress_epi8 (__mmask32 __A, __m256i __B)
{
  return (__m256i) __builtin_ia32_compressqi256_mask ((__v32qi) __B,
				(__v32qi) _mm256_setzero_si256 (), (__mmask32) __A);
}
939
/* VPCOMPRESSB to memory: store the bytes of __C whose bit in mask __B
   is set contiguously to unaligned address __A.  Only as many bytes as
   there are set mask bits are written.  */
extern __inline void
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm256_mask_compressstoreu_epi8 (void * __A, __mmask32 __B, __m256i __C)
{
  __builtin_ia32_compressstoreuqi256_mask ((__v32qi *) __A, (__v32qi) __C,
					   (__mmask32) __B);
}
947
/* VPEXPANDB (merge-masking): distribute the low contiguous bytes of
   __C into the byte positions whose bit in mask __B is set; positions
   with a clear mask bit keep their values from __A.  */
extern __inline __m256i
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm256_mask_expand_epi8 (__m256i __A, __mmask32 __B, __m256i __C)
{
  return (__m256i) __builtin_ia32_expandqi256_mask ((__v32qi) __C,
						    (__v32qi) __A,
						    (__mmask32) __B);
}
956
/* VPEXPANDB (zero-masking): distribute the low contiguous bytes of __B
   into the byte positions whose bit in mask __A is set; positions with
   a clear mask bit are zeroed.  */
extern __inline __m256i
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm256_maskz_expand_epi8 (__mmask32 __A, __m256i __B)
{
  return (__m256i) __builtin_ia32_expandqi256_maskz ((__v32qi) __B,
				(__v32qi) _mm256_setzero_si256 (), (__mmask32) __A);
}
964
/* VPEXPANDB from memory (merge-masking): load contiguous bytes from
   unaligned address __C and distribute them into the byte positions
   whose bit in mask __B is set; positions with a clear mask bit keep
   their values from __A.  */
extern __inline __m256i
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm256_mask_expandloadu_epi8 (__m256i __A, __mmask32 __B, const void * __C)
{
  return (__m256i) __builtin_ia32_expandloadqi256_mask ((const __v32qi *) __C,
					(__v32qi) __A, (__mmask32) __B);
}
972
/* VPEXPANDB from memory (zero-masking): load contiguous bytes from
   unaligned address __B and distribute them into the byte positions
   whose bit in mask __A is set; positions with a clear mask bit are
   zeroed.  */
extern __inline __m256i
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm256_maskz_expandloadu_epi8 (__mmask32 __A, const void * __B)
{
  return (__m256i) __builtin_ia32_expandloadqi256_maskz ((const __v32qi *) __B,
				(__v32qi) _mm256_setzero_si256 (), (__mmask32) __A);
}
980
981#ifdef __DISABLE_AVX512VBMI2VLBW__
982#undef __DISABLE_AVX512VBMI2VLBW__
983#pragma GCC pop_options
#endif /* __DISABLE_AVX512VBMI2VLBW__ */
985
#endif /* _AVX512VBMI2VLINTRIN_H_INCLUDED */
987

Warning: That file was not part of the compilation database. It may have many parsing errors.