/* Tree-based target query functions relating to optabs
   Copyright (C) 1987-2024 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */


#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "target.h"
#include "insn-codes.h"
#include "rtl.h"
#include "tree.h"
#include "memmodel.h"
#include "optabs.h"
#include "optabs-tree.h"
#include "stor-layout.h"
/* Return the optab used for computing the operation given by the tree code
   CODE on values of type TYPE, with the variant selected by SUBTYPE.  This
   function is not always usable (for example, it cannot give complete
   results for multiplication or division) but probably ought to be relied
   on more widely throughout the expander.  */
optab
optab_for_tree_code (enum tree_code code, const_tree type,
		     enum optab_subtype subtype)
{
  bool trapv;
  switch (code)
    {
    case BIT_AND_EXPR:
      return and_optab;

    case BIT_IOR_EXPR:
      return ior_optab;

    case BIT_NOT_EXPR:
      return one_cmpl_optab;

    case BIT_XOR_EXPR:
      return xor_optab;

    case MULT_HIGHPART_EXPR:
      return TYPE_UNSIGNED (type) ? umul_highpart_optab : smul_highpart_optab;

    case CEIL_MOD_EXPR:
    case FLOOR_MOD_EXPR:
    case ROUND_MOD_EXPR:
      /* {s,u}mod_optab implements TRUNC_MOD_EXPR.  For scalar modes,
	 expansion has code to adjust TRUNC_MOD_EXPR into the desired other
	 modes, but for vector modes it does not.  The adjustment code
	 should instead be emitted in tree-vect-patterns.cc.  */
      if (VECTOR_TYPE_P (type))
	return unknown_optab;
      /* FALLTHRU */
    case TRUNC_MOD_EXPR:
      return TYPE_UNSIGNED (type) ? umod_optab : smod_optab;

    case CEIL_DIV_EXPR:
    case FLOOR_DIV_EXPR:
    case ROUND_DIV_EXPR:
      /* {,u}{s,u}div_optab implements {TRUNC,EXACT}_DIV_EXPR or RDIV_EXPR.
	 For scalar modes, expansion has code to adjust TRUNC_DIV_EXPR
	 into the desired other modes, but for vector modes it does not.
	 The adjustment code should instead be emitted in
	 tree-vect-patterns.cc.  */
      if (VECTOR_TYPE_P (type))
	return unknown_optab;
      /* FALLTHRU */
    case RDIV_EXPR:
    case TRUNC_DIV_EXPR:
    case EXACT_DIV_EXPR:
      if (TYPE_SATURATING (type))
	return TYPE_UNSIGNED (type) ? usdiv_optab : ssdiv_optab;
      return TYPE_UNSIGNED (type) ? udiv_optab : sdiv_optab;

    case LSHIFT_EXPR:
      if (VECTOR_TYPE_P (type))
	{
	  if (subtype == optab_vector)
	    return TYPE_SATURATING (type) ? unknown_optab : vashl_optab;

	  gcc_assert (subtype == optab_scalar);
	}
      if (TYPE_SATURATING (type))
	return TYPE_UNSIGNED (type) ? usashl_optab : ssashl_optab;
      return ashl_optab;

    case RSHIFT_EXPR:
      if (VECTOR_TYPE_P (type))
	{
	  if (subtype == optab_vector)
	    return TYPE_UNSIGNED (type) ? vlshr_optab : vashr_optab;

	  gcc_assert (subtype == optab_scalar);
	}
      return TYPE_UNSIGNED (type) ? lshr_optab : ashr_optab;

    case LROTATE_EXPR:
      if (VECTOR_TYPE_P (type))
	{
	  if (subtype == optab_vector)
	    return vrotl_optab;

	  gcc_assert (subtype == optab_scalar);
	}
      return rotl_optab;

    case RROTATE_EXPR:
      if (VECTOR_TYPE_P (type))
	{
	  if (subtype == optab_vector)
	    return vrotr_optab;

	  gcc_assert (subtype == optab_scalar);
	}
      return rotr_optab;

    case MAX_EXPR:
      return TYPE_UNSIGNED (type) ? umax_optab : smax_optab;

    case MIN_EXPR:
      return TYPE_UNSIGNED (type) ? umin_optab : smin_optab;

    case REALIGN_LOAD_EXPR:
      return vec_realign_load_optab;

    case WIDEN_SUM_EXPR:
      return TYPE_UNSIGNED (type) ? usum_widen_optab : ssum_widen_optab;

    case DOT_PROD_EXPR:
      {
	if (subtype == optab_vector_mixed_sign)
	  return usdot_prod_optab;

	return (TYPE_UNSIGNED (type) ? udot_prod_optab : sdot_prod_optab);
      }

    case SAD_EXPR:
      return TYPE_UNSIGNED (type) ? usad_optab : ssad_optab;

    case WIDEN_MULT_PLUS_EXPR:
      return (TYPE_UNSIGNED (type)
	      ? (TYPE_SATURATING (type)
		 ? usmadd_widen_optab : umadd_widen_optab)
	      : (TYPE_SATURATING (type)
		 ? ssmadd_widen_optab : smadd_widen_optab));

    case WIDEN_MULT_MINUS_EXPR:
      return (TYPE_UNSIGNED (type)
	      ? (TYPE_SATURATING (type)
		 ? usmsub_widen_optab : umsub_widen_optab)
	      : (TYPE_SATURATING (type)
		 ? ssmsub_widen_optab : smsub_widen_optab));

    case VEC_WIDEN_MULT_HI_EXPR:
      return (TYPE_UNSIGNED (type)
	      ? vec_widen_umult_hi_optab : vec_widen_smult_hi_optab);

    case VEC_WIDEN_MULT_LO_EXPR:
      return (TYPE_UNSIGNED (type)
	      ? vec_widen_umult_lo_optab : vec_widen_smult_lo_optab);

    case VEC_WIDEN_MULT_EVEN_EXPR:
      return (TYPE_UNSIGNED (type)
	      ? vec_widen_umult_even_optab : vec_widen_smult_even_optab);

    case VEC_WIDEN_MULT_ODD_EXPR:
      return (TYPE_UNSIGNED (type)
	      ? vec_widen_umult_odd_optab : vec_widen_smult_odd_optab);

    case VEC_WIDEN_LSHIFT_HI_EXPR:
      return (TYPE_UNSIGNED (type)
	      ? vec_widen_ushiftl_hi_optab : vec_widen_sshiftl_hi_optab);

    case VEC_WIDEN_LSHIFT_LO_EXPR:
      return (TYPE_UNSIGNED (type)
	      ? vec_widen_ushiftl_lo_optab : vec_widen_sshiftl_lo_optab);

    case VEC_UNPACK_HI_EXPR:
      return (TYPE_UNSIGNED (type)
	      ? vec_unpacku_hi_optab : vec_unpacks_hi_optab);

    case VEC_UNPACK_LO_EXPR:
      return (TYPE_UNSIGNED (type)
	      ? vec_unpacku_lo_optab : vec_unpacks_lo_optab);

    case VEC_UNPACK_FLOAT_HI_EXPR:
      /* The signedness is determined from the input operand.  */
      return (TYPE_UNSIGNED (type)
	      ? vec_unpacku_float_hi_optab : vec_unpacks_float_hi_optab);

    case VEC_UNPACK_FLOAT_LO_EXPR:
      /* The signedness is determined from the input operand.  */
      return (TYPE_UNSIGNED (type)
	      ? vec_unpacku_float_lo_optab : vec_unpacks_float_lo_optab);

    case VEC_UNPACK_FIX_TRUNC_HI_EXPR:
      /* The signedness is determined from the output operand.  */
      return (TYPE_UNSIGNED (type)
	      ? vec_unpack_ufix_trunc_hi_optab
	      : vec_unpack_sfix_trunc_hi_optab);

    case VEC_UNPACK_FIX_TRUNC_LO_EXPR:
      /* The signedness is determined from the output operand.  */
      return (TYPE_UNSIGNED (type)
	      ? vec_unpack_ufix_trunc_lo_optab
	      : vec_unpack_sfix_trunc_lo_optab);

    case VEC_PACK_TRUNC_EXPR:
      return vec_pack_trunc_optab;

    case VEC_PACK_SAT_EXPR:
      return TYPE_UNSIGNED (type) ? vec_pack_usat_optab : vec_pack_ssat_optab;

    case VEC_PACK_FIX_TRUNC_EXPR:
      /* The signedness is determined from the output operand.  */
      return (TYPE_UNSIGNED (type)
	      ? vec_pack_ufix_trunc_optab : vec_pack_sfix_trunc_optab);

    case VEC_PACK_FLOAT_EXPR:
      /* The signedness is determined from the input operand.  */
      return (TYPE_UNSIGNED (type)
	      ? vec_packu_float_optab : vec_packs_float_optab);

    case VEC_DUPLICATE_EXPR:
      return vec_duplicate_optab;

    case VEC_SERIES_EXPR:
      return vec_series_optab;

    default:
      break;
    }

  trapv = INTEGRAL_TYPE_P (type) && TYPE_OVERFLOW_TRAPS (type);
  switch (code)
    {
    case POINTER_PLUS_EXPR:
    case PLUS_EXPR:
      if (TYPE_SATURATING (type))
	return TYPE_UNSIGNED (type) ? usadd_optab : ssadd_optab;
      return trapv ? addv_optab : add_optab;

    case POINTER_DIFF_EXPR:
    case MINUS_EXPR:
      if (TYPE_SATURATING (type))
	return TYPE_UNSIGNED (type) ? ussub_optab : sssub_optab;
      return trapv ? subv_optab : sub_optab;

    case MULT_EXPR:
      if (TYPE_SATURATING (type))
	return TYPE_UNSIGNED (type) ? usmul_optab : ssmul_optab;
      return trapv ? smulv_optab : smul_optab;

    case NEGATE_EXPR:
      if (TYPE_SATURATING (type))
	return TYPE_UNSIGNED (type) ? usneg_optab : ssneg_optab;
      return trapv ? negv_optab : neg_optab;

    case ABS_EXPR:
      return trapv ? absv_optab : abs_optab;

    case ABSU_EXPR:
      return abs_optab;

    default:
      return unknown_optab;
    }
}
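
/* A sketch of typical usage, assuming a caller with some vector type
   VECTYPE (the same pattern appears in target_supports_op_p below): the
   returned optab is paired with optab_handler to ask whether the current
   target can expand the operation directly.

     optab ot = optab_for_tree_code (PLUS_EXPR, vectype, optab_default);
     if (ot != unknown_optab
	 && optab_handler (ot, TYPE_MODE (vectype)) != CODE_FOR_nothing)
       ...the target has a named pattern for the addition...  */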

/* Check whether an operation represented by CODE is a 'half' widening
   operation in which the input vector type has half the number of bits
   of the output vector type, e.g. V8QI->V8HI.

   This case is handled by widening the inputs using NOP_EXPRs and then
   using a non-widening stmt, e.g. MINUS_EXPR.  RTL fusing converts these
   to the widening hardware instructions if supported.

   The more typical case (handled in supportable_widening_operation) is
   where the input vector type has the same number of bits as the output
   vector type.  In that case half the elements of the input vectors must
   be processed at a time into respective vector outputs with elements
   twice as wide, i.e. a 'hi'/'lo' pair using codes such as
   VEC_WIDEN_MINUS_HI/LO.

   Supported widening operations:
    WIDEN_MULT_EXPR
    WIDEN_LSHIFT_EXPR

   Output:
   - CODE1 - The non-widened code, which will be used after the inputs are
     converted to the wide type.  */
bool
supportable_half_widening_operation (enum tree_code code, tree vectype_out,
				     tree vectype_in, enum tree_code *code1)
{
  machine_mode m1, m2;
  enum tree_code dummy_code;
  optab op;

  gcc_assert (VECTOR_TYPE_P (vectype_out) && VECTOR_TYPE_P (vectype_in));

  m1 = TYPE_MODE (vectype_out);
  m2 = TYPE_MODE (vectype_in);

  if (!VECTOR_MODE_P (m1) || !VECTOR_MODE_P (m2))
    return false;

  if (maybe_ne (TYPE_VECTOR_SUBPARTS (vectype_in),
		TYPE_VECTOR_SUBPARTS (vectype_out)))
    return false;

  switch (code)
    {
    case WIDEN_LSHIFT_EXPR:
      *code1 = LSHIFT_EXPR;
      break;
    case WIDEN_MULT_EXPR:
      *code1 = MULT_EXPR;
      break;
    default:
      return false;
    }

  if (!supportable_convert_operation (NOP_EXPR, vectype_out, vectype_in,
				      &dummy_code))
    return false;

  op = optab_for_tree_code (*code1, vectype_out, optab_vector);
  return (optab_handler (op, TYPE_MODE (vectype_out)) != CODE_FOR_nothing);
}
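
/* A sketch of how this might be called during vectorization, assuming a
   target with V8QI and V8HI vector modes (v8qi_type and v8hi_type are
   hypothetical variables): for a WIDEN_MULT_EXPR taking V8QI inputs to a
   V8HI output, a successful query hands back the non-widened code to
   apply after widening the inputs.

     enum tree_code wide_code;
     if (supportable_half_widening_operation (WIDEN_MULT_EXPR, v8hi_type,
					      v8qi_type, &wide_code))
       ...wide_code is MULT_EXPR; widen inputs via NOP_EXPRs, then
	  multiply...  */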

/* Function supportable_convert_operation

   Check whether an operation represented by the code CODE is a
   convert operation that is supported by the target platform in
   vector form (i.e., when operating on arguments of type VECTYPE_IN
   producing a result of type VECTYPE_OUT).

   Convert operations we currently support directly are FIX_TRUNC and FLOAT.
   This function checks if these operations are supported
   by the target platform directly (via vector tree-codes).

   Output:
   - CODE1 is the code of the vector operation to be used when
   vectorizing the operation, if available.  */

bool
supportable_convert_operation (enum tree_code code,
			       tree vectype_out, tree vectype_in,
			       enum tree_code *code1)
{
  machine_mode m1, m2;
  bool truncp;

  gcc_assert (VECTOR_TYPE_P (vectype_out) && VECTOR_TYPE_P (vectype_in));

  m1 = TYPE_MODE (vectype_out);
  m2 = TYPE_MODE (vectype_in);

  if (!VECTOR_MODE_P (m1) || !VECTOR_MODE_P (m2))
    return false;

  /* First check if we can do the conversion directly.  */
  if ((code == FIX_TRUNC_EXPR
       && can_fix_p (m1, m2, TYPE_UNSIGNED (vectype_out), &truncp)
	  != CODE_FOR_nothing)
      || (code == FLOAT_EXPR
	  && can_float_p (m1, m2, TYPE_UNSIGNED (vectype_in))
	     != CODE_FOR_nothing))
    {
      *code1 = code;
      return true;
    }

  if (GET_MODE_UNIT_PRECISION (m1) > GET_MODE_UNIT_PRECISION (m2)
      && can_extend_p (m1, m2, TYPE_UNSIGNED (vectype_in)))
    {
      *code1 = code;
      return true;
    }

  if (GET_MODE_UNIT_PRECISION (m1) < GET_MODE_UNIT_PRECISION (m2)
      && convert_optab_handler (trunc_optab, m1, m2) != CODE_FOR_nothing)
    {
      *code1 = code;
      return true;
    }

  return false;
}
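
/* A sketch of a direct-conversion query, assuming hypothetical V4SF and
   V4SI vector types: a FIX_TRUNC_EXPR from V4SF to V4SI succeeds when
   can_fix_p finds a pattern for the two vector modes, and *CODE1 is then
   simply the original code.

     enum tree_code conv_code;
     if (supportable_convert_operation (FIX_TRUNC_EXPR, v4si_type,
					v4sf_type, &conv_code))
       ...conv_code is FIX_TRUNC_EXPR; emit the conversion directly...  */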

/* Return true iff vec_cmp_optab/vec_cmpu_optab can handle a vector comparison
   for code CODE, comparing operands of type VALUE_TYPE and producing a result
   of type MASK_TYPE.  */

static bool
vec_cmp_icode_p (tree value_type, tree mask_type, enum tree_code code)
{
  enum rtx_code rcode = get_rtx_code_1 (code, TYPE_UNSIGNED (value_type));
  if (rcode == UNKNOWN)
    return false;

  return can_vec_cmp_compare_p (rcode, TYPE_MODE (value_type),
				TYPE_MODE (mask_type));
}

/* Return true iff vec_cmpeq_optab can handle a vector comparison for code
   CODE, comparing operands of type VALUE_TYPE and producing a result of type
   MASK_TYPE.  */

static bool
vec_cmp_eq_icode_p (tree value_type, tree mask_type, enum tree_code code)
{
  if (code != EQ_EXPR && code != NE_EXPR)
    return false;

  return get_vec_cmp_eq_icode (TYPE_MODE (value_type), TYPE_MODE (mask_type))
	 != CODE_FOR_nothing;
}

/* Return TRUE if an appropriate vector insn is available
   for a vector comparison expr with vector type VALUE_TYPE
   and resulting mask with MASK_TYPE.  */

bool
expand_vec_cmp_expr_p (tree value_type, tree mask_type, enum tree_code code)
{
  return vec_cmp_icode_p (value_type, mask_type, code)
	 || vec_cmp_eq_icode_p (value_type, mask_type, code);
}
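
/* Usage sketch, with hypothetical DATA_TYPE and MASK_TYPE vector types:
   the vectorizer can use this predicate before committing to a vector
   comparison that produces a mask.

     if (expand_vec_cmp_expr_p (data_type, mask_type, LT_EXPR))
       ...a vec_cmp{,u}/vec_cmpeq pattern can expand the comparison...  */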

/* Return true iff vcond_optab/vcondu_optab can handle a vector
   comparison for code CODE, comparing operands of type CMP_OP_TYPE and
   producing a result of type VALUE_TYPE.  */

static bool
vcond_icode_p (tree value_type, tree cmp_op_type, enum tree_code code)
{
  enum rtx_code rcode = get_rtx_code_1 (code, TYPE_UNSIGNED (cmp_op_type));
  if (rcode == UNKNOWN)
    return false;

  return can_vcond_compare_p (rcode, TYPE_MODE (value_type),
			      TYPE_MODE (cmp_op_type));
}

/* Return true iff vcondeq_optab can handle a vector comparison for code CODE,
   comparing operands of type CMP_OP_TYPE and producing a result of type
   VALUE_TYPE.  */

static bool
vcond_eq_icode_p (tree value_type, tree cmp_op_type, enum tree_code code)
{
  if (code != EQ_EXPR && code != NE_EXPR)
    return false;

  return get_vcond_eq_icode (TYPE_MODE (value_type), TYPE_MODE (cmp_op_type))
	 != CODE_FOR_nothing;
}

/* Return TRUE iff appropriate vector insns are available
   for a vector cond expr with vector type VALUE_TYPE and a comparison
   with operand vector types in CMP_OP_TYPE.  */

bool
expand_vec_cond_expr_p (tree value_type, tree cmp_op_type, enum tree_code code)
{
  machine_mode value_mode = TYPE_MODE (value_type);
  machine_mode cmp_op_mode = TYPE_MODE (cmp_op_type);
  if (VECTOR_BOOLEAN_TYPE_P (cmp_op_type)
      && get_vcond_mask_icode (TYPE_MODE (value_type),
			       TYPE_MODE (cmp_op_type)) != CODE_FOR_nothing)
    return true;

  if (maybe_ne (GET_MODE_NUNITS (value_mode), GET_MODE_NUNITS (cmp_op_mode)))
    return false;

  if (TREE_CODE_CLASS (code) != tcc_comparison)
    /* This may happen, for example, if code == SSA_NAME, in which case we
       cannot be certain whether a vector insn is available.  */
    return false;

  return vcond_icode_p (value_type, cmp_op_type, code)
	 || vcond_eq_icode_p (value_type, cmp_op_type, code);
}
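
/* Usage sketch, with hypothetical vector types: a VEC_COND_EXPR selecting
   between two V4SI values under a V4SF comparison is expandable if either
   a vcond{,u,eq} pattern covers the comparison or, when the comparison
   operand is already a boolean vector, a vcond_mask pattern exists.

     if (expand_vec_cond_expr_p (v4si_type, v4sf_type, LT_EXPR))
       ...the VEC_COND_EXPR can be expanded without falling back...  */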

/* Use the current target and options to initialize
   TREE_OPTIMIZATION_OPTABS (OPTNODE).  */

void
init_tree_optimization_optabs (tree optnode)
{
  /* Quick exit if we have already computed optabs for this target.  */
  if (TREE_OPTIMIZATION_BASE_OPTABS (optnode) == this_target_optabs)
    return;

  /* Forget any previous information and set up for the current target.  */
  TREE_OPTIMIZATION_BASE_OPTABS (optnode) = this_target_optabs;
  struct target_optabs *tmp_optabs = (struct target_optabs *)
    TREE_OPTIMIZATION_OPTABS (optnode);
  if (tmp_optabs)
    memset (tmp_optabs, 0, sizeof (struct target_optabs));
  else
    tmp_optabs = ggc_cleared_alloc<target_optabs> ();

  /* Generate a new set of optabs into tmp_optabs.  */
  init_all_optabs (tmp_optabs);

  /* If the optabs changed, record it.  */
  if (memcmp (tmp_optabs, this_target_optabs, sizeof (struct target_optabs)))
    TREE_OPTIMIZATION_OPTABS (optnode) = tmp_optabs;
  else
    {
      TREE_OPTIMIZATION_OPTABS (optnode) = NULL;
      ggc_free (tmp_optabs);
    }
}
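
/* For context (a hedged note): this recomputation matters when an
   optimization node can change which instruction patterns are enabled,
   e.g. for a function carrying something like
   __attribute__((optimize ("...")))), so that optab queries made while
   that node is active see the recomputed tables rather than the global
   ones.  */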

/* Return TRUE if the target has support for the operation CODE on an
   operand of type TYPE, using the optab variant selected by OT_SUBTYPE.
   For vector shifts, OPTAB_SCALAR checks for a shift amount given as a
   scalar and OPTAB_VECTOR for one given as a vector.  */

bool
target_supports_op_p (tree type, enum tree_code code,
		      enum optab_subtype ot_subtype)
{
  optab ot = optab_for_tree_code (code, type, ot_subtype);
  return (ot != unknown_optab
	  && optab_handler (ot, TYPE_MODE (type)) != CODE_FOR_nothing);
}
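
/* Usage sketch, assuming some vector type VECTYPE: asking specifically
   whether the target can shift vector elements by amounts given in a
   vector, as opposed to a single scalar amount.

     if (target_supports_op_p (vectype, LSHIFT_EXPR, optab_vector))
       ...the target provides a vector-by-vector shift pattern...  */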

/* Return true if the target has support for masked load/store.
   We can support masked load/store by either mask{load,store}
   or mask_len_{load,store}.
   This helper function checks whether the target supports masked
   load/store and returns the corresponding IFN in the last argument
   (IFN_MASK_{LOAD,STORE} or IFN_MASK_LEN_{LOAD,STORE}).  */

static bool
target_supports_mask_load_store_p (machine_mode mode, machine_mode mask_mode,
				   bool is_load, internal_fn *ifn)
{
  optab op = is_load ? maskload_optab : maskstore_optab;
  optab len_op = is_load ? mask_len_load_optab : mask_len_store_optab;
  if (convert_optab_handler (op, mode, mask_mode) != CODE_FOR_nothing)
    {
      if (ifn)
	*ifn = is_load ? IFN_MASK_LOAD : IFN_MASK_STORE;
      return true;
    }
  else if (convert_optab_handler (len_op, mode, mask_mode) != CODE_FOR_nothing)
    {
      if (ifn)
	*ifn = is_load ? IFN_MASK_LEN_LOAD : IFN_MASK_LEN_STORE;
      return true;
    }
  return false;
}

/* Return true if the target supports a vector masked load/store for MODE.
   There is an additional output in the last argument, the IFN pointer:
   we set *IFN to IFN_MASK_{LOAD,STORE} or IFN_MASK_LEN_{LOAD,STORE}
   according to which optab is supported by the target.  */

bool
can_vec_mask_load_store_p (machine_mode mode,
			   machine_mode mask_mode,
			   bool is_load,
			   internal_fn *ifn)
{
  machine_mode vmode;

  /* If MODE is a vector mode, check it directly.  */
  if (VECTOR_MODE_P (mode))
    return target_supports_mask_load_store_p (mode, mask_mode, is_load, ifn);

  /* Otherwise, return true if there is some vector mode with
     the mask load/store supported.  */

  /* See if there is any chance the mask load or store might be
     vectorized.  If not, punt.  */
  scalar_mode smode;
  if (!is_a <scalar_mode> (mode, &smode))
    return false;

  vmode = targetm.vectorize.preferred_simd_mode (smode);
  if (VECTOR_MODE_P (vmode)
      && targetm.vectorize.get_mask_mode (vmode).exists (&mask_mode)
      && target_supports_mask_load_store_p (vmode, mask_mode, is_load, ifn))
    return true;

  auto_vector_modes vector_modes;
  targetm.vectorize.autovectorize_vector_modes (&vector_modes, true);
  for (machine_mode base_mode : vector_modes)
    if (related_vector_mode (base_mode, smode).exists (&vmode)
	&& targetm.vectorize.get_mask_mode (vmode).exists (&mask_mode)
	&& target_supports_mask_load_store_p (vmode, mask_mode, is_load, ifn))
      return true;
  return false;
}
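
/* Usage sketch: for a scalar mode the mask mode is recomputed internally,
   so a caller probing whether SImode accesses could be masked after
   vectorization might pass a placeholder mask mode:

     internal_fn ifn;
     if (can_vec_mask_load_store_p (SImode, VOIDmode, true, &ifn))
       ...ifn is IFN_MASK_LOAD or IFN_MASK_LEN_LOAD...  */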

/* Return true if the target has support for len load/store.
   We can support len load/store by either len_{load,store}
   or mask_len_{load,store}.
   This helper function checks whether the target supports len
   load/store and returns the corresponding IFN in the last argument
   (IFN_LEN_{LOAD,STORE} or IFN_MASK_LEN_{LOAD,STORE}).  */

static bool
target_supports_len_load_store_p (machine_mode mode, bool is_load,
				  internal_fn *ifn)
{
  optab op = is_load ? len_load_optab : len_store_optab;
  optab masked_op = is_load ? mask_len_load_optab : mask_len_store_optab;

  if (direct_optab_handler (op, mode) != CODE_FOR_nothing)
    {
      if (ifn)
	*ifn = is_load ? IFN_LEN_LOAD : IFN_LEN_STORE;
      return true;
    }
  machine_mode mask_mode;
  if (targetm.vectorize.get_mask_mode (mode).exists (&mask_mode)
      && convert_optab_handler (masked_op, mode, mask_mode)
	 != CODE_FOR_nothing)
    {
      if (ifn)
	*ifn = is_load ? IFN_MASK_LEN_LOAD : IFN_MASK_LEN_STORE;
      return true;
    }
  return false;
}

/* If the target supports a vector load/store with length for vector mode
   MODE, return the corresponding vector mode, otherwise return
   opt_machine_mode ().  There are two flavors of vector load/store with
   length: one measures the length in bytes, the other in lanes.  As the
   len_{load,store} optabs specify, for the byte-based flavor we use VnQI
   to wrap the other supportable vector modes of the same size.  There is
   an additional output in the last argument, the IFN pointer: we set
   *IFN to IFN_LEN_{LOAD,STORE} or IFN_MASK_LEN_{LOAD,STORE} according to
   which optab is supported by the target.  */

opt_machine_mode
get_len_load_store_mode (machine_mode mode, bool is_load, internal_fn *ifn)
{
  gcc_assert (VECTOR_MODE_P (mode));

  /* Check if length in lanes is supported for this mode directly.  */
  if (target_supports_len_load_store_p (mode, is_load, ifn))
    return mode;

  /* Check if length in bytes is supported for the same vector size VnQI.  */
  machine_mode vmode;
  poly_uint64 nunits = GET_MODE_SIZE (mode);
  if (related_vector_mode (mode, QImode, nunits).exists (&vmode)
      && target_supports_len_load_store_p (vmode, is_load, ifn))
    return vmode;

  return opt_machine_mode ();
}
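
/* Usage sketch, assuming a target that provides only the byte-length
   flavor (the modes are hypothetical): querying a V4SI vector falls
   through to the VnQI check and returns the same-sized QI vector mode.

     internal_fn ifn;
     opt_machine_mode lmode = get_len_load_store_mode (V4SImode, true, &ifn);
     if (lmode.exists ())
       ...lmode is V4SImode (lanes) or V16QImode (bytes), and ifn is
	  set accordingly...  */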