1 | /* |
2 | * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved. |
3 | * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. |
4 | * Copyright (c) 2005, 2006, 2007 Cisco Systems. All rights reserved. |
5 | * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved. |
6 | * Copyright (c) 2004 Voltaire, Inc. All rights reserved. |
7 | * |
8 | * This software is available to you under a choice of one of two |
9 | * licenses. You may choose to be licensed under the terms of the GNU |
10 | * General Public License (GPL) Version 2, available from the file |
11 | * COPYING in the main directory of this source tree, or the |
12 | * OpenIB.org BSD license below: |
13 | * |
14 | * Redistribution and use in source and binary forms, with or |
15 | * without modification, are permitted provided that the following |
16 | * conditions are met: |
17 | * |
18 | * - Redistributions of source code must retain the above |
19 | * copyright notice, this list of conditions and the following |
20 | * disclaimer. |
21 | * |
22 | * - Redistributions in binary form must reproduce the above |
23 | * copyright notice, this list of conditions and the following |
24 | * disclaimer in the documentation and/or other materials |
25 | * provided with the distribution. |
26 | * |
27 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, |
28 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF |
29 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND |
30 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS |
31 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN |
32 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN |
33 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE |
34 | * SOFTWARE. |
35 | */ |
36 | |
37 | #ifndef MLX4_H |
38 | #define MLX4_H |
39 | |
40 | #include <linux/mutex.h> |
41 | #include <linux/radix-tree.h> |
42 | #include <linux/rbtree.h> |
43 | #include <linux/timer.h> |
44 | #include <linux/semaphore.h> |
45 | #include <linux/workqueue.h> |
46 | #include <linux/interrupt.h> |
47 | #include <linux/spinlock.h> |
48 | #include <net/devlink.h> |
49 | #include <linux/rwsem.h> |
50 | #include <linux/auxiliary_bus.h> |
51 | #include <linux/notifier.h> |
52 | |
53 | #include <linux/mlx4/device.h> |
54 | #include <linux/mlx4/driver.h> |
55 | #include <linux/mlx4/doorbell.h> |
56 | #include <linux/mlx4/cmd.h> |
57 | #include "fw_qos.h" |
58 | |
59 | #define DRV_NAME "mlx4_core" |
60 | #define DRV_VERSION "4.0-0" |
61 | #define DRV_NAME_FOR_FW "Linux," DRV_NAME "," DRV_VERSION |
62 | |
63 | #define MLX4_FS_UDP_UC_EN (1 << 1) |
64 | #define MLX4_FS_TCP_UC_EN (1 << 2) |
65 | #define MLX4_FS_NUM_OF_L2_ADDR 8 |
66 | #define MLX4_FS_MGM_LOG_ENTRY_SIZE 7 |
67 | #define MLX4_FS_NUM_MCG (1 << 17) |
68 | |
69 | #define INIT_HCA_TPT_MW_ENABLE (1 << 7) |
70 | |
71 | #define MLX4_QUERY_IF_STAT_RESET BIT(31) |
72 | |
/*
 * Fixed device offsets and sizes (in bytes) for the Host Command
 * Register (HCR), interrupt-clear register, PF/VF communication
 * channel and the hardware clock.  These match the device's BAR
 * layout and must not be changed.
 */
enum {
	MLX4_HCR_BASE = 0x80680,
	MLX4_HCR_SIZE = 0x0001c,
	MLX4_CLR_INT_SIZE = 0x00008,
	MLX4_SLAVE_COMM_BASE = 0x0,
	MLX4_COMM_PAGESIZE = 0x1000,
	MLX4_CLOCK_SIZE = 0x00008,
	MLX4_COMM_CHAN_CAPS = 0x8,
	MLX4_COMM_CHAN_FLAGS = 0xc
};
83 | |
/*
 * Multicast group (MGM) entry sizing.  An MGM entry packs 4 QPNs per
 * 16 bytes after two 16-byte header chunks, hence the formula for
 * MLX4_MAX_QP_PER_MGM (see struct mlx4_mgm below).
 */
enum {
	MLX4_DEFAULT_MGM_LOG_ENTRY_SIZE = 10,
	MLX4_MIN_MGM_LOG_ENTRY_SIZE = 7,
	MLX4_MAX_MGM_LOG_ENTRY_SIZE = 12,
	MLX4_MAX_QP_PER_MGM = 4 * ((1 << MLX4_MAX_MGM_LOG_ENTRY_SIZE) / 16 - 2),
};
90 | |
91 | enum { |
92 | MLX4_NUM_PDS = 1 << 15 |
93 | }; |
94 | |
95 | enum { |
96 | MLX4_CMPT_TYPE_QP = 0, |
97 | MLX4_CMPT_TYPE_SRQ = 1, |
98 | MLX4_CMPT_TYPE_CQ = 2, |
99 | MLX4_CMPT_TYPE_EQ = 3, |
100 | MLX4_CMPT_NUM_TYPE |
101 | }; |
102 | |
103 | enum { |
104 | MLX4_CMPT_SHIFT = 24, |
105 | MLX4_NUM_CMPTS = MLX4_CMPT_NUM_TYPE << MLX4_CMPT_SHIFT |
106 | }; |
107 | |
108 | enum mlx4_mpt_state { |
109 | MLX4_MPT_DISABLED = 0, |
110 | MLX4_MPT_EN_HW, |
111 | MLX4_MPT_EN_SW |
112 | }; |
113 | |
114 | #define MLX4_COMM_TIME 10000 |
115 | #define MLX4_COMM_OFFLINE_TIME_OUT 30000 |
116 | #define MLX4_COMM_CMD_NA_OP 0x0 |
117 | |
118 | |
119 | enum { |
120 | MLX4_COMM_CMD_RESET, |
121 | MLX4_COMM_CMD_VHCR0, |
122 | MLX4_COMM_CMD_VHCR1, |
123 | MLX4_COMM_CMD_VHCR2, |
124 | MLX4_COMM_CMD_VHCR_EN, |
125 | MLX4_COMM_CMD_VHCR_POST, |
126 | MLX4_COMM_CMD_FLR = 254 |
127 | }; |
128 | |
129 | enum { |
130 | MLX4_VF_SMI_DISABLED, |
131 | MLX4_VF_SMI_ENABLED |
132 | }; |
133 | |
/* The flag indicates that the slave should delay the RESET command. */
135 | #define MLX4_DELAY_RESET_SLAVE 0xbbbbbbb |
/* Indicates how many retries will be done if we are in the middle of an FLR. */
137 | #define NUM_OF_RESET_RETRIES 10 |
138 | #define SLEEP_TIME_IN_RESET (2 * 1000) |
139 | enum mlx4_resource { |
140 | RES_QP, |
141 | RES_CQ, |
142 | RES_SRQ, |
143 | RES_XRCD, |
144 | RES_MPT, |
145 | RES_MTT, |
146 | RES_MAC, |
147 | RES_VLAN, |
148 | RES_NPORT_ID, |
149 | RES_COUNTER, |
150 | RES_FS_RULE, |
151 | RES_EQ, |
152 | MLX4_NUM_OF_RESOURCE_TYPE |
153 | }; |
154 | |
155 | enum mlx4_alloc_mode { |
156 | RES_OP_RESERVE, |
157 | RES_OP_RESERVE_AND_MAP, |
158 | RES_OP_MAP_ICM, |
159 | }; |
160 | |
161 | enum mlx4_res_tracker_free_type { |
162 | RES_TR_FREE_ALL, |
163 | RES_TR_FREE_SLAVES_ONLY, |
164 | RES_TR_FREE_STRUCTS_ONLY, |
165 | }; |
166 | |
167 | /* |
168 | *Virtual HCR structures. |
169 | * mlx4_vhcr is the sw representation, in machine endianness |
170 | * |
171 | * mlx4_vhcr_cmd is the formalized structure, the one that is passed |
172 | * to FW to go through communication channel. |
173 | * It is big endian, and has the same structure as the physical HCR |
174 | * used by command interface |
175 | */ |
/* Software (host-endian) view of one virtual HCR command. */
struct mlx4_vhcr {
	u64 in_param;
	u64 out_param;
	u32 in_modifier;
	u32 errno;		/* host-side result of executing the command */
	u16 op;			/* command opcode */
	u16 token;		/* matches completions back to requests */
	u8 op_modifier;
	u8 e_bit;		/* NOTE(review): presumably the "event" completion bit — confirm */
};
186 | |
/*
 * Wire format of the virtual HCR: big endian, same layout as the
 * physical HCR used by the command interface.  Field order and sizes
 * are part of the device/FW contract — do not reorder.
 */
struct mlx4_vhcr_cmd {
	__be64 in_param;
	__be32 in_modifier;
	u32 reserved1;
	__be64 out_param;
	__be16 token;
	u16 reserved;
	u8 status;		/* FW completion status */
	u8 flags;
	__be16 opcode;
};
198 | |
/*
 * Dispatch-table entry describing how a command issued by a slave (VF)
 * through the virtual HCR is validated and executed on its behalf.
 */
struct mlx4_cmd_info {
	u16 opcode;		/* FW command opcode this entry handles */
	bool has_inbox;		/* command carries an input mailbox */
	bool has_outbox;	/* command returns an output mailbox */
	bool out_is_imm;	/* result returned immediately in out_param — TODO confirm */
	bool encode_slave_id;
	/* Optional pre-execution validation hook; NULL means no check. */
	int (*verify)(struct mlx4_dev *dev, int slave, struct mlx4_vhcr *vhcr,
		      struct mlx4_cmd_mailbox *inbox);
	/* Executes/translates the command on behalf of the slave. */
	int (*wrapper)(struct mlx4_dev *dev, int slave, struct mlx4_vhcr *vhcr,
		       struct mlx4_cmd_mailbox *inbox,
		       struct mlx4_cmd_mailbox *outbox,
		       struct mlx4_cmd_info *cmd);
};
212 | |
213 | #ifdef CONFIG_MLX4_DEBUG |
214 | extern int mlx4_debug_level; |
215 | #else /* CONFIG_MLX4_DEBUG */ |
216 | #define mlx4_debug_level (0) |
217 | #endif /* CONFIG_MLX4_DEBUG */ |
218 | |
219 | #define mlx4_dbg(mdev, format, ...) \ |
220 | do { \ |
221 | if (mlx4_debug_level) \ |
222 | dev_printk(KERN_DEBUG, \ |
223 | &(mdev)->persist->pdev->dev, format, \ |
224 | ##__VA_ARGS__); \ |
225 | } while (0) |
226 | |
227 | #define mlx4_err(mdev, format, ...) \ |
228 | dev_err(&(mdev)->persist->pdev->dev, format, ##__VA_ARGS__) |
229 | #define mlx4_info(mdev, format, ...) \ |
230 | dev_info(&(mdev)->persist->pdev->dev, format, ##__VA_ARGS__) |
231 | #define mlx4_warn(mdev, format, ...) \ |
232 | dev_warn(&(mdev)->persist->pdev->dev, format, ##__VA_ARGS__) |
233 | |
234 | extern int log_mtts_per_seg; |
235 | extern int mlx4_internal_err_reset; |
236 | |
237 | #define MLX4_MAX_NUM_SLAVES (min(MLX4_MAX_NUM_PF + MLX4_MAX_NUM_VF, \ |
238 | MLX4_MFUNC_MAX)) |
239 | #define ALL_SLAVES 0xff |
240 | |
/*
 * Simple ID allocator over a bit array, with regions reservable at the
 * bottom and top of the range (see mlx4_bitmap_init()).  All fields
 * are protected by 'lock'.
 */
struct mlx4_bitmap {
	u32 last;		/* last allocated index, used as a search hint — TODO confirm */
	u32 top;
	u32 max;
	u32 reserved_top;	/* number of IDs reserved at the top of the range */
	u32 mask;
	u32 avail;		/* number of free IDs remaining */
	u32 effective_len;
	spinlock_t lock;	/* protects the bit array and the counters above */
	unsigned long *table;	/* backing bit array */
};
252 | |
/*
 * Buddy allocator; used for MTT range allocation (see
 * struct mlx4_mr_table::mtt_buddy).  Protected by 'lock'.
 */
struct mlx4_buddy {
	unsigned long **bits;	/* one free-block bitmap per order */
	unsigned int *num_free;	/* free-block count per order */
	u32 max_order;		/* largest allocation order supported */
	spinlock_t lock;
};
259 | |
260 | struct mlx4_icm; |
261 | |
/*
 * One device context table living in ICM (InfiniHost Context Memory),
 * mapped to the device in chunks tracked by the 'icm' array.
 */
struct mlx4_icm_table {
	u64 virt;		/* device-virtual base address of the table */
	int num_icm;		/* number of ICM chunks backing the table */
	u32 num_obj;		/* capacity in objects */
	int obj_size;		/* size of one object, in bytes */
	int lowmem;		/* NOTE(review): presumably selects lowmem allocation — confirm */
	int coherent;		/* allocate DMA-coherent memory for the chunks */
	struct mutex mutex;	/* serializes chunk map/unmap */
	struct mlx4_icm **icm;	/* per-chunk allocation state; NULL if unmapped */
};
272 | |
273 | #define MLX4_MPT_FLAG_SW_OWNS (0xfUL << 28) |
274 | #define MLX4_MPT_FLAG_FREE (0x3UL << 28) |
275 | #define MLX4_MPT_FLAG_MIO (1 << 17) |
276 | #define MLX4_MPT_FLAG_BIND_ENABLE (1 << 15) |
277 | #define MLX4_MPT_FLAG_PHYSICAL (1 << 9) |
278 | #define MLX4_MPT_FLAG_REGION (1 << 8) |
279 | |
280 | #define MLX4_MPT_PD_MASK (0x1FFFFUL) |
281 | #define MLX4_MPT_PD_VF_MASK (0xFE0000UL) |
282 | #define MLX4_MPT_PD_FLAG_FAST_REG (1 << 27) |
283 | #define MLX4_MPT_PD_FLAG_RAE (1 << 28) |
284 | #define MLX4_MPT_PD_FLAG_EN_INV (3 << 24) |
285 | |
286 | #define MLX4_MPT_QP_FLAG_BOUND_QP (1 << 7) |
287 | |
288 | #define MLX4_MPT_STATUS_SW 0xF0 |
289 | #define MLX4_MPT_STATUS_HW 0x00 |
290 | |
291 | #define MLX4_CQE_SIZE_MASK_STRIDE 0x3 |
292 | #define MLX4_EQE_SIZE_MASK_STRIDE 0x30 |
293 | |
294 | #define MLX4_EQ_ASYNC 0 |
295 | #define MLX4_EQ_TO_CQ_VECTOR(vector) ((vector) - \ |
296 | !!((int)(vector) >= MLX4_EQ_ASYNC)) |
297 | #define MLX4_CQ_TO_EQ_VECTOR(vector) ((vector) + \ |
298 | !!((int)(vector) >= MLX4_EQ_ASYNC)) |
299 | |
300 | /* |
301 | * Must be packed because mtt_seg is 64 bits but only aligned to 32 bits. |
302 | */ |
303 | struct mlx4_mpt_entry { |
304 | __be32 flags; |
305 | __be32 qpn; |
306 | __be32 key; |
307 | __be32 pd_flags; |
308 | __be64 start; |
309 | __be64 length; |
310 | __be32 lkey; |
311 | __be32 win_cnt; |
312 | u8 reserved1[3]; |
313 | u8 mtt_rep; |
314 | __be64 mtt_addr; |
315 | __be32 mtt_sz; |
316 | __be32 entity_size; |
317 | __be32 first_byte_offset; |
318 | } __packed; |
319 | |
320 | /* |
321 | * Must be packed because start is 64 bits but only aligned to 32 bits. |
322 | */ |
323 | struct mlx4_eq_context { |
324 | __be32 flags; |
325 | u16 reserved1[3]; |
326 | __be16 page_offset; |
327 | u8 log_eq_size; |
328 | u8 reserved2[4]; |
329 | u8 eq_period; |
330 | u8 reserved3; |
331 | u8 eq_max_count; |
332 | u8 reserved4[3]; |
333 | u8 intr; |
334 | u8 log_page_size; |
335 | u8 reserved5[2]; |
336 | u8 mtt_base_addr_h; |
337 | __be32 mtt_base_addr_l; |
338 | u32 reserved6[2]; |
339 | __be32 consumer_index; |
340 | __be32 producer_index; |
341 | u32 reserved7[4]; |
342 | }; |
343 | |
344 | struct mlx4_cq_context { |
345 | __be32 flags; |
346 | u16 reserved1[3]; |
347 | __be16 page_offset; |
348 | __be32 logsize_usrpage; |
349 | __be16 cq_period; |
350 | __be16 cq_max_count; |
351 | u8 reserved2[3]; |
352 | u8 comp_eqn; |
353 | u8 log_page_size; |
354 | u8 reserved3[2]; |
355 | u8 mtt_base_addr_h; |
356 | __be32 mtt_base_addr_l; |
357 | __be32 last_notified_index; |
358 | __be32 solicit_producer_index; |
359 | __be32 consumer_index; |
360 | __be32 producer_index; |
361 | u32 reserved4[2]; |
362 | __be64 db_rec_addr; |
363 | }; |
364 | |
365 | struct mlx4_srq_context { |
366 | __be32 state_logsize_srqn; |
367 | u8 logstride; |
368 | u8 reserved1; |
369 | __be16 xrcd; |
370 | __be32 pg_offset_cqn; |
371 | u32 reserved2; |
372 | u8 log_page_size; |
373 | u8 reserved3[2]; |
374 | u8 mtt_base_addr_h; |
375 | __be32 mtt_base_addr_l; |
376 | __be32 pd; |
377 | __be16 limit_watermark; |
378 | __be16 wqe_cnt; |
379 | u16 reserved4; |
380 | __be16 wqe_counter; |
381 | u32 reserved5; |
382 | __be64 db_rec_addr; |
383 | }; |
384 | |
385 | struct mlx4_eq_tasklet { |
386 | struct list_head list; |
387 | struct list_head process_list; |
388 | struct tasklet_struct task; |
389 | /* lock on completion tasklet list */ |
390 | spinlock_t lock; |
391 | }; |
392 | |
393 | struct mlx4_eq { |
394 | struct mlx4_dev *dev; |
395 | void __iomem *doorbell; |
396 | int eqn; |
397 | u32 cons_index; |
398 | u16 irq; |
399 | u16 have_irq; |
400 | int nent; |
401 | struct mlx4_buf_list *page_list; |
402 | struct mlx4_mtt mtt; |
403 | struct mlx4_eq_tasklet tasklet_ctx; |
404 | struct mlx4_active_ports actv_ports; |
405 | u32 ref_count; |
406 | cpumask_var_t affinity_mask; |
407 | }; |
408 | |
409 | struct mlx4_slave_eqe { |
410 | u8 type; |
411 | u8 port; |
412 | u32 param; |
413 | }; |
414 | |
415 | struct mlx4_slave_event_eq_info { |
416 | int eqn; |
417 | u16 token; |
418 | }; |
419 | |
420 | struct mlx4_profile { |
421 | int num_qp; |
422 | int rdmarc_per_qp; |
423 | int num_srq; |
424 | int num_cq; |
425 | int num_mcg; |
426 | int num_mpt; |
427 | unsigned num_mtt; |
428 | }; |
429 | |
430 | struct mlx4_fw { |
431 | u64 clr_int_base; |
432 | u64 catas_offset; |
433 | u64 comm_base; |
434 | u64 clock_offset; |
435 | struct mlx4_icm *fw_icm; |
436 | struct mlx4_icm *aux_icm; |
437 | u32 catas_size; |
438 | u16 fw_pages; |
439 | u8 clr_int_bar; |
440 | u8 catas_bar; |
441 | u8 comm_bar; |
442 | u8 clock_bar; |
443 | }; |
444 | |
445 | struct mlx4_comm { |
446 | u32 slave_write; |
447 | u32 slave_read; |
448 | }; |
449 | |
450 | enum { |
451 | MLX4_MCAST_CONFIG = 0, |
452 | MLX4_MCAST_DISABLE = 1, |
453 | MLX4_MCAST_ENABLE = 2, |
454 | }; |
455 | |
456 | #define VLAN_FLTR_SIZE 128 |
457 | |
458 | struct mlx4_vlan_fltr { |
459 | __be32 entry[VLAN_FLTR_SIZE]; |
460 | }; |
461 | |
462 | struct mlx4_mcast_entry { |
463 | struct list_head list; |
464 | u64 addr; |
465 | }; |
466 | |
467 | struct mlx4_promisc_qp { |
468 | struct list_head list; |
469 | u32 qpn; |
470 | }; |
471 | |
472 | struct mlx4_steer_index { |
473 | struct list_head list; |
474 | unsigned int index; |
475 | struct list_head duplicates; |
476 | }; |
477 | |
478 | #define MLX4_EVENT_TYPES_NUM 64 |
479 | |
480 | struct mlx4_slave_state { |
481 | u8 comm_toggle; |
482 | u8 last_cmd; |
483 | u8 init_port_mask; |
484 | bool active; |
485 | bool old_vlan_api; |
486 | bool vst_qinq_supported; |
487 | u8 function; |
488 | dma_addr_t vhcr_dma; |
489 | u16 user_mtu[MLX4_MAX_PORTS + 1]; |
490 | u16 mtu[MLX4_MAX_PORTS + 1]; |
491 | __be32 ib_cap_mask[MLX4_MAX_PORTS + 1]; |
492 | struct mlx4_slave_eqe eq[MLX4_MFUNC_MAX_EQES]; |
493 | struct list_head mcast_filters[MLX4_MAX_PORTS + 1]; |
494 | struct mlx4_vlan_fltr *vlan_filter[MLX4_MAX_PORTS + 1]; |
495 | /* event type to eq number lookup */ |
496 | struct mlx4_slave_event_eq_info event_eq[MLX4_EVENT_TYPES_NUM]; |
497 | u16 eq_pi; |
498 | u16 eq_ci; |
499 | spinlock_t lock; |
500 | /*initialized via the kzalloc*/ |
501 | u8 is_slave_going_down; |
502 | u32 cookie; |
503 | enum slave_port_state port_state[MLX4_MAX_PORTS + 1]; |
504 | }; |
505 | |
506 | #define MLX4_VGT 4095 |
507 | #define NO_INDX (-1) |
508 | |
509 | struct mlx4_vport_state { |
510 | u64 mac; |
511 | u16 default_vlan; |
512 | u8 default_qos; |
513 | __be16 vlan_proto; |
514 | u32 tx_rate; |
515 | bool spoofchk; |
516 | u32 link_state; |
517 | u8 qos_vport; |
518 | __be64 guid; |
519 | }; |
520 | |
521 | struct mlx4_vf_admin_state { |
522 | struct mlx4_vport_state vport[MLX4_MAX_PORTS + 1]; |
523 | u8 enable_smi[MLX4_MAX_PORTS + 1]; |
524 | }; |
525 | |
526 | struct mlx4_vport_oper_state { |
527 | struct mlx4_vport_state state; |
528 | int mac_idx; |
529 | int vlan_idx; |
530 | }; |
531 | |
532 | struct mlx4_vf_oper_state { |
533 | struct mlx4_vport_oper_state vport[MLX4_MAX_PORTS + 1]; |
534 | u8 smi_enabled[MLX4_MAX_PORTS + 1]; |
535 | }; |
536 | |
537 | struct slave_list { |
538 | struct mutex mutex; |
539 | struct list_head res_list[MLX4_NUM_OF_RESOURCE_TYPE]; |
540 | }; |
541 | |
542 | struct resource_allocator { |
543 | spinlock_t alloc_lock; /* protect quotas */ |
544 | union { |
545 | unsigned int res_reserved; |
546 | unsigned int res_port_rsvd[MLX4_MAX_PORTS]; |
547 | }; |
548 | union { |
549 | int res_free; |
550 | int res_port_free[MLX4_MAX_PORTS]; |
551 | }; |
552 | int *quota; |
553 | int *allocated; |
554 | int *guaranteed; |
555 | }; |
556 | |
557 | struct mlx4_resource_tracker { |
558 | spinlock_t lock; |
559 | /* tree for each resources */ |
560 | struct rb_root res_tree[MLX4_NUM_OF_RESOURCE_TYPE]; |
561 | /* num_of_slave's lists, one per slave */ |
562 | struct slave_list *slave_list; |
563 | struct resource_allocator res_alloc[MLX4_NUM_OF_RESOURCE_TYPE]; |
564 | }; |
565 | |
566 | #define SLAVE_EVENT_EQ_SIZE 128 |
567 | struct mlx4_slave_event_eq { |
568 | u32 eqn; |
569 | u32 cons; |
570 | u32 prod; |
571 | spinlock_t event_lock; |
572 | struct mlx4_eqe event_eqe[SLAVE_EVENT_EQ_SIZE]; |
573 | }; |
574 | |
575 | struct mlx4_qos_manager { |
576 | int num_of_qos_vfs; |
577 | DECLARE_BITMAP(priority_bm, MLX4_NUM_UP); |
578 | }; |
579 | |
580 | struct mlx4_master_qp0_state { |
581 | int proxy_qp0_active; |
582 | int qp0_active; |
583 | int port_active; |
584 | }; |
585 | |
586 | struct mlx4_mfunc_master_ctx { |
587 | struct mlx4_slave_state *slave_state; |
588 | struct mlx4_vf_admin_state *vf_admin; |
589 | struct mlx4_vf_oper_state *vf_oper; |
590 | struct mlx4_master_qp0_state qp0_state[MLX4_MAX_PORTS + 1]; |
591 | int init_port_ref[MLX4_MAX_PORTS + 1]; |
592 | u16 max_mtu[MLX4_MAX_PORTS + 1]; |
593 | u16 max_user_mtu[MLX4_MAX_PORTS + 1]; |
594 | u8 pptx; |
595 | u8 pprx; |
596 | int disable_mcast_ref[MLX4_MAX_PORTS + 1]; |
597 | struct mlx4_resource_tracker res_tracker; |
598 | struct workqueue_struct *comm_wq; |
599 | struct work_struct comm_work; |
600 | struct work_struct slave_event_work; |
601 | struct work_struct slave_flr_event_work; |
602 | spinlock_t slave_state_lock; |
603 | __be32 comm_arm_bit_vector[4]; |
604 | struct mlx4_eqe cmd_eqe; |
605 | struct mlx4_slave_event_eq slave_eq; |
606 | struct mutex gen_eqe_mutex[MLX4_MFUNC_MAX]; |
607 | struct mlx4_qos_manager qos_ctl[MLX4_MAX_PORTS + 1]; |
608 | u32 next_slave; /* mlx4_master_comm_channel */ |
609 | }; |
610 | |
611 | struct mlx4_mfunc { |
612 | struct mlx4_comm __iomem *comm; |
613 | struct mlx4_vhcr_cmd *vhcr; |
614 | dma_addr_t vhcr_dma; |
615 | |
616 | struct mlx4_mfunc_master_ctx master; |
617 | }; |
618 | |
619 | #define MGM_QPN_MASK 0x00FFFFFF |
620 | #define MGM_BLCK_LB_BIT 30 |
621 | |
622 | struct mlx4_mgm { |
623 | __be32 next_gid_index; |
624 | __be32 members_count; |
625 | u32 reserved[2]; |
626 | u8 gid[16]; |
627 | __be32 qp[MLX4_MAX_QP_PER_MGM]; |
628 | }; |
629 | |
/* State of the FW command interface (supports polled and event modes). */
struct mlx4_cmd {
	struct dma_pool *pool;		/* DMA pool for command mailboxes */
	void __iomem *hcr;		/* mapped Host Command Register */
	struct mutex slave_cmd_mutex;	/* serializes slave command handling */
	struct semaphore poll_sem;	/* serializes polled-mode commands */
	struct semaphore event_sem;	/* bounds in-flight event-mode commands */
	struct rw_semaphore switch_sem;	/* guards poll<->event mode switching — TODO confirm */
	int max_cmds;			/* max concurrent event-mode commands */
	spinlock_t context_lock;	/* protects 'context' free list */
	int free_head;			/* index of first free command context */
	struct mlx4_cmd_context *context;
	u16 token_mask;
	u8 use_events;			/* non-zero when event mode is active */
	u8 toggle;
	u8 comm_toggle;
	u8 initialized;
};
647 | |
648 | enum { |
649 | MLX4_VF_IMMED_VLAN_FLAG_VLAN = 1 << 0, |
650 | MLX4_VF_IMMED_VLAN_FLAG_QOS = 1 << 1, |
651 | MLX4_VF_IMMED_VLAN_FLAG_LINK_DISABLE = 1 << 2, |
652 | }; |
653 | struct mlx4_vf_immed_vlan_work { |
654 | struct work_struct work; |
655 | struct mlx4_priv *priv; |
656 | int flags; |
657 | int slave; |
658 | int vlan_ix; |
659 | int orig_vlan_ix; |
660 | u8 port; |
661 | u8 qos; |
662 | u8 qos_vport; |
663 | u16 vlan_id; |
664 | u16 orig_vlan_id; |
665 | __be16 vlan_proto; |
666 | }; |
667 | |
668 | |
669 | struct mlx4_uar_table { |
670 | struct mlx4_bitmap bitmap; |
671 | }; |
672 | |
673 | struct mlx4_mr_table { |
674 | struct mlx4_bitmap mpt_bitmap; |
675 | struct mlx4_buddy mtt_buddy; |
676 | u64 mtt_base; |
677 | u64 mpt_base; |
678 | struct mlx4_icm_table mtt_table; |
679 | struct mlx4_icm_table dmpt_table; |
680 | }; |
681 | |
682 | struct mlx4_cq_table { |
683 | struct mlx4_bitmap bitmap; |
684 | spinlock_t lock; |
685 | struct radix_tree_root tree; |
686 | struct mlx4_icm_table table; |
687 | struct mlx4_icm_table cmpt_table; |
688 | }; |
689 | |
690 | struct mlx4_eq_table { |
691 | struct mlx4_bitmap bitmap; |
692 | char *irq_names; |
693 | void __iomem *clr_int; |
694 | void __iomem **uar_map; |
695 | u32 clr_mask; |
696 | struct mlx4_eq *eq; |
697 | struct mlx4_icm_table table; |
698 | struct mlx4_icm_table cmpt_table; |
699 | int have_irq; |
700 | u8 inta_pin; |
701 | }; |
702 | |
703 | struct mlx4_srq_table { |
704 | struct mlx4_bitmap bitmap; |
705 | spinlock_t lock; |
706 | struct radix_tree_root tree; |
707 | struct mlx4_icm_table table; |
708 | struct mlx4_icm_table cmpt_table; |
709 | }; |
710 | |
/*
 * Zones of the QP-number space handled by the QP zone allocator; each
 * zone gets its own uid in mlx4_qp_table::zones_uids[].
 *
 * Fix: the RSS enumerator had been deleted, leaving a bare ','
 * (a syntax error) and shifting the values of the following
 * enumerators used to size/index zones_uids[].
 */
enum mlx4_qp_table_zones {
	MLX4_QP_TABLE_ZONE_GENERAL,
	MLX4_QP_TABLE_ZONE_RSS,
	MLX4_QP_TABLE_ZONE_RAW_ETH,
	MLX4_QP_TABLE_ZONE_NUM
};
717 | |
/* QP-number allocator state plus the ICM tables backing QP contexts. */
struct mlx4_qp_table {
	struct mlx4_bitmap *bitmap_gen;		/* bitmap for the general zone */
	struct mlx4_zone_allocator *zones;
	u32 zones_uids[MLX4_QP_TABLE_ZONE_NUM];	/* one allocator uid per zone */
	u32 rdmarc_base;
	int rdmarc_shift;			/* log2 of RDMARC entries per QP — TODO confirm */
	spinlock_t lock;
	struct mlx4_icm_table qp_table;		/* QP contexts */
	struct mlx4_icm_table auxc_table;	/* auxiliary contexts */
	struct mlx4_icm_table altc_table;	/* alternate-path contexts */
	struct mlx4_icm_table rdmarc_table;	/* RDMA read/atomic contexts */
	struct mlx4_icm_table cmpt_table;
};
731 | |
732 | struct mlx4_mcg_table { |
733 | struct mutex mutex; |
734 | struct mlx4_bitmap bitmap; |
735 | struct mlx4_icm_table table; |
736 | }; |
737 | |
738 | struct mlx4_catas_err { |
739 | u32 __iomem *map; |
740 | struct timer_list timer; |
741 | struct list_head list; |
742 | }; |
743 | |
744 | #define MLX4_MAX_MAC_NUM 128 |
745 | #define MLX4_MAC_TABLE_SIZE (MLX4_MAX_MAC_NUM << 3) |
746 | |
747 | struct mlx4_mac_table { |
748 | __be64 entries[MLX4_MAX_MAC_NUM]; |
749 | int refs[MLX4_MAX_MAC_NUM]; |
750 | bool is_dup[MLX4_MAX_MAC_NUM]; |
751 | struct mutex mutex; |
752 | int total; |
753 | int max; |
754 | }; |
755 | |
756 | #define MLX4_ROCE_GID_ENTRY_SIZE 16 |
757 | |
758 | struct mlx4_roce_gid_entry { |
759 | u8 raw[MLX4_ROCE_GID_ENTRY_SIZE]; |
760 | }; |
761 | |
762 | struct mlx4_roce_gid_table { |
763 | struct mlx4_roce_gid_entry roce_gids[MLX4_ROCE_MAX_GIDS]; |
764 | struct mutex mutex; |
765 | }; |
766 | |
767 | #define MLX4_MAX_VLAN_NUM 128 |
768 | #define MLX4_VLAN_TABLE_SIZE (MLX4_MAX_VLAN_NUM << 2) |
769 | |
770 | struct mlx4_vlan_table { |
771 | __be32 entries[MLX4_MAX_VLAN_NUM]; |
772 | int refs[MLX4_MAX_VLAN_NUM]; |
773 | int is_dup[MLX4_MAX_VLAN_NUM]; |
774 | struct mutex mutex; |
775 | int total; |
776 | int max; |
777 | }; |
778 | |
779 | #define SET_PORT_GEN_ALL_VALID (MLX4_FLAG_V_MTU_MASK | \ |
780 | MLX4_FLAG_V_PPRX_MASK | \ |
781 | MLX4_FLAG_V_PPTX_MASK) |
782 | #define SET_PORT_PROMISC_SHIFT 31 |
783 | #define SET_PORT_MC_PROMISC_SHIFT 30 |
784 | |
785 | enum { |
786 | MCAST_DIRECT_ONLY = 0, |
787 | MCAST_DIRECT = 1, |
788 | MCAST_DEFAULT = 2 |
789 | }; |
790 | |
791 | |
792 | struct mlx4_set_port_general_context { |
793 | u16 reserved1; |
794 | u8 flags2; |
795 | u8 flags; |
796 | union { |
797 | u8 ignore_fcs; |
798 | u8 roce_mode; |
799 | }; |
800 | u8 reserved2; |
801 | __be16 mtu; |
802 | u8 pptx; |
803 | u8 pfctx; |
804 | u16 reserved3; |
805 | u8 pprx; |
806 | u8 pfcrx; |
807 | u16 reserved4; |
808 | u32 reserved5; |
809 | u8 phv_en; |
810 | u8 reserved6[5]; |
811 | __be16 user_mtu; |
812 | u16 reserved7; |
813 | u8 user_mac[6]; |
814 | }; |
815 | |
/*
 * Mailbox layout used with SET_PORT to configure receive-QP
 * calculation (default/steering QPs per MAC/VLAN/priority).
 * NOTE(review): "rererved" is a long-standing misspelling of
 * "reserved"; kept as-is since renaming would break referencing code.
 */
struct mlx4_set_port_rqp_calc_context {
	__be32 base_qpn;	/* base of the port's receive QP range */
	u8 rererved;
	u8 n_mac;
	u8 n_vlan;
	u8 n_prio;
	u8 reserved2[3];
	u8 mac_miss;
	u8 intra_no_vlan;
	u8 no_vlan;
	u8 intra_vlan_miss;
	u8 vlan_miss;
	u8 reserved3[3];
	u8 no_vlan_prio;
	__be32 promisc;
	__be32 mcast;
};
833 | |
834 | struct mlx4_port_info { |
835 | struct mlx4_dev *dev; |
836 | int port; |
837 | char dev_name[16]; |
838 | struct device_attribute port_attr; |
839 | enum mlx4_port_type tmp_type; |
840 | char dev_mtu_name[16]; |
841 | struct device_attribute port_mtu_attr; |
842 | struct mlx4_mac_table mac_table; |
843 | struct mlx4_vlan_table vlan_table; |
844 | struct mlx4_roce_gid_table gid_table; |
845 | int base_qpn; |
846 | struct cpu_rmap *rmap; |
847 | struct devlink_port devlink_port; |
848 | }; |
849 | |
850 | struct mlx4_sense { |
851 | struct mlx4_dev *dev; |
852 | u8 do_sense_port[MLX4_MAX_PORTS + 1]; |
853 | u8 sense_allowed[MLX4_MAX_PORTS + 1]; |
854 | struct delayed_work sense_poll; |
855 | }; |
856 | |
857 | struct mlx4_msix_ctl { |
858 | DECLARE_BITMAP(pool_bm, MAX_MSIX); |
859 | struct mutex pool_lock; |
860 | }; |
861 | |
862 | struct mlx4_steer { |
863 | struct list_head promisc_qps[MLX4_NUM_STEERS]; |
864 | struct list_head steer_entries[MLX4_NUM_STEERS]; |
865 | }; |
866 | |
867 | struct mlx4_port_map { |
868 | u8 port1; |
869 | u8 port2; |
870 | }; |
871 | |
872 | enum { |
873 | MLX4_PCI_DEV_IS_VF = 1 << 0, |
874 | MLX4_PCI_DEV_FORCE_SENSE_PORT = 1 << 1, |
875 | }; |
876 | |
877 | enum { |
878 | MLX4_NO_RR = 0, |
879 | MLX4_USE_RR = 1, |
880 | }; |
881 | |
882 | struct mlx4_priv { |
883 | struct mlx4_dev dev; |
884 | |
885 | struct mlx4_adev **adev; |
886 | int adev_idx; |
887 | struct atomic_notifier_head event_nh; |
888 | |
889 | int pci_dev_data; |
890 | int removed; |
891 | |
892 | struct list_head pgdir_list; |
893 | struct mutex pgdir_mutex; |
894 | |
895 | struct mlx4_fw fw; |
896 | struct mlx4_cmd cmd; |
897 | struct mlx4_mfunc mfunc; |
898 | |
899 | struct mlx4_bitmap pd_bitmap; |
900 | struct mlx4_bitmap xrcd_bitmap; |
901 | struct mlx4_uar_table uar_table; |
902 | struct mlx4_mr_table mr_table; |
903 | struct mlx4_cq_table cq_table; |
904 | struct mlx4_eq_table eq_table; |
905 | struct mlx4_srq_table srq_table; |
906 | struct mlx4_qp_table qp_table; |
907 | struct mlx4_mcg_table mcg_table; |
908 | struct mlx4_bitmap counters_bitmap; |
909 | int def_counter[MLX4_MAX_PORTS]; |
910 | |
911 | struct mlx4_catas_err catas_err; |
912 | |
913 | void __iomem *clr_base; |
914 | |
915 | struct mlx4_uar driver_uar; |
916 | void __iomem *kar; |
917 | struct mlx4_port_info port[MLX4_MAX_PORTS + 1]; |
918 | struct mlx4_sense sense; |
919 | struct mutex port_mutex; |
920 | struct mlx4_msix_ctl msix_ctl; |
921 | struct mlx4_steer *steer; |
922 | struct list_head bf_list; |
923 | struct mutex bf_mutex; |
924 | struct io_mapping *bf_mapping; |
925 | void __iomem *clock_mapping; |
926 | int reserved_mtts; |
927 | int fs_hash_mode; |
928 | u8 virt2phys_pkey[MLX4_MFUNC_MAX][MLX4_MAX_PORTS][MLX4_MAX_PORT_PKEYS]; |
929 | struct mlx4_port_map v2p; /* cached port mapping configuration */ |
930 | struct mutex bond_mutex; /* for bond mode */ |
931 | __be64 slave_node_guids[MLX4_MFUNC_MAX]; |
932 | |
933 | atomic_t opreq_count; |
934 | struct work_struct opreq_task; |
935 | }; |
936 | |
/* Upcast from the embedded struct mlx4_dev to its containing mlx4_priv. */
static inline struct mlx4_priv *mlx4_priv(struct mlx4_dev *dev)
{
	return container_of(dev, struct mlx4_priv, dev);
}
941 | |
942 | #define MLX4_SENSE_RANGE (HZ * 3) |
943 | |
944 | extern struct workqueue_struct *mlx4_wq; |
945 | |
946 | u32 mlx4_bitmap_alloc(struct mlx4_bitmap *bitmap); |
947 | void mlx4_bitmap_free(struct mlx4_bitmap *bitmap, u32 obj, int use_rr); |
948 | u32 mlx4_bitmap_alloc_range(struct mlx4_bitmap *bitmap, int cnt, |
949 | int align, u32 skip_mask); |
950 | void mlx4_bitmap_free_range(struct mlx4_bitmap *bitmap, u32 obj, int cnt, |
951 | int use_rr); |
952 | u32 mlx4_bitmap_avail(struct mlx4_bitmap *bitmap); |
953 | int mlx4_bitmap_init(struct mlx4_bitmap *bitmap, u32 num, u32 mask, |
954 | u32 reserved_bot, u32 resetrved_top); |
955 | void mlx4_bitmap_cleanup(struct mlx4_bitmap *bitmap); |
956 | |
957 | int mlx4_reset(struct mlx4_dev *dev); |
958 | |
959 | int mlx4_alloc_eq_table(struct mlx4_dev *dev); |
960 | void mlx4_free_eq_table(struct mlx4_dev *dev); |
961 | |
962 | int mlx4_init_pd_table(struct mlx4_dev *dev); |
963 | int mlx4_init_xrcd_table(struct mlx4_dev *dev); |
964 | int mlx4_init_uar_table(struct mlx4_dev *dev); |
965 | int mlx4_init_mr_table(struct mlx4_dev *dev); |
966 | int mlx4_init_eq_table(struct mlx4_dev *dev); |
967 | int mlx4_init_cq_table(struct mlx4_dev *dev); |
968 | int mlx4_init_qp_table(struct mlx4_dev *dev); |
969 | int mlx4_init_srq_table(struct mlx4_dev *dev); |
970 | int mlx4_init_mcg_table(struct mlx4_dev *dev); |
971 | |
972 | void mlx4_cleanup_pd_table(struct mlx4_dev *dev); |
973 | void mlx4_cleanup_xrcd_table(struct mlx4_dev *dev); |
974 | void mlx4_cleanup_uar_table(struct mlx4_dev *dev); |
975 | void mlx4_cleanup_mr_table(struct mlx4_dev *dev); |
976 | void mlx4_cleanup_eq_table(struct mlx4_dev *dev); |
977 | void mlx4_cleanup_cq_table(struct mlx4_dev *dev); |
978 | void mlx4_cleanup_qp_table(struct mlx4_dev *dev); |
979 | void mlx4_cleanup_srq_table(struct mlx4_dev *dev); |
980 | void mlx4_cleanup_mcg_table(struct mlx4_dev *dev); |
981 | int __mlx4_qp_alloc_icm(struct mlx4_dev *dev, int qpn); |
982 | void __mlx4_qp_free_icm(struct mlx4_dev *dev, int qpn); |
983 | int __mlx4_cq_alloc_icm(struct mlx4_dev *dev, int *cqn); |
984 | void __mlx4_cq_free_icm(struct mlx4_dev *dev, int cqn); |
985 | int __mlx4_srq_alloc_icm(struct mlx4_dev *dev, int *srqn); |
986 | void __mlx4_srq_free_icm(struct mlx4_dev *dev, int srqn); |
987 | int __mlx4_mpt_reserve(struct mlx4_dev *dev); |
988 | void __mlx4_mpt_release(struct mlx4_dev *dev, u32 index); |
989 | int __mlx4_mpt_alloc_icm(struct mlx4_dev *dev, u32 index); |
990 | void __mlx4_mpt_free_icm(struct mlx4_dev *dev, u32 index); |
991 | u32 __mlx4_alloc_mtt_range(struct mlx4_dev *dev, int order); |
992 | void __mlx4_free_mtt_range(struct mlx4_dev *dev, u32 first_seg, int order); |
993 | |
994 | int mlx4_WRITE_MTT_wrapper(struct mlx4_dev *dev, int slave, |
995 | struct mlx4_vhcr *vhcr, |
996 | struct mlx4_cmd_mailbox *inbox, |
997 | struct mlx4_cmd_mailbox *outbox, |
998 | struct mlx4_cmd_info *cmd); |
999 | int mlx4_SYNC_TPT_wrapper(struct mlx4_dev *dev, int slave, |
1000 | struct mlx4_vhcr *vhcr, |
1001 | struct mlx4_cmd_mailbox *inbox, |
1002 | struct mlx4_cmd_mailbox *outbox, |
1003 | struct mlx4_cmd_info *cmd); |
1004 | int mlx4_SW2HW_MPT_wrapper(struct mlx4_dev *dev, int slave, |
1005 | struct mlx4_vhcr *vhcr, |
1006 | struct mlx4_cmd_mailbox *inbox, |
1007 | struct mlx4_cmd_mailbox *outbox, |
1008 | struct mlx4_cmd_info *cmd); |
1009 | int mlx4_HW2SW_MPT_wrapper(struct mlx4_dev *dev, int slave, |
1010 | struct mlx4_vhcr *vhcr, |
1011 | struct mlx4_cmd_mailbox *inbox, |
1012 | struct mlx4_cmd_mailbox *outbox, |
1013 | struct mlx4_cmd_info *cmd); |
1014 | int mlx4_QUERY_MPT_wrapper(struct mlx4_dev *dev, int slave, |
1015 | struct mlx4_vhcr *vhcr, |
1016 | struct mlx4_cmd_mailbox *inbox, |
1017 | struct mlx4_cmd_mailbox *outbox, |
1018 | struct mlx4_cmd_info *cmd); |
1019 | int mlx4_SW2HW_EQ_wrapper(struct mlx4_dev *dev, int slave, |
1020 | struct mlx4_vhcr *vhcr, |
1021 | struct mlx4_cmd_mailbox *inbox, |
1022 | struct mlx4_cmd_mailbox *outbox, |
1023 | struct mlx4_cmd_info *cmd); |
1024 | int mlx4_CONFIG_DEV_wrapper(struct mlx4_dev *dev, int slave, |
1025 | struct mlx4_vhcr *vhcr, |
1026 | struct mlx4_cmd_mailbox *inbox, |
1027 | struct mlx4_cmd_mailbox *outbox, |
1028 | struct mlx4_cmd_info *cmd); |
1029 | int mlx4_DMA_wrapper(struct mlx4_dev *dev, int slave, |
1030 | struct mlx4_vhcr *vhcr, |
1031 | struct mlx4_cmd_mailbox *inbox, |
1032 | struct mlx4_cmd_mailbox *outbox, |
1033 | struct mlx4_cmd_info *cmd); |
1034 | int __mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align, |
1035 | int *base, u8 flags); |
1036 | void __mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt); |
1037 | int __mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac); |
1038 | void __mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, u64 mac); |
1039 | int __mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt, |
1040 | int start_index, int npages, u64 *page_list); |
1041 | int __mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx); |
1042 | void __mlx4_counter_free(struct mlx4_dev *dev, u32 idx); |
1043 | int mlx4_calc_vf_counters(struct mlx4_dev *dev, int slave, int port, |
1044 | struct mlx4_counter *data); |
1045 | int __mlx4_xrcd_alloc(struct mlx4_dev *dev, u32 *xrcdn); |
1046 | void __mlx4_xrcd_free(struct mlx4_dev *dev, u32 xrcdn); |
1047 | |
1048 | void mlx4_start_catas_poll(struct mlx4_dev *dev); |
1049 | void mlx4_stop_catas_poll(struct mlx4_dev *dev); |
1050 | int mlx4_catas_init(struct mlx4_dev *dev); |
1051 | void mlx4_catas_end(struct mlx4_dev *dev); |
1052 | int mlx4_crdump_init(struct mlx4_dev *dev); |
1053 | void mlx4_crdump_end(struct mlx4_dev *dev); |
1054 | int mlx4_restart_one(struct pci_dev *pdev); |
1055 | |
1056 | int mlx4_adev_init(struct mlx4_dev *dev); |
1057 | void mlx4_adev_cleanup(struct mlx4_dev *dev); |
1058 | int mlx4_register_device(struct mlx4_dev *dev); |
1059 | void mlx4_unregister_device(struct mlx4_dev *dev); |
1060 | void mlx4_dispatch_event(struct mlx4_dev *dev, enum mlx4_dev_event type, |
1061 | void *param); |
1062 | |
1063 | struct mlx4_dev_cap; |
1064 | struct mlx4_init_hca_param; |
1065 | |
1066 | u64 mlx4_make_profile(struct mlx4_dev *dev, |
1067 | struct mlx4_profile *request, |
1068 | struct mlx4_dev_cap *dev_cap, |
1069 | struct mlx4_init_hca_param *init_hca); |
1070 | void mlx4_master_comm_channel(struct work_struct *work); |
1071 | void mlx4_gen_slave_eqe(struct work_struct *work); |
1072 | void mlx4_master_handle_slave_flr(struct work_struct *work); |
1073 | |
1074 | int mlx4_ALLOC_RES_wrapper(struct mlx4_dev *dev, int slave, |
1075 | struct mlx4_vhcr *vhcr, |
1076 | struct mlx4_cmd_mailbox *inbox, |
1077 | struct mlx4_cmd_mailbox *outbox, |
1078 | struct mlx4_cmd_info *cmd); |
1079 | int mlx4_FREE_RES_wrapper(struct mlx4_dev *dev, int slave, |
1080 | struct mlx4_vhcr *vhcr, |
1081 | struct mlx4_cmd_mailbox *inbox, |
1082 | struct mlx4_cmd_mailbox *outbox, |
1083 | struct mlx4_cmd_info *cmd); |
1084 | int mlx4_MAP_EQ_wrapper(struct mlx4_dev *dev, int slave, |
1085 | struct mlx4_vhcr *vhcr, struct mlx4_cmd_mailbox *inbox, |
1086 | struct mlx4_cmd_mailbox *outbox, |
1087 | struct mlx4_cmd_info *cmd); |
1088 | int mlx4_COMM_INT_wrapper(struct mlx4_dev *dev, int slave, |
1089 | struct mlx4_vhcr *vhcr, |
1090 | struct mlx4_cmd_mailbox *inbox, |
1091 | struct mlx4_cmd_mailbox *outbox, |
1092 | struct mlx4_cmd_info *cmd); |
1093 | int mlx4_HW2SW_EQ_wrapper(struct mlx4_dev *dev, int slave, |
1094 | struct mlx4_vhcr *vhcr, |
1095 | struct mlx4_cmd_mailbox *inbox, |
1096 | struct mlx4_cmd_mailbox *outbox, |
1097 | struct mlx4_cmd_info *cmd); |
1098 | int mlx4_QUERY_EQ_wrapper(struct mlx4_dev *dev, int slave, |
1099 | struct mlx4_vhcr *vhcr, |
1100 | struct mlx4_cmd_mailbox *inbox, |
1101 | struct mlx4_cmd_mailbox *outbox, |
1102 | struct mlx4_cmd_info *cmd); |
1103 | int mlx4_SW2HW_CQ_wrapper(struct mlx4_dev *dev, int slave, |
1104 | struct mlx4_vhcr *vhcr, |
1105 | struct mlx4_cmd_mailbox *inbox, |
1106 | struct mlx4_cmd_mailbox *outbox, |
1107 | struct mlx4_cmd_info *cmd); |
1108 | int mlx4_HW2SW_CQ_wrapper(struct mlx4_dev *dev, int slave, |
1109 | struct mlx4_vhcr *vhcr, |
1110 | struct mlx4_cmd_mailbox *inbox, |
1111 | struct mlx4_cmd_mailbox *outbox, |
1112 | struct mlx4_cmd_info *cmd); |
1113 | int mlx4_QUERY_CQ_wrapper(struct mlx4_dev *dev, int slave, |
1114 | struct mlx4_vhcr *vhcr, |
1115 | struct mlx4_cmd_mailbox *inbox, |
1116 | struct mlx4_cmd_mailbox *outbox, |
1117 | struct mlx4_cmd_info *cmd); |
1118 | int mlx4_MODIFY_CQ_wrapper(struct mlx4_dev *dev, int slave, |
1119 | struct mlx4_vhcr *vhcr, |
1120 | struct mlx4_cmd_mailbox *inbox, |
1121 | struct mlx4_cmd_mailbox *outbox, |
1122 | struct mlx4_cmd_info *cmd); |
1123 | int mlx4_SW2HW_SRQ_wrapper(struct mlx4_dev *dev, int slave, |
1124 | struct mlx4_vhcr *vhcr, |
1125 | struct mlx4_cmd_mailbox *inbox, |
1126 | struct mlx4_cmd_mailbox *outbox, |
1127 | struct mlx4_cmd_info *cmd); |
1128 | int mlx4_HW2SW_SRQ_wrapper(struct mlx4_dev *dev, int slave, |
1129 | struct mlx4_vhcr *vhcr, |
1130 | struct mlx4_cmd_mailbox *inbox, |
1131 | struct mlx4_cmd_mailbox *outbox, |
1132 | struct mlx4_cmd_info *cmd); |
1133 | int mlx4_QUERY_SRQ_wrapper(struct mlx4_dev *dev, int slave, |
1134 | struct mlx4_vhcr *vhcr, |
1135 | struct mlx4_cmd_mailbox *inbox, |
1136 | struct mlx4_cmd_mailbox *outbox, |
1137 | struct mlx4_cmd_info *cmd); |
1138 | int mlx4_ARM_SRQ_wrapper(struct mlx4_dev *dev, int slave, |
1139 | struct mlx4_vhcr *vhcr, |
1140 | struct mlx4_cmd_mailbox *inbox, |
1141 | struct mlx4_cmd_mailbox *outbox, |
1142 | struct mlx4_cmd_info *cmd); |
1143 | int mlx4_GEN_QP_wrapper(struct mlx4_dev *dev, int slave, |
1144 | struct mlx4_vhcr *vhcr, |
1145 | struct mlx4_cmd_mailbox *inbox, |
1146 | struct mlx4_cmd_mailbox *outbox, |
1147 | struct mlx4_cmd_info *cmd); |
1148 | int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave, |
1149 | struct mlx4_vhcr *vhcr, |
1150 | struct mlx4_cmd_mailbox *inbox, |
1151 | struct mlx4_cmd_mailbox *outbox, |
1152 | struct mlx4_cmd_info *cmd); |
1153 | int mlx4_INIT2INIT_QP_wrapper(struct mlx4_dev *dev, int slave, |
1154 | struct mlx4_vhcr *vhcr, |
1155 | struct mlx4_cmd_mailbox *inbox, |
1156 | struct mlx4_cmd_mailbox *outbox, |
1157 | struct mlx4_cmd_info *cmd); |
1158 | int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev *dev, int slave, |
1159 | struct mlx4_vhcr *vhcr, |
1160 | struct mlx4_cmd_mailbox *inbox, |
1161 | struct mlx4_cmd_mailbox *outbox, |
1162 | struct mlx4_cmd_info *cmd); |
1163 | int mlx4_RTR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave, |
1164 | struct mlx4_vhcr *vhcr, |
1165 | struct mlx4_cmd_mailbox *inbox, |
1166 | struct mlx4_cmd_mailbox *outbox, |
1167 | struct mlx4_cmd_info *cmd); |
1168 | int mlx4_RTS2RTS_QP_wrapper(struct mlx4_dev *dev, int slave, |
1169 | struct mlx4_vhcr *vhcr, |
1170 | struct mlx4_cmd_mailbox *inbox, |
1171 | struct mlx4_cmd_mailbox *outbox, |
1172 | struct mlx4_cmd_info *cmd); |
1173 | int mlx4_SQERR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave, |
1174 | struct mlx4_vhcr *vhcr, |
1175 | struct mlx4_cmd_mailbox *inbox, |
1176 | struct mlx4_cmd_mailbox *outbox, |
1177 | struct mlx4_cmd_info *cmd); |
1178 | int mlx4_2ERR_QP_wrapper(struct mlx4_dev *dev, int slave, |
1179 | struct mlx4_vhcr *vhcr, |
1180 | struct mlx4_cmd_mailbox *inbox, |
1181 | struct mlx4_cmd_mailbox *outbox, |
1182 | struct mlx4_cmd_info *cmd); |
1183 | int mlx4_RTS2SQD_QP_wrapper(struct mlx4_dev *dev, int slave, |
1184 | struct mlx4_vhcr *vhcr, |
1185 | struct mlx4_cmd_mailbox *inbox, |
1186 | struct mlx4_cmd_mailbox *outbox, |
1187 | struct mlx4_cmd_info *cmd); |
1188 | int mlx4_SQD2SQD_QP_wrapper(struct mlx4_dev *dev, int slave, |
1189 | struct mlx4_vhcr *vhcr, |
1190 | struct mlx4_cmd_mailbox *inbox, |
1191 | struct mlx4_cmd_mailbox *outbox, |
1192 | struct mlx4_cmd_info *cmd); |
1193 | int mlx4_SQD2RTS_QP_wrapper(struct mlx4_dev *dev, int slave, |
1194 | struct mlx4_vhcr *vhcr, |
1195 | struct mlx4_cmd_mailbox *inbox, |
1196 | struct mlx4_cmd_mailbox *outbox, |
1197 | struct mlx4_cmd_info *cmd); |
1198 | int mlx4_2RST_QP_wrapper(struct mlx4_dev *dev, int slave, |
1199 | struct mlx4_vhcr *vhcr, |
1200 | struct mlx4_cmd_mailbox *inbox, |
1201 | struct mlx4_cmd_mailbox *outbox, |
1202 | struct mlx4_cmd_info *cmd); |
1203 | int mlx4_QUERY_QP_wrapper(struct mlx4_dev *dev, int slave, |
1204 | struct mlx4_vhcr *vhcr, |
1205 | struct mlx4_cmd_mailbox *inbox, |
1206 | struct mlx4_cmd_mailbox *outbox, |
1207 | struct mlx4_cmd_info *cmd); |
1208 | |
1209 | int mlx4_GEN_EQE(struct mlx4_dev *dev, int slave, struct mlx4_eqe *eqe); |
1210 | |
/* Bit-mask values for the cleanup_mask argument of mlx4_cmd_cleanup(),
 * selecting which pieces of command-interface state to tear down.
 */
enum {
	MLX4_CMD_CLEANUP_STRUCT = 1UL << 0,
	MLX4_CMD_CLEANUP_POOL = 1UL << 1,
	MLX4_CMD_CLEANUP_HCR = 1UL << 2,
	MLX4_CMD_CLEANUP_VHCR = 1UL << 3,
	/* All of the flags above OR'd together. */
	MLX4_CMD_CLEANUP_ALL = (MLX4_CMD_CLEANUP_VHCR << 1) - 1
};
1218 | |
1219 | int mlx4_cmd_init(struct mlx4_dev *dev); |
1220 | void mlx4_cmd_cleanup(struct mlx4_dev *dev, int cleanup_mask); |
1221 | int mlx4_multi_func_init(struct mlx4_dev *dev); |
1222 | int mlx4_ARM_COMM_CHANNEL(struct mlx4_dev *dev); |
1223 | void mlx4_multi_func_cleanup(struct mlx4_dev *dev); |
1224 | void mlx4_cmd_event(struct mlx4_dev *dev, u16 token, u8 status, u64 out_param); |
1225 | int mlx4_cmd_use_events(struct mlx4_dev *dev); |
1226 | void mlx4_cmd_use_polling(struct mlx4_dev *dev); |
1227 | |
1228 | int mlx4_comm_cmd(struct mlx4_dev *dev, u8 cmd, u16 param, |
1229 | u16 op, unsigned long timeout); |
1230 | |
1231 | void mlx4_cq_tasklet_cb(struct tasklet_struct *t); |
1232 | void mlx4_cq_completion(struct mlx4_dev *dev, u32 cqn); |
1233 | void mlx4_cq_event(struct mlx4_dev *dev, u32 cqn, int event_type); |
1234 | |
1235 | void mlx4_qp_event(struct mlx4_dev *dev, u32 qpn, int event_type); |
1236 | |
1237 | void mlx4_srq_event(struct mlx4_dev *dev, u32 srqn, int event_type); |
1238 | |
1239 | void mlx4_enter_error_state(struct mlx4_dev_persistent *persist); |
1240 | int mlx4_comm_internal_err(u32 slave_read); |
1241 | |
1242 | int mlx4_crdump_collect(struct mlx4_dev *dev); |
1243 | |
1244 | int mlx4_SENSE_PORT(struct mlx4_dev *dev, int port, |
1245 | enum mlx4_port_type *type); |
1246 | void mlx4_do_sense_ports(struct mlx4_dev *dev, |
1247 | enum mlx4_port_type *stype, |
1248 | enum mlx4_port_type *defaults); |
1249 | void mlx4_start_sense(struct mlx4_dev *dev); |
1250 | void mlx4_stop_sense(struct mlx4_dev *dev); |
1251 | void mlx4_sense_init(struct mlx4_dev *dev); |
1252 | int mlx4_check_port_params(struct mlx4_dev *dev, |
1253 | enum mlx4_port_type *port_type); |
1254 | int mlx4_change_port_types(struct mlx4_dev *dev, |
1255 | enum mlx4_port_type *port_types); |
1256 | |
1257 | void mlx4_init_mac_table(struct mlx4_dev *dev, struct mlx4_mac_table *table); |
1258 | void mlx4_init_vlan_table(struct mlx4_dev *dev, struct mlx4_vlan_table *table); |
1259 | void mlx4_init_roce_gid_table(struct mlx4_dev *dev, |
1260 | struct mlx4_roce_gid_table *table); |
1261 | void __mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, u16 vlan); |
1262 | int __mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index); |
1263 | int mlx4_bond_vlan_table(struct mlx4_dev *dev); |
1264 | int mlx4_unbond_vlan_table(struct mlx4_dev *dev); |
1265 | int mlx4_bond_mac_table(struct mlx4_dev *dev); |
1266 | int mlx4_unbond_mac_table(struct mlx4_dev *dev); |
1267 | |
1268 | int mlx4_SET_PORT(struct mlx4_dev *dev, u8 port, int pkey_tbl_sz); |
/* Resource tracker functions */
1270 | int mlx4_get_slave_from_resource_id(struct mlx4_dev *dev, |
1271 | enum mlx4_resource resource_type, |
1272 | u64 resource_id, int *slave); |
1273 | void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave_id); |
1274 | void mlx4_reset_roce_gids(struct mlx4_dev *dev, int slave); |
1275 | int mlx4_init_resource_tracker(struct mlx4_dev *dev); |
1276 | |
1277 | void mlx4_free_resource_tracker(struct mlx4_dev *dev, |
1278 | enum mlx4_res_tracker_free_type type); |
1279 | |
1280 | int mlx4_QUERY_FW_wrapper(struct mlx4_dev *dev, int slave, |
1281 | struct mlx4_vhcr *vhcr, |
1282 | struct mlx4_cmd_mailbox *inbox, |
1283 | struct mlx4_cmd_mailbox *outbox, |
1284 | struct mlx4_cmd_info *cmd); |
1285 | int mlx4_SET_PORT_wrapper(struct mlx4_dev *dev, int slave, |
1286 | struct mlx4_vhcr *vhcr, |
1287 | struct mlx4_cmd_mailbox *inbox, |
1288 | struct mlx4_cmd_mailbox *outbox, |
1289 | struct mlx4_cmd_info *cmd); |
1290 | int mlx4_INIT_PORT_wrapper(struct mlx4_dev *dev, int slave, |
1291 | struct mlx4_vhcr *vhcr, |
1292 | struct mlx4_cmd_mailbox *inbox, |
1293 | struct mlx4_cmd_mailbox *outbox, |
1294 | struct mlx4_cmd_info *cmd); |
1295 | int mlx4_CLOSE_PORT_wrapper(struct mlx4_dev *dev, int slave, |
1296 | struct mlx4_vhcr *vhcr, |
1297 | struct mlx4_cmd_mailbox *inbox, |
1298 | struct mlx4_cmd_mailbox *outbox, |
1299 | struct mlx4_cmd_info *cmd); |
1300 | int mlx4_QUERY_DEV_CAP_wrapper(struct mlx4_dev *dev, int slave, |
1301 | struct mlx4_vhcr *vhcr, |
1302 | struct mlx4_cmd_mailbox *inbox, |
1303 | struct mlx4_cmd_mailbox *outbox, |
1304 | struct mlx4_cmd_info *cmd); |
1305 | int mlx4_QUERY_PORT_wrapper(struct mlx4_dev *dev, int slave, |
1306 | struct mlx4_vhcr *vhcr, |
1307 | struct mlx4_cmd_mailbox *inbox, |
1308 | struct mlx4_cmd_mailbox *outbox, |
1309 | struct mlx4_cmd_info *cmd); |
1310 | int mlx4_get_port_ib_caps(struct mlx4_dev *dev, u8 port, __be32 *caps); |
1311 | |
1312 | int mlx4_get_slave_pkey_gid_tbl_len(struct mlx4_dev *dev, u8 port, |
1313 | int *gid_tbl_len, int *pkey_tbl_len); |
1314 | |
1315 | int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave, |
1316 | struct mlx4_vhcr *vhcr, |
1317 | struct mlx4_cmd_mailbox *inbox, |
1318 | struct mlx4_cmd_mailbox *outbox, |
1319 | struct mlx4_cmd_info *cmd); |
1320 | |
1321 | int mlx4_UPDATE_QP_wrapper(struct mlx4_dev *dev, int slave, |
1322 | struct mlx4_vhcr *vhcr, |
1323 | struct mlx4_cmd_mailbox *inbox, |
1324 | struct mlx4_cmd_mailbox *outbox, |
1325 | struct mlx4_cmd_info *cmd); |
1326 | |
1327 | int mlx4_PROMISC_wrapper(struct mlx4_dev *dev, int slave, |
1328 | struct mlx4_vhcr *vhcr, |
1329 | struct mlx4_cmd_mailbox *inbox, |
1330 | struct mlx4_cmd_mailbox *outbox, |
1331 | struct mlx4_cmd_info *cmd); |
1332 | int mlx4_qp_detach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], |
1333 | enum mlx4_protocol prot, enum mlx4_steer_type steer); |
1334 | int mlx4_qp_attach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], |
1335 | int block_mcast_loopback, enum mlx4_protocol prot, |
1336 | enum mlx4_steer_type steer); |
1337 | int mlx4_trans_to_dmfs_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, |
1338 | u8 gid[16], u8 port, |
1339 | int block_mcast_loopback, |
1340 | enum mlx4_protocol prot, u64 *reg_id); |
1341 | int mlx4_SET_MCAST_FLTR_wrapper(struct mlx4_dev *dev, int slave, |
1342 | struct mlx4_vhcr *vhcr, |
1343 | struct mlx4_cmd_mailbox *inbox, |
1344 | struct mlx4_cmd_mailbox *outbox, |
1345 | struct mlx4_cmd_info *cmd); |
1346 | int mlx4_SET_VLAN_FLTR_wrapper(struct mlx4_dev *dev, int slave, |
1347 | struct mlx4_vhcr *vhcr, |
1348 | struct mlx4_cmd_mailbox *inbox, |
1349 | struct mlx4_cmd_mailbox *outbox, |
1350 | struct mlx4_cmd_info *cmd); |
1351 | int mlx4_common_set_vlan_fltr(struct mlx4_dev *dev, int function, |
1352 | int port, void *buf); |
1353 | int mlx4_DUMP_ETH_STATS_wrapper(struct mlx4_dev *dev, int slave, |
1354 | struct mlx4_vhcr *vhcr, |
1355 | struct mlx4_cmd_mailbox *inbox, |
1356 | struct mlx4_cmd_mailbox *outbox, |
1357 | struct mlx4_cmd_info *cmd); |
1358 | int mlx4_PKEY_TABLE_wrapper(struct mlx4_dev *dev, int slave, |
1359 | struct mlx4_vhcr *vhcr, |
1360 | struct mlx4_cmd_mailbox *inbox, |
1361 | struct mlx4_cmd_mailbox *outbox, |
1362 | struct mlx4_cmd_info *cmd); |
1363 | int mlx4_QUERY_IF_STAT_wrapper(struct mlx4_dev *dev, int slave, |
1364 | struct mlx4_vhcr *vhcr, |
1365 | struct mlx4_cmd_mailbox *inbox, |
1366 | struct mlx4_cmd_mailbox *outbox, |
1367 | struct mlx4_cmd_info *cmd); |
1368 | int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave, |
1369 | struct mlx4_vhcr *vhcr, |
1370 | struct mlx4_cmd_mailbox *inbox, |
1371 | struct mlx4_cmd_mailbox *outbox, |
1372 | struct mlx4_cmd_info *cmd); |
1373 | int mlx4_QP_FLOW_STEERING_DETACH_wrapper(struct mlx4_dev *dev, int slave, |
1374 | struct mlx4_vhcr *vhcr, |
1375 | struct mlx4_cmd_mailbox *inbox, |
1376 | struct mlx4_cmd_mailbox *outbox, |
1377 | struct mlx4_cmd_info *cmd); |
1378 | int mlx4_ACCESS_REG_wrapper(struct mlx4_dev *dev, int slave, |
1379 | struct mlx4_vhcr *vhcr, |
1380 | struct mlx4_cmd_mailbox *inbox, |
1381 | struct mlx4_cmd_mailbox *outbox, |
1382 | struct mlx4_cmd_info *cmd); |
1383 | |
1384 | int mlx4_get_mgm_entry_size(struct mlx4_dev *dev); |
1385 | int mlx4_get_qp_per_mgm(struct mlx4_dev *dev); |
1386 | |
1387 | static inline void set_param_l(u64 *arg, u32 val) |
1388 | { |
1389 | *arg = (*arg & 0xffffffff00000000ULL) | (u64) val; |
1390 | } |
1391 | |
1392 | static inline void set_param_h(u64 *arg, u32 val) |
1393 | { |
1394 | *arg = (*arg & 0xffffffff) | ((u64) val << 32); |
1395 | } |
1396 | |
1397 | static inline u32 get_param_l(u64 *arg) |
1398 | { |
1399 | return (u32) (*arg & 0xffffffff); |
1400 | } |
1401 | |
1402 | static inline u32 get_param_h(u64 *arg) |
1403 | { |
1404 | return (u32)(*arg >> 32); |
1405 | } |
1406 | |
/* Return the spinlock protecting the master's resource-tracker state
 * for dev (mfunc.master.res_tracker).
 */
static inline spinlock_t *mlx4_tlock(struct mlx4_dev *dev)
{
	return &mlx4_priv(dev)->mfunc.master.res_tracker.lock;
}
1411 | |
1412 | #define NOT_MASKED_PD_BITS 17 |
1413 | |
1414 | void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work); |
1415 | |
1416 | void mlx4_init_quotas(struct mlx4_dev *dev); |
1417 | |
1418 | /* for VFs, replace zero MACs with randomly-generated MACs at driver start */ |
1419 | void mlx4_replace_zero_macs(struct mlx4_dev *dev); |
1420 | int mlx4_get_slave_num_gids(struct mlx4_dev *dev, int slave, int port); |
1421 | /* Returns the VF index of slave */ |
1422 | int mlx4_get_vf_indx(struct mlx4_dev *dev, int slave); |
1423 | int mlx4_config_mad_demux(struct mlx4_dev *dev); |
1424 | int mlx4_do_bond(struct mlx4_dev *dev, bool enable); |
1425 | int mlx4_bond_fs_rules(struct mlx4_dev *dev); |
1426 | int mlx4_unbond_fs_rules(struct mlx4_dev *dev); |
1427 | |
/* Per-zone behavior flags, passed in <flags> to mlx4_zone_add_one(). */
enum mlx4_zone_flags {
	MLX4_ZONE_ALLOW_ALLOC_FROM_LOWER_PRIO = 1UL << 0,
	MLX4_ZONE_ALLOW_ALLOC_FROM_EQ_PRIO = 1UL << 1,
	MLX4_ZONE_FALLBACK_TO_HIGHER_PRIO = 1UL << 2,
	/* Selects the bitmap's use_rr allocation mode (see the
	 * mlx4_zone_add_one() description below).
	 */
	MLX4_ZONE_USE_RR = 1UL << 3,
};
1434 | |
/* Allocator-wide flags, passed to mlx4_zone_allocator_create(). */
enum mlx4_zone_alloc_flags {
	/* No two objects could overlap between zones. UID
	 * could be left unused. If this flag is given and
	 * two overlapped zones are used, an object will be freed
	 * from the smallest possible matching zone.
	 */
	MLX4_ZONE_ALLOC_FLAGS_NO_OVERLAP = 1UL << 0,
};
1443 | |
1444 | struct mlx4_zone_allocator; |
1445 | |
1446 | /* Create a new zone allocator */ |
1447 | struct mlx4_zone_allocator *mlx4_zone_allocator_create(enum mlx4_zone_alloc_flags flags); |
1448 | |
1449 | /* Attach a mlx4_bitmap <bitmap> of priority <priority> to the zone allocator |
1450 | * <zone_alloc>. Allocating an object from this zone adds an offset <offset>. |
 * Similarly, when searching for an object to free, this offset is taken into
 * account. The use_rr parameter for allocating objects from this <bitmap>
1453 | * is given through the MLX4_ZONE_USE_RR flag in <flags>. |
1454 | * When an allocation fails, <zone_alloc> tries to allocate from other zones |
1455 | * according to the policy set by <flags>. <puid> is the unique identifier |
1456 | * received to this zone. |
1457 | */ |
1458 | int mlx4_zone_add_one(struct mlx4_zone_allocator *zone_alloc, |
1459 | struct mlx4_bitmap *bitmap, |
1460 | u32 flags, |
1461 | int priority, |
1462 | int offset, |
1463 | u32 *puid); |
1464 | |
1465 | /* Remove bitmap indicated by <uid> from <zone_alloc> */ |
1466 | int mlx4_zone_remove_one(struct mlx4_zone_allocator *zone_alloc, u32 uid); |
1467 | |
/* Delete the zone allocator <zone_alloc>. This function doesn't destroy
1469 | * the attached bitmaps. |
1470 | */ |
1471 | void mlx4_zone_allocator_destroy(struct mlx4_zone_allocator *zone_alloc); |
1472 | |
1473 | /* Allocate <count> objects with align <align> and skip_mask <skip_mask> |
1474 | * from the mlx4_bitmap whose uid is <uid>. The bitmap which we actually |
1475 | * allocated from is returned in <puid>. If the allocation fails, a negative |
1476 | * number is returned. Otherwise, the offset of the first object is returned. |
1477 | */ |
1478 | u32 mlx4_zone_alloc_entries(struct mlx4_zone_allocator *zones, u32 uid, int count, |
1479 | int align, u32 skip_mask, u32 *puid); |
1480 | |
1481 | /* Free <count> objects, start from <obj> of the uid <uid> from zone_allocator |
1482 | * <zones>. |
1483 | */ |
1484 | u32 mlx4_zone_free_entries(struct mlx4_zone_allocator *zones, |
1485 | u32 uid, u32 obj, u32 count); |
1486 | |
1487 | /* If <zones> was allocated with MLX4_ZONE_ALLOC_FLAGS_NO_OVERLAP, instead of |
1488 | * specifying the uid when freeing an object, zone allocator could figure it by |
 * itself. Other parameters are similar to mlx4_zone_free_entries().
1490 | */ |
1491 | u32 mlx4_zone_free_entries_unique(struct mlx4_zone_allocator *zones, u32 obj, u32 count); |
1492 | |
1493 | /* Returns a pointer to mlx4_bitmap that was attached to <zones> with <uid> */ |
1494 | struct mlx4_bitmap *mlx4_zone_get_bitmap(struct mlx4_zone_allocator *zones, u32 uid); |
1495 | |
1496 | #endif /* MLX4_H */ |
1497 | |