// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/export.h>
#include <linux/err.h>
#include <linux/device.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/wait.h>
#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/log2.h>
#include <linux/string.h>

#include "pci_hw.h"
#include "pci.h"
#include "core.h"
#include "cmd.h"
#include "port.h"
#include "resources.h"

#define mlxsw_pci_write32(mlxsw_pci, reg, val) \
	iowrite32be(val, (mlxsw_pci)->hw_addr + (MLXSW_PCI_ ## reg))
#define mlxsw_pci_read32(mlxsw_pci, reg) \
	ioread32be((mlxsw_pci)->hw_addr + (MLXSW_PCI_ ## reg))
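
/* Device registers in the BAR are big-endian, hence the iowrite32be()/
 * ioread32be() accessors. The 'reg' argument is token-pasted onto the
 * MLXSW_PCI_ prefix, so a call such as:
 *
 *	val = mlxsw_pci_read32(mlxsw_pci, FW_READY);
 *
 * expands to a 32-bit read of MLXSW_PCI_FW_READY relative to hw_addr.
 */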

enum mlxsw_pci_queue_type {
	MLXSW_PCI_QUEUE_TYPE_SDQ,
	MLXSW_PCI_QUEUE_TYPE_RDQ,
	MLXSW_PCI_QUEUE_TYPE_CQ,
	MLXSW_PCI_QUEUE_TYPE_EQ,
};

#define MLXSW_PCI_QUEUE_TYPE_COUNT	4

static const u16 mlxsw_pci_doorbell_type_offset[] = {
	MLXSW_PCI_DOORBELL_SDQ_OFFSET,	/* for type MLXSW_PCI_QUEUE_TYPE_SDQ */
	MLXSW_PCI_DOORBELL_RDQ_OFFSET,	/* for type MLXSW_PCI_QUEUE_TYPE_RDQ */
	MLXSW_PCI_DOORBELL_CQ_OFFSET,	/* for type MLXSW_PCI_QUEUE_TYPE_CQ */
	MLXSW_PCI_DOORBELL_EQ_OFFSET,	/* for type MLXSW_PCI_QUEUE_TYPE_EQ */
};

static const u16 mlxsw_pci_doorbell_arm_type_offset[] = {
	0,				/* unused */
	0,				/* unused */
	MLXSW_PCI_DOORBELL_ARM_CQ_OFFSET, /* for type MLXSW_PCI_QUEUE_TYPE_CQ */
	MLXSW_PCI_DOORBELL_ARM_EQ_OFFSET, /* for type MLXSW_PCI_QUEUE_TYPE_EQ */
};

struct mlxsw_pci_mem_item {
	char *buf;
	dma_addr_t mapaddr;
	size_t size;
};

struct mlxsw_pci_queue_elem_info {
	char *elem; /* pointer to actual dma mapped element mem chunk */
	union {
		struct {
			struct sk_buff *skb;
		} sdq;
		struct {
			struct sk_buff *skb;
		} rdq;
	} u;
};

struct mlxsw_pci_queue {
	spinlock_t lock; /* for queue accesses */
	struct mlxsw_pci_mem_item mem_item;
	struct mlxsw_pci_queue_elem_info *elem_info;
	u16 producer_counter;
	u16 consumer_counter;
	u16 count; /* number of elements in queue */
	u8 num; /* queue number */
	u8 elem_size; /* size of one element */
	enum mlxsw_pci_queue_type type;
	struct tasklet_struct tasklet; /* queue processing tasklet */
	struct mlxsw_pci *pci;
	union {
		struct {
			u32 comp_sdq_count;
			u32 comp_rdq_count;
			enum mlxsw_pci_cqe_v v;
		} cq;
		struct {
			u32 ev_cmd_count;
			u32 ev_comp_count;
			u32 ev_other_count;
		} eq;
	} u;
};

struct mlxsw_pci_queue_type_group {
	struct mlxsw_pci_queue *q;
	u8 count; /* number of queues in group */
};

struct mlxsw_pci {
	struct pci_dev *pdev;
	u8 __iomem *hw_addr;
	u64 free_running_clock_offset;
	u64 utc_sec_offset;
	u64 utc_nsec_offset;
	bool lag_mode_support;
	enum mlxsw_cmd_mbox_config_profile_lag_mode lag_mode;
	struct mlxsw_pci_queue_type_group queues[MLXSW_PCI_QUEUE_TYPE_COUNT];
	u32 doorbell_offset;
	struct mlxsw_core *core;
	struct {
		struct mlxsw_pci_mem_item *items;
		unsigned int count;
	} fw_area;
	struct {
		struct mlxsw_pci_mem_item out_mbox;
		struct mlxsw_pci_mem_item in_mbox;
		struct mutex lock; /* Lock access to command registers */
		bool nopoll;
		wait_queue_head_t wait;
		bool wait_done;
		struct {
			u8 status;
			u64 out_param;
		} comp;
	} cmd;
	struct mlxsw_bus_info bus_info;
	const struct pci_device_id *id;
	enum mlxsw_pci_cqe_v max_cqe_ver; /* Maximal supported CQE version */
	u8 num_sdq_cqs; /* Number of CQs used for SDQs */
};

static void mlxsw_pci_queue_tasklet_schedule(struct mlxsw_pci_queue *q)
{
	tasklet_schedule(&q->tasklet);
}

static char *__mlxsw_pci_queue_elem_get(struct mlxsw_pci_queue *q,
					size_t elem_size, int elem_index)
{
	return q->mem_item.buf + (elem_size * elem_index);
}

static struct mlxsw_pci_queue_elem_info *
mlxsw_pci_queue_elem_info_get(struct mlxsw_pci_queue *q, int elem_index)
{
	return &q->elem_info[elem_index];
}

static struct mlxsw_pci_queue_elem_info *
mlxsw_pci_queue_elem_info_producer_get(struct mlxsw_pci_queue *q)
{
	int index = q->producer_counter & (q->count - 1);

	if ((u16) (q->producer_counter - q->consumer_counter) == q->count)
		return NULL;
	return mlxsw_pci_queue_elem_info_get(q, index);
}

static struct mlxsw_pci_queue_elem_info *
mlxsw_pci_queue_elem_info_consumer_get(struct mlxsw_pci_queue *q)
{
	int index = q->consumer_counter & (q->count - 1);

	return mlxsw_pci_queue_elem_info_get(q, index);
}

static char *mlxsw_pci_queue_elem_get(struct mlxsw_pci_queue *q, int elem_index)
{
	return mlxsw_pci_queue_elem_info_get(q, elem_index)->elem;
}

static bool mlxsw_pci_elem_hw_owned(struct mlxsw_pci_queue *q, bool owner_bit)
{
	return owner_bit != !!(q->consumer_counter & q->count);
}
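
/* The producer/consumer counters are free-running u16 values and 'count' is
 * a power of two, so the low bits index the ring while the next-higher bit
 * tracks the wrap parity. Worked example with count = 8: for
 * consumer_counter 0..7 the software parity is 0, for 8..15 it is 1, then 0
 * again. The device is expected to flip the owner bit in each element it
 * writes, so an element whose owner bit still differs from the software
 * parity has not yet been handed back by the hardware.
 */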

static struct mlxsw_pci_queue_type_group *
mlxsw_pci_queue_type_group_get(struct mlxsw_pci *mlxsw_pci,
			       enum mlxsw_pci_queue_type q_type)
{
	return &mlxsw_pci->queues[q_type];
}

static u8 __mlxsw_pci_queue_count(struct mlxsw_pci *mlxsw_pci,
				  enum mlxsw_pci_queue_type q_type)
{
	struct mlxsw_pci_queue_type_group *queue_group;

	queue_group = mlxsw_pci_queue_type_group_get(mlxsw_pci, q_type);
	return queue_group->count;
}

static u8 mlxsw_pci_sdq_count(struct mlxsw_pci *mlxsw_pci)
{
	return __mlxsw_pci_queue_count(mlxsw_pci, MLXSW_PCI_QUEUE_TYPE_SDQ);
}

static u8 mlxsw_pci_cq_count(struct mlxsw_pci *mlxsw_pci)
{
	return __mlxsw_pci_queue_count(mlxsw_pci, MLXSW_PCI_QUEUE_TYPE_CQ);
}

static struct mlxsw_pci_queue *
__mlxsw_pci_queue_get(struct mlxsw_pci *mlxsw_pci,
		      enum mlxsw_pci_queue_type q_type, u8 q_num)
{
	return &mlxsw_pci->queues[q_type].q[q_num];
}

static struct mlxsw_pci_queue *mlxsw_pci_sdq_get(struct mlxsw_pci *mlxsw_pci,
						 u8 q_num)
{
	return __mlxsw_pci_queue_get(mlxsw_pci,
				     MLXSW_PCI_QUEUE_TYPE_SDQ, q_num);
}

static struct mlxsw_pci_queue *mlxsw_pci_rdq_get(struct mlxsw_pci *mlxsw_pci,
						 u8 q_num)
{
	return __mlxsw_pci_queue_get(mlxsw_pci,
				     MLXSW_PCI_QUEUE_TYPE_RDQ, q_num);
}

static struct mlxsw_pci_queue *mlxsw_pci_cq_get(struct mlxsw_pci *mlxsw_pci,
						u8 q_num)
{
	return __mlxsw_pci_queue_get(mlxsw_pci, MLXSW_PCI_QUEUE_TYPE_CQ, q_num);
}

static struct mlxsw_pci_queue *mlxsw_pci_eq_get(struct mlxsw_pci *mlxsw_pci,
						u8 q_num)
{
	return __mlxsw_pci_queue_get(mlxsw_pci, MLXSW_PCI_QUEUE_TYPE_EQ, q_num);
}

static void __mlxsw_pci_queue_doorbell_set(struct mlxsw_pci *mlxsw_pci,
					   struct mlxsw_pci_queue *q,
					   u16 val)
{
	mlxsw_pci_write32(mlxsw_pci,
			  DOORBELL(mlxsw_pci->doorbell_offset,
				   mlxsw_pci_doorbell_type_offset[q->type],
				   q->num), val);
}

static void __mlxsw_pci_queue_doorbell_arm_set(struct mlxsw_pci *mlxsw_pci,
					       struct mlxsw_pci_queue *q,
					       u16 val)
{
	mlxsw_pci_write32(mlxsw_pci,
			  DOORBELL(mlxsw_pci->doorbell_offset,
				   mlxsw_pci_doorbell_arm_type_offset[q->type],
				   q->num), val);
}

static void mlxsw_pci_queue_doorbell_producer_ring(struct mlxsw_pci *mlxsw_pci,
						   struct mlxsw_pci_queue *q)
{
	wmb(); /* ensure all writes are done before we ring a bell */
	__mlxsw_pci_queue_doorbell_set(mlxsw_pci, q, q->producer_counter);
}

static void mlxsw_pci_queue_doorbell_consumer_ring(struct mlxsw_pci *mlxsw_pci,
						   struct mlxsw_pci_queue *q)
{
	wmb(); /* ensure all writes are done before we ring a bell */
	__mlxsw_pci_queue_doorbell_set(mlxsw_pci, q,
				       q->consumer_counter + q->count);
}

static void
mlxsw_pci_queue_doorbell_arm_consumer_ring(struct mlxsw_pci *mlxsw_pci,
					   struct mlxsw_pci_queue *q)
{
	wmb(); /* ensure all writes are done before we ring a bell */
	__mlxsw_pci_queue_doorbell_arm_set(mlxsw_pci, q, q->consumer_counter);
}
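
/* Doorbells publish ring state to the device, so each helper above issues
 * wmb() first: the DMA-visible ring contents must be globally observable
 * before the device learns of the new counter value. Note that the consumer
 * doorbell reports consumer_counter + count; presumably this keeps the
 * reported value ahead of the producer side so the device can distinguish a
 * full ring from an empty one.
 */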

static dma_addr_t __mlxsw_pci_queue_page_get(struct mlxsw_pci_queue *q,
					     int page_index)
{
	return q->mem_item.mapaddr + MLXSW_PCI_PAGE_SIZE * page_index;
}

static int mlxsw_pci_sdq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
			      struct mlxsw_pci_queue *q)
{
	int tclass;
	int lp;
	int i;
	int err;

	q->producer_counter = 0;
	q->consumer_counter = 0;
	tclass = q->num == MLXSW_PCI_SDQ_EMAD_INDEX ? MLXSW_PCI_SDQ_EMAD_TC :
						      MLXSW_PCI_SDQ_CTL_TC;
	lp = q->num == MLXSW_PCI_SDQ_EMAD_INDEX ? MLXSW_CMD_MBOX_SW2HW_DQ_SDQ_LP_IGNORE_WQE :
						  MLXSW_CMD_MBOX_SW2HW_DQ_SDQ_LP_WQE;

	/* Set CQ of same number of this SDQ. */
	mlxsw_cmd_mbox_sw2hw_dq_cq_set(mbox, q->num);
	mlxsw_cmd_mbox_sw2hw_dq_sdq_lp_set(mbox, lp);
	mlxsw_cmd_mbox_sw2hw_dq_sdq_tclass_set(mbox, tclass);
	mlxsw_cmd_mbox_sw2hw_dq_log2_dq_sz_set(mbox, 3); /* 8 pages */
	for (i = 0; i < MLXSW_PCI_AQ_PAGES; i++) {
		dma_addr_t mapaddr = __mlxsw_pci_queue_page_get(q, i);

		mlxsw_cmd_mbox_sw2hw_dq_pa_set(mbox, i, mapaddr);
	}

	err = mlxsw_cmd_sw2hw_sdq(mlxsw_pci->core, mbox, q->num);
	if (err)
		return err;
	mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q);
	return 0;
}

static void mlxsw_pci_sdq_fini(struct mlxsw_pci *mlxsw_pci,
			       struct mlxsw_pci_queue *q)
{
	mlxsw_cmd_hw2sw_sdq(mlxsw_pci->core, q->num);
}

static int mlxsw_pci_wqe_frag_map(struct mlxsw_pci *mlxsw_pci, char *wqe,
				  int index, char *frag_data, size_t frag_len,
				  int direction)
{
	struct pci_dev *pdev = mlxsw_pci->pdev;
	dma_addr_t mapaddr;

	mapaddr = dma_map_single(&pdev->dev, frag_data, frag_len, direction);
	if (unlikely(dma_mapping_error(&pdev->dev, mapaddr))) {
		dev_err_ratelimited(&pdev->dev, "failed to dma map tx frag\n");
		return -EIO;
	}
	mlxsw_pci_wqe_address_set(wqe, index, mapaddr);
	mlxsw_pci_wqe_byte_count_set(wqe, index, frag_len);
	return 0;
}

static void mlxsw_pci_wqe_frag_unmap(struct mlxsw_pci *mlxsw_pci, char *wqe,
				     int index, int direction)
{
	struct pci_dev *pdev = mlxsw_pci->pdev;
	size_t frag_len = mlxsw_pci_wqe_byte_count_get(wqe, index);
	dma_addr_t mapaddr = mlxsw_pci_wqe_address_get(wqe, index);

	if (!frag_len)
		return;
	dma_unmap_single(&pdev->dev, mapaddr, frag_len, direction);
}
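
/* The WQE itself doubles as the bookkeeping for the streaming DMA mapping:
 * the bus address and byte count written into scatter/gather entry 'index'
 * at map time are read back at unmap time, so no separate per-frag state is
 * kept. A zero byte count marks an unused entry and makes the unmap a no-op.
 */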

static int mlxsw_pci_rdq_skb_alloc(struct mlxsw_pci *mlxsw_pci,
				   struct mlxsw_pci_queue_elem_info *elem_info,
				   gfp_t gfp)
{
	size_t buf_len = MLXSW_PORT_MAX_MTU;
	char *wqe = elem_info->elem;
	struct sk_buff *skb;
	int err;

	skb = __netdev_alloc_skb_ip_align(NULL, buf_len, gfp);
	if (!skb)
		return -ENOMEM;

	err = mlxsw_pci_wqe_frag_map(mlxsw_pci, wqe, 0, skb->data,
				     buf_len, DMA_FROM_DEVICE);
	if (err)
		goto err_frag_map;

	elem_info->u.rdq.skb = skb;
	return 0;

err_frag_map:
	dev_kfree_skb_any(skb);
	return err;
}

static void mlxsw_pci_rdq_skb_free(struct mlxsw_pci *mlxsw_pci,
				   struct mlxsw_pci_queue_elem_info *elem_info)
{
	struct sk_buff *skb;
	char *wqe;

	skb = elem_info->u.rdq.skb;
	wqe = elem_info->elem;

	mlxsw_pci_wqe_frag_unmap(mlxsw_pci, wqe, 0, DMA_FROM_DEVICE);
	dev_kfree_skb_any(skb);
}

static int mlxsw_pci_rdq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
			      struct mlxsw_pci_queue *q)
{
	struct mlxsw_pci_queue_elem_info *elem_info;
	u8 sdq_count = mlxsw_pci_sdq_count(mlxsw_pci);
	int i;
	int err;

	q->producer_counter = 0;
	q->consumer_counter = 0;

	/* Set CQ of same number of this RDQ with base
	 * above SDQ count as the lower ones are assigned to SDQs.
	 */
	mlxsw_cmd_mbox_sw2hw_dq_cq_set(mbox, sdq_count + q->num);
	mlxsw_cmd_mbox_sw2hw_dq_log2_dq_sz_set(mbox, 3); /* 8 pages */
	for (i = 0; i < MLXSW_PCI_AQ_PAGES; i++) {
		dma_addr_t mapaddr = __mlxsw_pci_queue_page_get(q, i);

		mlxsw_cmd_mbox_sw2hw_dq_pa_set(mbox, i, mapaddr);
	}

	err = mlxsw_cmd_sw2hw_rdq(mlxsw_pci->core, mbox, q->num);
	if (err)
		return err;

	mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q);

	for (i = 0; i < q->count; i++) {
		elem_info = mlxsw_pci_queue_elem_info_producer_get(q);
		BUG_ON(!elem_info);
		err = mlxsw_pci_rdq_skb_alloc(mlxsw_pci, elem_info, GFP_KERNEL);
		if (err)
			goto rollback;
		/* Everything is set up, ring doorbell to pass elem to HW */
		q->producer_counter++;
		mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q);
	}

	return 0;

rollback:
	for (i--; i >= 0; i--) {
		elem_info = mlxsw_pci_queue_elem_info_get(q, i);
		mlxsw_pci_rdq_skb_free(mlxsw_pci, elem_info);
	}
	mlxsw_cmd_hw2sw_rdq(mlxsw_pci->core, q->num);

	return err;
}

static void mlxsw_pci_rdq_fini(struct mlxsw_pci *mlxsw_pci,
			       struct mlxsw_pci_queue *q)
{
	struct mlxsw_pci_queue_elem_info *elem_info;
	int i;

	mlxsw_cmd_hw2sw_rdq(mlxsw_pci->core, q->num);
	for (i = 0; i < q->count; i++) {
		elem_info = mlxsw_pci_queue_elem_info_get(q, i);
		mlxsw_pci_rdq_skb_free(mlxsw_pci, elem_info);
	}
}

static void mlxsw_pci_cq_pre_init(struct mlxsw_pci *mlxsw_pci,
				  struct mlxsw_pci_queue *q)
{
	q->u.cq.v = mlxsw_pci->max_cqe_ver;

	if (q->u.cq.v == MLXSW_PCI_CQE_V2 &&
	    q->num < mlxsw_pci->num_sdq_cqs &&
	    !mlxsw_core_sdq_supports_cqe_v2(mlxsw_pci->core))
		q->u.cq.v = MLXSW_PCI_CQE_V1;
}
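
/* Each CQ defaults to the highest CQE version the device reported. The one
 * exception is CQs that serve SDQs (by convention the low-numbered ones,
 * see the CQ assignment in the DQ init functions above): if the core says
 * send queues cannot use CQE v2, those CQs are quietly downgraded to v1
 * while the RDQ completion queues keep the newer format.
 */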

static int mlxsw_pci_cq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
			     struct mlxsw_pci_queue *q)
{
	int i;
	int err;

	q->consumer_counter = 0;

	for (i = 0; i < q->count; i++) {
		char *elem = mlxsw_pci_queue_elem_get(q, i);

		mlxsw_pci_cqe_owner_set(q->u.cq.v, elem, 1);
	}

	if (q->u.cq.v == MLXSW_PCI_CQE_V1)
		mlxsw_cmd_mbox_sw2hw_cq_cqe_ver_set(mbox,
				MLXSW_CMD_MBOX_SW2HW_CQ_CQE_VER_1);
	else if (q->u.cq.v == MLXSW_PCI_CQE_V2)
		mlxsw_cmd_mbox_sw2hw_cq_cqe_ver_set(mbox,
				MLXSW_CMD_MBOX_SW2HW_CQ_CQE_VER_2);

	mlxsw_cmd_mbox_sw2hw_cq_c_eqn_set(mbox, MLXSW_PCI_EQ_COMP_NUM);
	mlxsw_cmd_mbox_sw2hw_cq_st_set(mbox, 0);
	mlxsw_cmd_mbox_sw2hw_cq_log_cq_size_set(mbox, ilog2(q->count));
	for (i = 0; i < MLXSW_PCI_AQ_PAGES; i++) {
		dma_addr_t mapaddr = __mlxsw_pci_queue_page_get(q, i);

		mlxsw_cmd_mbox_sw2hw_cq_pa_set(mbox, i, mapaddr);
	}
	err = mlxsw_cmd_sw2hw_cq(mlxsw_pci->core, mbox, q->num);
	if (err)
		return err;
	mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q);
	mlxsw_pci_queue_doorbell_arm_consumer_ring(mlxsw_pci, q);
	return 0;
}

static void mlxsw_pci_cq_fini(struct mlxsw_pci *mlxsw_pci,
			      struct mlxsw_pci_queue *q)
{
	mlxsw_cmd_hw2sw_cq(mlxsw_pci->core, q->num);
}

static unsigned int mlxsw_pci_read32_off(struct mlxsw_pci *mlxsw_pci,
					 ptrdiff_t off)
{
	return ioread32be(mlxsw_pci->hw_addr + off);
}

static void mlxsw_pci_skb_cb_ts_set(struct mlxsw_pci *mlxsw_pci,
				    struct sk_buff *skb,
				    enum mlxsw_pci_cqe_v cqe_v, char *cqe)
{
	u8 ts_type;

	if (cqe_v != MLXSW_PCI_CQE_V2)
		return;

	ts_type = mlxsw_pci_cqe2_time_stamp_type_get(cqe);

	if (ts_type != MLXSW_PCI_CQE_TIME_STAMP_TYPE_UTC &&
	    ts_type != MLXSW_PCI_CQE_TIME_STAMP_TYPE_MIRROR_UTC)
		return;

	mlxsw_skb_cb(skb)->cqe_ts.sec = mlxsw_pci_cqe2_time_stamp_sec_get(cqe);
	mlxsw_skb_cb(skb)->cqe_ts.nsec =
		mlxsw_pci_cqe2_time_stamp_nsec_get(cqe);
}

static void mlxsw_pci_cqe_sdq_handle(struct mlxsw_pci *mlxsw_pci,
				     struct mlxsw_pci_queue *q,
				     u16 consumer_counter_limit,
				     enum mlxsw_pci_cqe_v cqe_v,
				     char *cqe)
{
	struct pci_dev *pdev = mlxsw_pci->pdev;
	struct mlxsw_pci_queue_elem_info *elem_info;
	struct mlxsw_tx_info tx_info;
	char *wqe;
	struct sk_buff *skb;
	int i;

	spin_lock(&q->lock);
	elem_info = mlxsw_pci_queue_elem_info_consumer_get(q);
	tx_info = mlxsw_skb_cb(elem_info->u.sdq.skb)->tx_info;
	skb = elem_info->u.sdq.skb;
	wqe = elem_info->elem;
	for (i = 0; i < MLXSW_PCI_WQE_SG_ENTRIES; i++)
		mlxsw_pci_wqe_frag_unmap(mlxsw_pci, wqe, i, DMA_TO_DEVICE);

	if (unlikely(!tx_info.is_emad &&
		     skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
		mlxsw_pci_skb_cb_ts_set(mlxsw_pci, skb, cqe_v, cqe);
		mlxsw_core_ptp_transmitted(mlxsw_pci->core, skb,
					   tx_info.local_port);
		skb = NULL;
	}

	if (skb)
		dev_kfree_skb_any(skb);
	elem_info->u.sdq.skb = NULL;

	if (q->consumer_counter++ != consumer_counter_limit)
		dev_dbg_ratelimited(&pdev->dev, "Consumer counter does not match limit in SDQ\n");
	spin_unlock(&q->lock);
}
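
/* A send completion unmaps every scatter/gather entry of the WQE and then
 * either frees the skb or, for a packet that requested a hardware TX
 * timestamp (and is not an EMAD), hands it to mlxsw_core_ptp_transmitted()
 * together with the CQE timestamp; in that case ownership of the skb moves
 * to the PTP code, which is why 'skb' is NULLed instead of freed here.
 */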

static void mlxsw_pci_cqe_rdq_md_tx_port_init(struct sk_buff *skb,
					      const char *cqe)
{
	struct mlxsw_skb_cb *cb = mlxsw_skb_cb(skb);

	if (mlxsw_pci_cqe2_tx_lag_get(cqe)) {
		cb->rx_md_info.tx_port_is_lag = true;
		cb->rx_md_info.tx_lag_id = mlxsw_pci_cqe2_tx_lag_id_get(cqe);
		cb->rx_md_info.tx_lag_port_index =
			mlxsw_pci_cqe2_tx_lag_subport_get(cqe);
	} else {
		cb->rx_md_info.tx_port_is_lag = false;
		cb->rx_md_info.tx_sys_port =
			mlxsw_pci_cqe2_tx_system_port_get(cqe);
	}

	if (cb->rx_md_info.tx_sys_port != MLXSW_PCI_CQE2_TX_PORT_MULTI_PORT &&
	    cb->rx_md_info.tx_sys_port != MLXSW_PCI_CQE2_TX_PORT_INVALID)
		cb->rx_md_info.tx_port_valid = 1;
	else
		cb->rx_md_info.tx_port_valid = 0;
}

static void mlxsw_pci_cqe_rdq_md_init(struct sk_buff *skb, const char *cqe)
{
	struct mlxsw_skb_cb *cb = mlxsw_skb_cb(skb);

	cb->rx_md_info.tx_congestion = mlxsw_pci_cqe2_mirror_cong_get(cqe);
	if (cb->rx_md_info.tx_congestion != MLXSW_PCI_CQE2_MIRROR_CONG_INVALID)
		cb->rx_md_info.tx_congestion_valid = 1;
	else
		cb->rx_md_info.tx_congestion_valid = 0;
	cb->rx_md_info.tx_congestion <<= MLXSW_PCI_CQE2_MIRROR_CONG_SHIFT;

	cb->rx_md_info.latency = mlxsw_pci_cqe2_mirror_latency_get(cqe);
	if (cb->rx_md_info.latency != MLXSW_PCI_CQE2_MIRROR_LATENCY_INVALID)
		cb->rx_md_info.latency_valid = 1;
	else
		cb->rx_md_info.latency_valid = 0;

	cb->rx_md_info.tx_tc = mlxsw_pci_cqe2_mirror_tclass_get(cqe);
	if (cb->rx_md_info.tx_tc != MLXSW_PCI_CQE2_MIRROR_TCLASS_INVALID)
		cb->rx_md_info.tx_tc_valid = 1;
	else
		cb->rx_md_info.tx_tc_valid = 0;

	mlxsw_pci_cqe_rdq_md_tx_port_init(skb, cqe);
}

static void mlxsw_pci_cqe_rdq_handle(struct mlxsw_pci *mlxsw_pci,
				     struct mlxsw_pci_queue *q,
				     u16 consumer_counter_limit,
				     enum mlxsw_pci_cqe_v cqe_v, char *cqe)
{
	struct pci_dev *pdev = mlxsw_pci->pdev;
	struct mlxsw_pci_queue_elem_info *elem_info;
	struct mlxsw_rx_info rx_info = {};
	char wqe[MLXSW_PCI_WQE_SIZE];
	struct sk_buff *skb;
	u16 byte_count;
	int err;

	elem_info = mlxsw_pci_queue_elem_info_consumer_get(q);
	skb = elem_info->u.rdq.skb;
	memcpy(wqe, elem_info->elem, MLXSW_PCI_WQE_SIZE);

	if (q->consumer_counter++ != consumer_counter_limit)
		dev_dbg_ratelimited(&pdev->dev, "Consumer counter does not match limit in RDQ\n");

	err = mlxsw_pci_rdq_skb_alloc(mlxsw_pci, elem_info, GFP_ATOMIC);
	if (err) {
		dev_err_ratelimited(&pdev->dev, "Failed to alloc skb for RDQ\n");
		goto out;
	}

	mlxsw_pci_wqe_frag_unmap(mlxsw_pci, wqe, 0, DMA_FROM_DEVICE);

	if (mlxsw_pci_cqe_lag_get(cqe_v, cqe)) {
		rx_info.is_lag = true;
		rx_info.u.lag_id = mlxsw_pci_cqe_lag_id_get(cqe_v, cqe);
		rx_info.lag_port_index =
			mlxsw_pci_cqe_lag_subport_get(cqe_v, cqe);
	} else {
		rx_info.is_lag = false;
		rx_info.u.sys_port = mlxsw_pci_cqe_system_port_get(cqe);
	}

	rx_info.trap_id = mlxsw_pci_cqe_trap_id_get(cqe);

	if (rx_info.trap_id == MLXSW_TRAP_ID_DISCARD_INGRESS_ACL ||
	    rx_info.trap_id == MLXSW_TRAP_ID_DISCARD_EGRESS_ACL) {
		u32 cookie_index = 0;

		if (mlxsw_pci->max_cqe_ver >= MLXSW_PCI_CQE_V2)
			cookie_index = mlxsw_pci_cqe2_user_def_val_orig_pkt_len_get(cqe);
		mlxsw_skb_cb(skb)->rx_md_info.cookie_index = cookie_index;
	} else if (rx_info.trap_id >= MLXSW_TRAP_ID_MIRROR_SESSION0 &&
		   rx_info.trap_id <= MLXSW_TRAP_ID_MIRROR_SESSION7 &&
		   mlxsw_pci->max_cqe_ver >= MLXSW_PCI_CQE_V2) {
		rx_info.mirror_reason = mlxsw_pci_cqe2_mirror_reason_get(cqe);
		mlxsw_pci_cqe_rdq_md_init(skb, cqe);
	} else if (rx_info.trap_id == MLXSW_TRAP_ID_PKT_SAMPLE &&
		   mlxsw_pci->max_cqe_ver >= MLXSW_PCI_CQE_V2) {
		mlxsw_pci_cqe_rdq_md_tx_port_init(skb, cqe);
	}

	mlxsw_pci_skb_cb_ts_set(mlxsw_pci, skb, cqe_v, cqe);

	byte_count = mlxsw_pci_cqe_byte_count_get(cqe);
	if (mlxsw_pci_cqe_crc_get(cqe_v, cqe))
		byte_count -= ETH_FCS_LEN;
	skb_put(skb, byte_count);
	mlxsw_core_skb_receive(mlxsw_pci->core, skb, &rx_info);

out:
	/* Everything is set up, ring doorbell to pass elem to HW */
	q->producer_counter++;
	mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q);
}

static char *mlxsw_pci_cq_sw_cqe_get(struct mlxsw_pci_queue *q)
{
	struct mlxsw_pci_queue_elem_info *elem_info;
	char *elem;
	bool owner_bit;

	elem_info = mlxsw_pci_queue_elem_info_consumer_get(q);
	elem = elem_info->elem;
	owner_bit = mlxsw_pci_cqe_owner_get(q->u.cq.v, elem);
	if (mlxsw_pci_elem_hw_owned(q, owner_bit))
		return NULL;
	q->consumer_counter++;
	rmb(); /* make sure we read owned bit before the rest of elem */
	return elem;
}
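
/* Consuming a CQE is a two-step handshake: first the owner bit is checked
 * against the software parity (see mlxsw_pci_elem_hw_owned() above) to see
 * whether the device has finished writing the element, and only then is the
 * rest of the CQE read. The rmb() enforces exactly that order on weakly
 * ordered architectures.
 */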

static void mlxsw_pci_cq_tasklet(struct tasklet_struct *t)
{
	struct mlxsw_pci_queue *q = from_tasklet(q, t, tasklet);
	struct mlxsw_pci *mlxsw_pci = q->pci;
	char *cqe;
	int items = 0;
	int credits = q->count >> 1;

	while ((cqe = mlxsw_pci_cq_sw_cqe_get(q))) {
		u16 wqe_counter = mlxsw_pci_cqe_wqe_counter_get(cqe);
		u8 sendq = mlxsw_pci_cqe_sr_get(q->u.cq.v, cqe);
		u8 dqn = mlxsw_pci_cqe_dqn_get(q->u.cq.v, cqe);
		char ncqe[MLXSW_PCI_CQE_SIZE_MAX];

		memcpy(ncqe, cqe, q->elem_size);
		mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q);

		if (sendq) {
			struct mlxsw_pci_queue *sdq;

			sdq = mlxsw_pci_sdq_get(mlxsw_pci, dqn);
			mlxsw_pci_cqe_sdq_handle(mlxsw_pci, sdq,
						 wqe_counter, q->u.cq.v, ncqe);
			q->u.cq.comp_sdq_count++;
		} else {
			struct mlxsw_pci_queue *rdq;

			rdq = mlxsw_pci_rdq_get(mlxsw_pci, dqn);
			mlxsw_pci_cqe_rdq_handle(mlxsw_pci, rdq,
						 wqe_counter, q->u.cq.v, ncqe);
			q->u.cq.comp_rdq_count++;
		}
		if (++items == credits)
			break;
	}
	if (items)
		mlxsw_pci_queue_doorbell_arm_consumer_ring(mlxsw_pci, q);
}
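
/* The tasklet copies each CQE into the local 'ncqe' buffer and returns the
 * ring slot to the device *before* processing it, so the device is never
 * starved of completion entries while skbs are being handled. Processing is
 * also bounded to half the ring ('credits') per invocation; any leftover
 * completions will presumably raise a fresh event once the CQ is re-armed.
 */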

static u16 mlxsw_pci_cq_elem_count(const struct mlxsw_pci_queue *q)
{
	return q->u.cq.v == MLXSW_PCI_CQE_V2 ? MLXSW_PCI_CQE2_COUNT :
					       MLXSW_PCI_CQE01_COUNT;
}

static u8 mlxsw_pci_cq_elem_size(const struct mlxsw_pci_queue *q)
{
	return q->u.cq.v == MLXSW_PCI_CQE_V2 ? MLXSW_PCI_CQE2_SIZE :
					       MLXSW_PCI_CQE01_SIZE;
}

static int mlxsw_pci_eq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
			     struct mlxsw_pci_queue *q)
{
	int i;
	int err;

	q->consumer_counter = 0;

	for (i = 0; i < q->count; i++) {
		char *elem = mlxsw_pci_queue_elem_get(q, i);

		mlxsw_pci_eqe_owner_set(elem, 1);
	}

	mlxsw_cmd_mbox_sw2hw_eq_int_msix_set(mbox, 1); /* MSI-X used */
	mlxsw_cmd_mbox_sw2hw_eq_st_set(mbox, 1); /* armed */
	mlxsw_cmd_mbox_sw2hw_eq_log_eq_size_set(mbox, ilog2(q->count));
	for (i = 0; i < MLXSW_PCI_AQ_PAGES; i++) {
		dma_addr_t mapaddr = __mlxsw_pci_queue_page_get(q, i);

		mlxsw_cmd_mbox_sw2hw_eq_pa_set(mbox, i, mapaddr);
	}
	err = mlxsw_cmd_sw2hw_eq(mlxsw_pci->core, mbox, q->num);
	if (err)
		return err;
	mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q);
	mlxsw_pci_queue_doorbell_arm_consumer_ring(mlxsw_pci, q);
	return 0;
}

static void mlxsw_pci_eq_fini(struct mlxsw_pci *mlxsw_pci,
			      struct mlxsw_pci_queue *q)
{
	mlxsw_cmd_hw2sw_eq(mlxsw_pci->core, q->num);
}

static void mlxsw_pci_eq_cmd_event(struct mlxsw_pci *mlxsw_pci, char *eqe)
{
	mlxsw_pci->cmd.comp.status = mlxsw_pci_eqe_cmd_status_get(eqe);
	mlxsw_pci->cmd.comp.out_param =
		((u64) mlxsw_pci_eqe_cmd_out_param_h_get(eqe)) << 32 |
		mlxsw_pci_eqe_cmd_out_param_l_get(eqe);
	mlxsw_pci->cmd.wait_done = true;
	wake_up(&mlxsw_pci->cmd.wait);
}

static char *mlxsw_pci_eq_sw_eqe_get(struct mlxsw_pci_queue *q)
{
	struct mlxsw_pci_queue_elem_info *elem_info;
	char *elem;
	bool owner_bit;

	elem_info = mlxsw_pci_queue_elem_info_consumer_get(q);
	elem = elem_info->elem;
	owner_bit = mlxsw_pci_eqe_owner_get(elem);
	if (mlxsw_pci_elem_hw_owned(q, owner_bit))
		return NULL;
	q->consumer_counter++;
	rmb(); /* make sure we read owned bit before the rest of elem */
	return elem;
}

static void mlxsw_pci_eq_tasklet(struct tasklet_struct *t)
{
	struct mlxsw_pci_queue *q = from_tasklet(q, t, tasklet);
	struct mlxsw_pci *mlxsw_pci = q->pci;
	u8 cq_count = mlxsw_pci_cq_count(mlxsw_pci);
	unsigned long active_cqns[BITS_TO_LONGS(MLXSW_PCI_CQS_MAX)];
	char *eqe;
	u8 cqn;
	bool cq_handle = false;
	int items = 0;
	int credits = q->count >> 1;

	memset(&active_cqns, 0, sizeof(active_cqns));

	while ((eqe = mlxsw_pci_eq_sw_eqe_get(q))) {

		/* Command interface completion events are always received on
		 * queue MLXSW_PCI_EQ_ASYNC_NUM (EQ0) and completion events
		 * are mapped to queue MLXSW_PCI_EQ_COMP_NUM (EQ1).
		 */
		switch (q->num) {
		case MLXSW_PCI_EQ_ASYNC_NUM:
			mlxsw_pci_eq_cmd_event(mlxsw_pci, eqe);
			q->u.eq.ev_cmd_count++;
			break;
		case MLXSW_PCI_EQ_COMP_NUM:
			cqn = mlxsw_pci_eqe_cqn_get(eqe);
			set_bit(cqn, active_cqns);
			cq_handle = true;
			q->u.eq.ev_comp_count++;
			break;
		default:
			q->u.eq.ev_other_count++;
		}
		if (++items == credits)
			break;
	}
	if (items) {
		mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q);
		mlxsw_pci_queue_doorbell_arm_consumer_ring(mlxsw_pci, q);
	}

	if (!cq_handle)
		return;
	for_each_set_bit(cqn, active_cqns, cq_count) {
		q = mlxsw_pci_cq_get(mlxsw_pci, cqn);
		mlxsw_pci_queue_tasklet_schedule(q);
	}
}
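
/* The EQ tasklet therefore acts as a demultiplexer: EQ0 events complete a
 * pending command-interface call (waking the waiter in
 * mlxsw_pci_eq_cmd_event()), while EQ1 events only record which CQs are
 * active in a bitmap. Each active CQ is then scheduled exactly once, no
 * matter how many of its completion events were coalesced into this run.
 */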

struct mlxsw_pci_queue_ops {
	const char *name;
	enum mlxsw_pci_queue_type type;
	void (*pre_init)(struct mlxsw_pci *mlxsw_pci,
			 struct mlxsw_pci_queue *q);
	int (*init)(struct mlxsw_pci *mlxsw_pci, char *mbox,
		    struct mlxsw_pci_queue *q);
	void (*fini)(struct mlxsw_pci *mlxsw_pci,
		     struct mlxsw_pci_queue *q);
	void (*tasklet)(struct tasklet_struct *t);
	u16 (*elem_count_f)(const struct mlxsw_pci_queue *q);
	u8 (*elem_size_f)(const struct mlxsw_pci_queue *q);
	u16 elem_count;
	u8 elem_size;
};
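
/* Per-type behaviour is expressed through this ops table rather than
 * through conditionals in the generic queue code: each queue type below
 * fills in its init/fini callbacks, and element geometry is given either
 * statically (elem_count/elem_size) or, when it depends on runtime state
 * such as the negotiated CQE version, via elem_count_f()/elem_size_f().
 */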

static const struct mlxsw_pci_queue_ops mlxsw_pci_sdq_ops = {
	.type = MLXSW_PCI_QUEUE_TYPE_SDQ,
	.init = mlxsw_pci_sdq_init,
	.fini = mlxsw_pci_sdq_fini,
	.elem_count = MLXSW_PCI_WQE_COUNT,
	.elem_size = MLXSW_PCI_WQE_SIZE,
};

static const struct mlxsw_pci_queue_ops mlxsw_pci_rdq_ops = {
	.type = MLXSW_PCI_QUEUE_TYPE_RDQ,
	.init = mlxsw_pci_rdq_init,
	.fini = mlxsw_pci_rdq_fini,
	.elem_count = MLXSW_PCI_WQE_COUNT,
	.elem_size = MLXSW_PCI_WQE_SIZE
};

static const struct mlxsw_pci_queue_ops mlxsw_pci_cq_ops = {
	.type = MLXSW_PCI_QUEUE_TYPE_CQ,
	.pre_init = mlxsw_pci_cq_pre_init,
	.init = mlxsw_pci_cq_init,
	.fini = mlxsw_pci_cq_fini,
	.tasklet = mlxsw_pci_cq_tasklet,
	.elem_count_f = mlxsw_pci_cq_elem_count,
	.elem_size_f = mlxsw_pci_cq_elem_size
};

static const struct mlxsw_pci_queue_ops mlxsw_pci_eq_ops = {
	.type = MLXSW_PCI_QUEUE_TYPE_EQ,
	.init = mlxsw_pci_eq_init,
	.fini = mlxsw_pci_eq_fini,
	.tasklet = mlxsw_pci_eq_tasklet,
	.elem_count = MLXSW_PCI_EQE_COUNT,
	.elem_size = MLXSW_PCI_EQE_SIZE
};

static int mlxsw_pci_queue_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
				const struct mlxsw_pci_queue_ops *q_ops,
				struct mlxsw_pci_queue *q, u8 q_num)
{
	struct mlxsw_pci_mem_item *mem_item = &q->mem_item;
	int i;
	int err;

	q->num = q_num;
	if (q_ops->pre_init)
		q_ops->pre_init(mlxsw_pci, q);

	spin_lock_init(&q->lock);
	q->count = q_ops->elem_count_f ? q_ops->elem_count_f(q) :
					 q_ops->elem_count;
	q->elem_size = q_ops->elem_size_f ? q_ops->elem_size_f(q) :
					    q_ops->elem_size;
	q->type = q_ops->type;
	q->pci = mlxsw_pci;

	if (q_ops->tasklet)
		tasklet_setup(&q->tasklet, q_ops->tasklet);

	mem_item->size = MLXSW_PCI_AQ_SIZE;
	mem_item->buf = dma_alloc_coherent(&mlxsw_pci->pdev->dev,
					   mem_item->size, &mem_item->mapaddr,
					   GFP_KERNEL);
	if (!mem_item->buf)
		return -ENOMEM;

	q->elem_info = kcalloc(q->count, sizeof(*q->elem_info), GFP_KERNEL);
	if (!q->elem_info) {
		err = -ENOMEM;
		goto err_elem_info_alloc;
	}

	/* Initialize dma mapped elements info elem_info for
	 * future easy access.
	 */
	for (i = 0; i < q->count; i++) {
		struct mlxsw_pci_queue_elem_info *elem_info;

		elem_info = mlxsw_pci_queue_elem_info_get(q, i);
		elem_info->elem =
			__mlxsw_pci_queue_elem_get(q, q->elem_size, i);
	}

	mlxsw_cmd_mbox_zero(mbox);
	err = q_ops->init(mlxsw_pci, mbox, q);
	if (err)
		goto err_q_ops_init;
	return 0;

err_q_ops_init:
	kfree(q->elem_info);
err_elem_info_alloc:
	dma_free_coherent(&mlxsw_pci->pdev->dev, mem_item->size,
			  mem_item->buf, mem_item->mapaddr);
	return err;
}

static void mlxsw_pci_queue_fini(struct mlxsw_pci *mlxsw_pci,
				 const struct mlxsw_pci_queue_ops *q_ops,
				 struct mlxsw_pci_queue *q)
{
	struct mlxsw_pci_mem_item *mem_item = &q->mem_item;

	q_ops->fini(mlxsw_pci, q);
	kfree(q->elem_info);
	dma_free_coherent(&mlxsw_pci->pdev->dev, mem_item->size,
			  mem_item->buf, mem_item->mapaddr);
}
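
/* Every ring, regardless of type, lives in one MLXSW_PCI_AQ_SIZE coherent
 * DMA allocation that both CPU and device access directly, plus a kcalloc'd
 * shadow array of elem_info entries caching a CPU pointer to each element.
 * A sketch of the resulting layout for a queue with N elements:
 *
 *	mem_item.buf:  [elem 0][elem 1] ... [elem N-1]   (device-visible)
 *	elem_info[i]:   .elem -> mem_item.buf + i * elem_size
 */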

static int mlxsw_pci_queue_group_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
				      const struct mlxsw_pci_queue_ops *q_ops,
				      u8 num_qs)
{
	struct mlxsw_pci_queue_type_group *queue_group;
	int i;
	int err;

	queue_group = mlxsw_pci_queue_type_group_get(mlxsw_pci, q_ops->type);
	queue_group->q = kcalloc(num_qs, sizeof(*queue_group->q), GFP_KERNEL);
	if (!queue_group->q)
		return -ENOMEM;

	for (i = 0; i < num_qs; i++) {
		err = mlxsw_pci_queue_init(mlxsw_pci, mbox, q_ops,
					   &queue_group->q[i], i);
		if (err)
			goto err_queue_init;
	}
	queue_group->count = num_qs;

	return 0;

err_queue_init:
	for (i--; i >= 0; i--)
		mlxsw_pci_queue_fini(mlxsw_pci, q_ops, &queue_group->q[i]);
	kfree(queue_group->q);
	return err;
}

static void mlxsw_pci_queue_group_fini(struct mlxsw_pci *mlxsw_pci,
				       const struct mlxsw_pci_queue_ops *q_ops)
{
	struct mlxsw_pci_queue_type_group *queue_group;
	int i;

	queue_group = mlxsw_pci_queue_type_group_get(mlxsw_pci, q_ops->type);
	for (i = 0; i < queue_group->count; i++)
		mlxsw_pci_queue_fini(mlxsw_pci, q_ops, &queue_group->q[i]);
	kfree(queue_group->q);
}

static int mlxsw_pci_aqs_init(struct mlxsw_pci *mlxsw_pci, char *mbox)
{
	struct pci_dev *pdev = mlxsw_pci->pdev;
	u8 num_sdqs;
	u8 sdq_log2sz;
	u8 num_rdqs;
	u8 rdq_log2sz;
	u8 num_cqs;
	u8 cq_log2sz;
	u8 cqv2_log2sz;
	u8 num_eqs;
	u8 eq_log2sz;
	int err;

	mlxsw_cmd_mbox_zero(mbox);
	err = mlxsw_cmd_query_aq_cap(mlxsw_pci->core, mbox);
	if (err)
		return err;

	num_sdqs = mlxsw_cmd_mbox_query_aq_cap_max_num_sdqs_get(mbox);
	sdq_log2sz = mlxsw_cmd_mbox_query_aq_cap_log_max_sdq_sz_get(mbox);
	num_rdqs = mlxsw_cmd_mbox_query_aq_cap_max_num_rdqs_get(mbox);
	rdq_log2sz = mlxsw_cmd_mbox_query_aq_cap_log_max_rdq_sz_get(mbox);
	num_cqs = mlxsw_cmd_mbox_query_aq_cap_max_num_cqs_get(mbox);
	cq_log2sz = mlxsw_cmd_mbox_query_aq_cap_log_max_cq_sz_get(mbox);
	cqv2_log2sz = mlxsw_cmd_mbox_query_aq_cap_log_max_cqv2_sz_get(mbox);
	num_eqs = mlxsw_cmd_mbox_query_aq_cap_max_num_eqs_get(mbox);
	eq_log2sz = mlxsw_cmd_mbox_query_aq_cap_log_max_eq_sz_get(mbox);

	if (num_sdqs + num_rdqs > num_cqs ||
	    num_sdqs < MLXSW_PCI_SDQS_MIN ||
	    num_cqs > MLXSW_PCI_CQS_MAX || num_eqs != MLXSW_PCI_EQS_COUNT) {
		dev_err(&pdev->dev, "Unsupported number of queues\n");
		return -EINVAL;
	}

	if ((1 << sdq_log2sz != MLXSW_PCI_WQE_COUNT) ||
	    (1 << rdq_log2sz != MLXSW_PCI_WQE_COUNT) ||
	    (1 << cq_log2sz != MLXSW_PCI_CQE01_COUNT) ||
	    (mlxsw_pci->max_cqe_ver == MLXSW_PCI_CQE_V2 &&
	     (1 << cqv2_log2sz != MLXSW_PCI_CQE2_COUNT)) ||
	    (1 << eq_log2sz != MLXSW_PCI_EQE_COUNT)) {
		dev_err(&pdev->dev, "Unsupported number of async queue descriptors\n");
		return -EINVAL;
	}

	mlxsw_pci->num_sdq_cqs = num_sdqs;

	err = mlxsw_pci_queue_group_init(mlxsw_pci, mbox, &mlxsw_pci_eq_ops,
					 num_eqs);
	if (err) {
		dev_err(&pdev->dev, "Failed to initialize event queues\n");
		return err;
	}

	err = mlxsw_pci_queue_group_init(mlxsw_pci, mbox, &mlxsw_pci_cq_ops,
					 num_cqs);
	if (err) {
		dev_err(&pdev->dev, "Failed to initialize completion queues\n");
		goto err_cqs_init;
	}

	err = mlxsw_pci_queue_group_init(mlxsw_pci, mbox, &mlxsw_pci_sdq_ops,
					 num_sdqs);
	if (err) {
		dev_err(&pdev->dev, "Failed to initialize send descriptor queues\n");
		goto err_sdqs_init;
	}

	err = mlxsw_pci_queue_group_init(mlxsw_pci, mbox, &mlxsw_pci_rdq_ops,
					 num_rdqs);
	if (err) {
		dev_err(&pdev->dev, "Failed to initialize receive descriptor queues\n");
		goto err_rdqs_init;
	}

	/* We have to poll in command interface until queues are initialized */
	mlxsw_pci->cmd.nopoll = true;
	return 0;

err_rdqs_init:
	mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_sdq_ops);
err_sdqs_init:
	mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_cq_ops);
err_cqs_init:
	mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_eq_ops);
	return err;
}
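
/* The initialization order above is deliberate: EQs first, then the CQs
 * that report into them, then the SDQs/RDQs that complete onto the CQs, so
 * every queue's event sink exists before the queue itself. Only once all
 * rings are up is cmd.nopoll set, switching the command interface from
 * polling to event-driven completion; the fini path below reverses both.
 */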

static void mlxsw_pci_aqs_fini(struct mlxsw_pci *mlxsw_pci)
{
	mlxsw_pci->cmd.nopoll = false;
	mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_rdq_ops);
	mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_sdq_ops);
	mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_cq_ops);
	mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_eq_ops);
}

static void
mlxsw_pci_config_profile_swid_config(struct mlxsw_pci *mlxsw_pci,
				     char *mbox, int index,
				     const struct mlxsw_swid_config *swid)
{
	u8 mask = 0;

	if (swid->used_type) {
		mlxsw_cmd_mbox_config_profile_swid_config_type_set(
			mbox, index, swid->type);
		mask |= 1;
	}
	if (swid->used_properties) {
		mlxsw_cmd_mbox_config_profile_swid_config_properties_set(
			mbox, index, swid->properties);
		mask |= 2;
	}
	mlxsw_cmd_mbox_config_profile_swid_config_mask_set(mbox, index, mask);
}

static int
mlxsw_pci_profile_get_kvd_sizes(const struct mlxsw_pci *mlxsw_pci,
				const struct mlxsw_config_profile *profile,
				struct mlxsw_res *res)
{
	u64 single_size, double_size, linear_size;
	int err;

	err = mlxsw_core_kvd_sizes_get(mlxsw_pci->core, profile,
				       &single_size, &double_size,
				       &linear_size);
	if (err)
		return err;

	MLXSW_RES_SET(res, KVD_SINGLE_SIZE, single_size);
	MLXSW_RES_SET(res, KVD_DOUBLE_SIZE, double_size);
	MLXSW_RES_SET(res, KVD_LINEAR_SIZE, linear_size);

	return 0;
}

static int mlxsw_pci_config_profile(struct mlxsw_pci *mlxsw_pci, char *mbox,
				    const struct mlxsw_config_profile *profile,
				    struct mlxsw_res *res)
{
	int i;
	int err;

	mlxsw_cmd_mbox_zero(mbox);

	if (profile->used_max_vepa_channels) {
		mlxsw_cmd_mbox_config_profile_set_max_vepa_channels_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_max_vepa_channels_set(
			mbox, profile->max_vepa_channels);
	}
	if (profile->used_max_lag) {
		mlxsw_cmd_mbox_config_profile_set_max_lag_set(mbox, 1);
		mlxsw_cmd_mbox_config_profile_max_lag_set(mbox,
							   profile->max_lag);
	}
	if (profile->used_max_mid) {
		mlxsw_cmd_mbox_config_profile_set_max_mid_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_max_mid_set(
			mbox, profile->max_mid);
	}
	if (profile->used_max_pgt) {
		mlxsw_cmd_mbox_config_profile_set_max_pgt_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_max_pgt_set(
			mbox, profile->max_pgt);
	}
	if (profile->used_max_system_port) {
		mlxsw_cmd_mbox_config_profile_set_max_system_port_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_max_system_port_set(
			mbox, profile->max_system_port);
	}
	if (profile->used_max_vlan_groups) {
		mlxsw_cmd_mbox_config_profile_set_max_vlan_groups_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_max_vlan_groups_set(
			mbox, profile->max_vlan_groups);
	}
	if (profile->used_max_regions) {
		mlxsw_cmd_mbox_config_profile_set_max_regions_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_max_regions_set(
			mbox, profile->max_regions);
	}
	if (profile->used_flood_tables) {
		mlxsw_cmd_mbox_config_profile_set_flood_tables_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_max_flood_tables_set(
			mbox, profile->max_flood_tables);
		mlxsw_cmd_mbox_config_profile_max_vid_flood_tables_set(
			mbox, profile->max_vid_flood_tables);
		mlxsw_cmd_mbox_config_profile_max_fid_offset_flood_tables_set(
			mbox, profile->max_fid_offset_flood_tables);
		mlxsw_cmd_mbox_config_profile_fid_offset_flood_table_size_set(
			mbox, profile->fid_offset_flood_table_size);
		mlxsw_cmd_mbox_config_profile_max_fid_flood_tables_set(
			mbox, profile->max_fid_flood_tables);
		mlxsw_cmd_mbox_config_profile_fid_flood_table_size_set(
			mbox, profile->fid_flood_table_size);
	}
	if (profile->used_flood_mode) {
		mlxsw_cmd_mbox_config_profile_set_flood_mode_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_flood_mode_set(
			mbox, profile->flood_mode);
	}
	if (profile->used_max_ib_mc) {
		mlxsw_cmd_mbox_config_profile_set_max_ib_mc_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_max_ib_mc_set(
			mbox, profile->max_ib_mc);
	}
	if (profile->used_max_pkey) {
		mlxsw_cmd_mbox_config_profile_set_max_pkey_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_max_pkey_set(
			mbox, profile->max_pkey);
	}
	if (profile->used_ar_sec) {
		mlxsw_cmd_mbox_config_profile_set_ar_sec_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_ar_sec_set(
			mbox, profile->ar_sec);
	}
	if (profile->used_adaptive_routing_group_cap) {
		mlxsw_cmd_mbox_config_profile_set_adaptive_routing_group_cap_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_adaptive_routing_group_cap_set(
			mbox, profile->adaptive_routing_group_cap);
	}
	if (profile->used_ubridge) {
		mlxsw_cmd_mbox_config_profile_set_ubridge_set(mbox, 1);
		mlxsw_cmd_mbox_config_profile_ubridge_set(mbox,
							  profile->ubridge);
	}
	if (profile->used_kvd_sizes && MLXSW_RES_VALID(res, KVD_SIZE)) {
		err = mlxsw_pci_profile_get_kvd_sizes(mlxsw_pci, profile, res);
		if (err)
			return err;

		mlxsw_cmd_mbox_config_profile_set_kvd_linear_size_set(mbox, 1);
		mlxsw_cmd_mbox_config_profile_kvd_linear_size_set(mbox,
					MLXSW_RES_GET(res, KVD_LINEAR_SIZE));
		mlxsw_cmd_mbox_config_profile_set_kvd_hash_single_size_set(mbox,
									   1);
		mlxsw_cmd_mbox_config_profile_kvd_hash_single_size_set(mbox,
					MLXSW_RES_GET(res, KVD_SINGLE_SIZE));
		mlxsw_cmd_mbox_config_profile_set_kvd_hash_double_size_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_kvd_hash_double_size_set(mbox,
					MLXSW_RES_GET(res, KVD_DOUBLE_SIZE));
	}

	for (i = 0; i < MLXSW_CONFIG_PROFILE_SWID_COUNT; i++)
		mlxsw_pci_config_profile_swid_config(mlxsw_pci, mbox, i,
						     &profile->swid_config[i]);

	if (mlxsw_pci->max_cqe_ver > MLXSW_PCI_CQE_V0) {
		mlxsw_cmd_mbox_config_profile_set_cqe_version_set(mbox, 1);
		mlxsw_cmd_mbox_config_profile_cqe_version_set(mbox, 1);
	}

	if (profile->used_cqe_time_stamp_type) {
		mlxsw_cmd_mbox_config_profile_set_cqe_time_stamp_type_set(mbox,
									  1);
		mlxsw_cmd_mbox_config_profile_cqe_time_stamp_type_set(mbox,
					profile->cqe_time_stamp_type);
	}

	if (profile->lag_mode_prefer_sw && mlxsw_pci->lag_mode_support) {
		enum mlxsw_cmd_mbox_config_profile_lag_mode lag_mode =
			MLXSW_CMD_MBOX_CONFIG_PROFILE_LAG_MODE_SW;

		mlxsw_cmd_mbox_config_profile_set_lag_mode_set(mbox, 1);
		mlxsw_cmd_mbox_config_profile_lag_mode_set(mbox, lag_mode);
		mlxsw_pci->lag_mode = lag_mode;
	} else {
		mlxsw_pci->lag_mode = MLXSW_CMD_MBOX_CONFIG_PROFILE_LAG_MODE_FW;
	}
	return mlxsw_cmd_config_profile_set(mlxsw_pci->core, mbox);
}
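
/* Every field in the CONFIG_PROFILE mailbox is guarded by a matching set_*
 * flag, so only explicitly selected values override the firmware defaults.
 * The pattern for any field X is always the same pair of writes:
 *
 *	mlxsw_cmd_mbox_config_profile_set_X_set(mbox, 1);
 *	mlxsw_cmd_mbox_config_profile_X_set(mbox, profile->X);
 */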

static int mlxsw_pci_boardinfo(struct mlxsw_pci *mlxsw_pci, char *mbox)
{
	struct mlxsw_bus_info *bus_info = &mlxsw_pci->bus_info;
	int err;

	mlxsw_cmd_mbox_zero(mbox);
	err = mlxsw_cmd_boardinfo(mlxsw_pci->core, mbox);
	if (err)
		return err;
	mlxsw_cmd_mbox_boardinfo_vsd_memcpy_from(mbox, bus_info->vsd);
	mlxsw_cmd_mbox_boardinfo_psid_memcpy_from(mbox, bus_info->psid);
	return 0;
}

static int mlxsw_pci_fw_area_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
				  u16 num_pages)
{
	struct mlxsw_pci_mem_item *mem_item;
	int nent = 0;
	int i;
	int err;

	mlxsw_pci->fw_area.items = kcalloc(num_pages, sizeof(*mem_item),
					   GFP_KERNEL);
	if (!mlxsw_pci->fw_area.items)
		return -ENOMEM;
	mlxsw_pci->fw_area.count = num_pages;

	mlxsw_cmd_mbox_zero(mbox);
	for (i = 0; i < num_pages; i++) {
		mem_item = &mlxsw_pci->fw_area.items[i];

		mem_item->size = MLXSW_PCI_PAGE_SIZE;
		mem_item->buf = dma_alloc_coherent(&mlxsw_pci->pdev->dev,
						   mem_item->size,
						   &mem_item->mapaddr, GFP_KERNEL);
		if (!mem_item->buf) {
			err = -ENOMEM;
			goto err_alloc;
		}
		mlxsw_cmd_mbox_map_fa_pa_set(mbox, nent, mem_item->mapaddr);
		mlxsw_cmd_mbox_map_fa_log2size_set(mbox, nent, 0); /* 1 page */
		if (++nent == MLXSW_CMD_MAP_FA_VPM_ENTRIES_MAX) {
			err = mlxsw_cmd_map_fa(mlxsw_pci->core, mbox, nent);
			if (err)
				goto err_cmd_map_fa;
			nent = 0;
			mlxsw_cmd_mbox_zero(mbox);
		}
	}

	if (nent) {
		err = mlxsw_cmd_map_fa(mlxsw_pci->core, mbox, nent);
		if (err)
			goto err_cmd_map_fa;
	}

	return 0;

err_cmd_map_fa:
err_alloc:
	for (i--; i >= 0; i--) {
		mem_item = &mlxsw_pci->fw_area.items[i];

		dma_free_coherent(&mlxsw_pci->pdev->dev, mem_item->size,
				  mem_item->buf, mem_item->mapaddr);
	}
	kfree(mlxsw_pci->fw_area.items);
	return err;
}
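
/* Firmware pages are donated one MLXSW_PCI_PAGE_SIZE page at a time, but
 * MAP_FA mailboxes are batched: up to MLXSW_CMD_MAP_FA_VPM_ENTRIES_MAX
 * (page address, log2 size) entries are accumulated before each command is
 * issued, and a final short batch flushes the remainder. On any failure the
 * pages allocated so far are freed in reverse order.
 */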

static void mlxsw_pci_fw_area_fini(struct mlxsw_pci *mlxsw_pci)
{
	struct mlxsw_pci_mem_item *mem_item;
	int i;

	mlxsw_cmd_unmap_fa(mlxsw_pci->core);

	for (i = 0; i < mlxsw_pci->fw_area.count; i++) {
		mem_item = &mlxsw_pci->fw_area.items[i];

		dma_free_coherent(&mlxsw_pci->pdev->dev, mem_item->size,
				  mem_item->buf, mem_item->mapaddr);
	}
	kfree(mlxsw_pci->fw_area.items);
}

static irqreturn_t mlxsw_pci_eq_irq_handler(int irq, void *dev_id)
{
	struct mlxsw_pci *mlxsw_pci = dev_id;
	struct mlxsw_pci_queue *q;
	int i;

	for (i = 0; i < MLXSW_PCI_EQS_COUNT; i++) {
		q = mlxsw_pci_eq_get(mlxsw_pci, i);
		mlxsw_pci_queue_tasklet_schedule(q);
	}
	return IRQ_HANDLED;
}
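
/* A single MSI-X vector is allocated (see mlxsw_pci_alloc_irq_vectors()
 * below), so the hard IRQ handler cannot tell which EQ fired. It simply
 * schedules the tasklet of every EQ and lets the owner-bit check in
 * mlxsw_pci_eq_sw_eqe_get() turn an idle queue's run into a no-op.
 */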

static int mlxsw_pci_mbox_alloc(struct mlxsw_pci *mlxsw_pci,
				struct mlxsw_pci_mem_item *mbox)
{
	struct pci_dev *pdev = mlxsw_pci->pdev;
	int err = 0;

	mbox->size = MLXSW_CMD_MBOX_SIZE;
	mbox->buf = dma_alloc_coherent(&pdev->dev, MLXSW_CMD_MBOX_SIZE,
				       &mbox->mapaddr, GFP_KERNEL);
	if (!mbox->buf) {
		dev_err(&pdev->dev, "Failed allocating memory for mailbox\n");
		err = -ENOMEM;
	}

	return err;
}

static void mlxsw_pci_mbox_free(struct mlxsw_pci *mlxsw_pci,
				struct mlxsw_pci_mem_item *mbox)
{
	struct pci_dev *pdev = mlxsw_pci->pdev;

	dma_free_coherent(&pdev->dev, MLXSW_CMD_MBOX_SIZE, mbox->buf,
			  mbox->mapaddr);
}

static int mlxsw_pci_sys_ready_wait(struct mlxsw_pci *mlxsw_pci,
				    const struct pci_device_id *id,
				    u32 *p_sys_status)
{
	unsigned long end;
	u32 val;

	/* We must wait for the HW to become responsive. */
	msleep(MLXSW_PCI_SW_RESET_WAIT_MSECS);

	end = jiffies + msecs_to_jiffies(MLXSW_PCI_SW_RESET_TIMEOUT_MSECS);
	do {
		val = mlxsw_pci_read32(mlxsw_pci, FW_READY);
		if ((val & MLXSW_PCI_FW_READY_MASK) == MLXSW_PCI_FW_READY_MAGIC)
			return 0;
		cond_resched();
	} while (time_before(jiffies, end));

	*p_sys_status = val & MLXSW_PCI_FW_READY_MASK;

	return -EBUSY;
}

static int mlxsw_pci_sw_reset(struct mlxsw_pci *mlxsw_pci,
			      const struct pci_device_id *id)
{
	struct pci_dev *pdev = mlxsw_pci->pdev;
	char mrsr_pl[MLXSW_REG_MRSR_LEN];
	u32 sys_status;
	int err;

	err = mlxsw_pci_sys_ready_wait(mlxsw_pci, id, &sys_status);
	if (err) {
		dev_err(&pdev->dev, "Failed to reach system ready status before reset. Status is 0x%x\n",
			sys_status);
		return err;
	}

	mlxsw_reg_mrsr_pack(mrsr_pl);
	err = mlxsw_reg_write(mlxsw_pci->core, MLXSW_REG(mrsr), mrsr_pl);
	if (err)
		return err;

	err = mlxsw_pci_sys_ready_wait(mlxsw_pci, id, &sys_status);
	if (err) {
		dev_err(&pdev->dev, "Failed to reach system ready status after reset. Status is 0x%x\n",
			sys_status);
		return err;
	}

	return 0;
}
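
/* The reset sequence is thus: wait until the FW_READY register reports the
 * ready magic, trigger a software reset through the MRSR register, then
 * wait for the same ready indication again. Both waits poll with
 * cond_resched() and give up with -EBUSY after
 * MLXSW_PCI_SW_RESET_TIMEOUT_MSECS, reporting the last observed status.
 */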

static int mlxsw_pci_alloc_irq_vectors(struct mlxsw_pci *mlxsw_pci)
{
	int err;

	err = pci_alloc_irq_vectors(mlxsw_pci->pdev, 1, 1, PCI_IRQ_MSIX);
	if (err < 0)
		dev_err(&mlxsw_pci->pdev->dev, "MSI-X init failed\n");
	return err;
}

static void mlxsw_pci_free_irq_vectors(struct mlxsw_pci *mlxsw_pci)
{
	pci_free_irq_vectors(mlxsw_pci->pdev);
}
1523 | |
1524 | static int mlxsw_pci_init(void *bus_priv, struct mlxsw_core *mlxsw_core, |
1525 | const struct mlxsw_config_profile *profile, |
1526 | struct mlxsw_res *res) |
1527 | { |
1528 | struct mlxsw_pci *mlxsw_pci = bus_priv; |
1529 | struct pci_dev *pdev = mlxsw_pci->pdev; |
1530 | char *mbox; |
1531 | u16 num_pages; |
1532 | int err; |
1533 | |
1534 | mlxsw_pci->core = mlxsw_core; |
1535 | |
1536 | mbox = mlxsw_cmd_mbox_alloc(); |
1537 | if (!mbox) |
1538 | return -ENOMEM; |
1539 | |
1540 | err = mlxsw_pci_sw_reset(mlxsw_pci, id: mlxsw_pci->id); |
1541 | if (err) |
1542 | goto err_sw_reset; |
1543 | |
1544 | err = mlxsw_pci_alloc_irq_vectors(mlxsw_pci); |
1545 | if (err < 0) { |
1546 | dev_err(&pdev->dev, "MSI-X init failed\n" ); |
1547 | goto err_alloc_irq; |
1548 | } |
1549 | |
1550 | err = mlxsw_cmd_query_fw(mlxsw_core, out_mbox: mbox); |
1551 | if (err) |
1552 | goto err_query_fw; |
1553 | |
mlxsw_pci->bus_info.fw_rev.major =
mlxsw_cmd_mbox_query_fw_fw_rev_major_get(mbox);
mlxsw_pci->bus_info.fw_rev.minor =
mlxsw_cmd_mbox_query_fw_fw_rev_minor_get(mbox);
mlxsw_pci->bus_info.fw_rev.subminor =
mlxsw_cmd_mbox_query_fw_fw_rev_subminor_get(mbox);

if (mlxsw_cmd_mbox_query_fw_cmd_interface_rev_get(mbox) != 1) {
dev_err(&pdev->dev, "Unsupported cmd interface revision ID queried from hw\n");
1563 | err = -EINVAL; |
1564 | goto err_iface_rev; |
1565 | } |
if (mlxsw_cmd_mbox_query_fw_doorbell_page_bar_get(mbox) != 0) {
dev_err(&pdev->dev, "Unsupported doorbell page bar queried from hw\n");
1568 | err = -EINVAL; |
1569 | goto err_doorbell_page_bar; |
1570 | } |
1571 | |
mlxsw_pci->doorbell_offset =
mlxsw_cmd_mbox_query_fw_doorbell_page_offset_get(mbox);
1574 | |
if (mlxsw_cmd_mbox_query_fw_fr_rn_clk_bar_get(mbox) != 0) {
dev_err(&pdev->dev, "Unsupported free running clock BAR queried from hw\n");
1577 | err = -EINVAL; |
1578 | goto err_fr_rn_clk_bar; |
1579 | } |
1580 | |
mlxsw_pci->free_running_clock_offset =
mlxsw_cmd_mbox_query_fw_free_running_clock_offset_get(mbox);
1583 | |
if (mlxsw_cmd_mbox_query_fw_utc_sec_bar_get(mbox) != 0) {
dev_err(&pdev->dev, "Unsupported UTC sec BAR queried from hw\n");
1586 | err = -EINVAL; |
1587 | goto err_utc_sec_bar; |
1588 | } |
1589 | |
mlxsw_pci->utc_sec_offset =
mlxsw_cmd_mbox_query_fw_utc_sec_offset_get(mbox);
1592 | |
if (mlxsw_cmd_mbox_query_fw_utc_nsec_bar_get(mbox) != 0) {
dev_err(&pdev->dev, "Unsupported UTC nsec BAR queried from hw\n");
1595 | err = -EINVAL; |
1596 | goto err_utc_nsec_bar; |
1597 | } |
1598 | |
mlxsw_pci->utc_nsec_offset =
mlxsw_cmd_mbox_query_fw_utc_nsec_offset_get(mbox);

mlxsw_pci->lag_mode_support =
mlxsw_cmd_mbox_query_fw_lag_mode_support_get(mbox);
num_pages = mlxsw_cmd_mbox_query_fw_fw_pages_get(mbox);
1605 | err = mlxsw_pci_fw_area_init(mlxsw_pci, mbox, num_pages); |
1606 | if (err) |
1607 | goto err_fw_area_init; |
1608 | |
1609 | err = mlxsw_pci_boardinfo(mlxsw_pci, mbox); |
1610 | if (err) |
1611 | goto err_boardinfo; |
1612 | |
1613 | err = mlxsw_core_resources_query(mlxsw_core, mbox, res); |
1614 | if (err) |
1615 | goto err_query_resources; |
1616 | |
1617 | if (MLXSW_CORE_RES_VALID(mlxsw_core, CQE_V2) && |
1618 | MLXSW_CORE_RES_GET(mlxsw_core, CQE_V2)) |
1619 | mlxsw_pci->max_cqe_ver = MLXSW_PCI_CQE_V2; |
1620 | else if (MLXSW_CORE_RES_VALID(mlxsw_core, CQE_V1) && |
1621 | MLXSW_CORE_RES_GET(mlxsw_core, CQE_V1)) |
1622 | mlxsw_pci->max_cqe_ver = MLXSW_PCI_CQE_V1; |
1623 | else if ((MLXSW_CORE_RES_VALID(mlxsw_core, CQE_V0) && |
1624 | MLXSW_CORE_RES_GET(mlxsw_core, CQE_V0)) || |
1625 | !MLXSW_CORE_RES_VALID(mlxsw_core, CQE_V0)) { |
1626 | mlxsw_pci->max_cqe_ver = MLXSW_PCI_CQE_V0; |
1627 | } else { |
1628 | dev_err(&pdev->dev, "Invalid supported CQE version combination reported\n" ); |
1629 | goto err_cqe_v_check; |
1630 | } |
1631 | |
1632 | err = mlxsw_pci_config_profile(mlxsw_pci, mbox, profile, res); |
1633 | if (err) |
1634 | goto err_config_profile; |
1635 | |
1636 | /* Some resources depend on details of config_profile, such as unified |
1637 | * bridge model. Query the resources again to get correct values. |
1638 | */ |
1639 | err = mlxsw_core_resources_query(mlxsw_core, mbox, res); |
1640 | if (err) |
1641 | goto err_requery_resources; |
1642 | |
1643 | err = mlxsw_pci_aqs_init(mlxsw_pci, mbox); |
1644 | if (err) |
1645 | goto err_aqs_init; |
1646 | |
err = request_irq(pci_irq_vector(pdev, 0),
mlxsw_pci_eq_irq_handler, 0,
mlxsw_pci->bus_info.device_kind, mlxsw_pci);
if (err) {
dev_err(&pdev->dev, "IRQ request failed\n");
1652 | goto err_request_eq_irq; |
1653 | } |
1654 | |
1655 | goto mbox_put; |
1656 | |
1657 | err_request_eq_irq: |
1658 | mlxsw_pci_aqs_fini(mlxsw_pci); |
1659 | err_aqs_init: |
1660 | err_requery_resources: |
1661 | err_config_profile: |
1662 | err_cqe_v_check: |
1663 | err_query_resources: |
1664 | err_boardinfo: |
1665 | mlxsw_pci_fw_area_fini(mlxsw_pci); |
1666 | err_fw_area_init: |
1667 | err_utc_nsec_bar: |
1668 | err_utc_sec_bar: |
1669 | err_fr_rn_clk_bar: |
1670 | err_doorbell_page_bar: |
1671 | err_iface_rev: |
1672 | err_query_fw: |
1673 | mlxsw_pci_free_irq_vectors(mlxsw_pci); |
1674 | err_alloc_irq: |
1675 | err_sw_reset: |
1676 | mbox_put: |
1677 | mlxsw_cmd_mbox_free(mbox); |
1678 | return err; |
1679 | } |
1680 | |
1681 | static void mlxsw_pci_fini(void *bus_priv) |
1682 | { |
1683 | struct mlxsw_pci *mlxsw_pci = bus_priv; |
1684 | |
free_irq(pci_irq_vector(mlxsw_pci->pdev, 0), mlxsw_pci);
1686 | mlxsw_pci_aqs_fini(mlxsw_pci); |
1687 | mlxsw_pci_fw_area_fini(mlxsw_pci); |
1688 | mlxsw_pci_free_irq_vectors(mlxsw_pci); |
1689 | } |
1690 | |
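/* EMAD traffic always uses the dedicated SDQ at index 0; all other
 * traffic is spread over the remaining control SDQs by local port.
 */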
1691 | static struct mlxsw_pci_queue * |
1692 | mlxsw_pci_sdq_pick(struct mlxsw_pci *mlxsw_pci, |
1693 | const struct mlxsw_tx_info *tx_info) |
1694 | { |
1695 | u8 ctl_sdq_count = mlxsw_pci_sdq_count(mlxsw_pci) - 1; |
1696 | u8 sdqn; |
1697 | |
1698 | if (tx_info->is_emad) { |
1699 | sdqn = MLXSW_PCI_SDQ_EMAD_INDEX; |
1700 | } else { |
1701 | BUILD_BUG_ON(MLXSW_PCI_SDQ_EMAD_INDEX != 0); |
1702 | sdqn = 1 + (tx_info->local_port % ctl_sdq_count); |
1703 | } |
1704 | |
return mlxsw_pci_sdq_get(mlxsw_pci, sdqn);
1706 | } |
1707 | |
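/* The picked SDQ is busy when it has no free producer element left. */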
1708 | static bool mlxsw_pci_skb_transmit_busy(void *bus_priv, |
1709 | const struct mlxsw_tx_info *tx_info) |
1710 | { |
1711 | struct mlxsw_pci *mlxsw_pci = bus_priv; |
1712 | struct mlxsw_pci_queue *q = mlxsw_pci_sdq_pick(mlxsw_pci, tx_info); |
1713 | |
1714 | return !mlxsw_pci_queue_elem_info_producer_get(q); |
1715 | } |
1716 | |
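/* Build a WQE for the skb, DMA-map its linear part and fragments, and
 * ring the producer doorbell. Returns -EAGAIN when the SDQ is full.
 */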
1717 | static int mlxsw_pci_skb_transmit(void *bus_priv, struct sk_buff *skb, |
1718 | const struct mlxsw_tx_info *tx_info) |
1719 | { |
1720 | struct mlxsw_pci *mlxsw_pci = bus_priv; |
1721 | struct mlxsw_pci_queue *q; |
1722 | struct mlxsw_pci_queue_elem_info *elem_info; |
1723 | char *wqe; |
1724 | int i; |
1725 | int err; |
1726 | |
1727 | if (skb_shinfo(skb)->nr_frags > MLXSW_PCI_WQE_SG_ENTRIES - 1) { |
1728 | err = skb_linearize(skb); |
1729 | if (err) |
1730 | return err; |
1731 | } |
1732 | |
1733 | q = mlxsw_pci_sdq_pick(mlxsw_pci, tx_info); |
spin_lock_bh(&q->lock);
1735 | elem_info = mlxsw_pci_queue_elem_info_producer_get(q); |
1736 | if (!elem_info) { |
1737 | /* queue is full */ |
1738 | err = -EAGAIN; |
1739 | goto unlock; |
1740 | } |
1741 | mlxsw_skb_cb(skb)->tx_info = *tx_info; |
1742 | elem_info->u.sdq.skb = skb; |
1743 | |
1744 | wqe = elem_info->elem; |
mlxsw_pci_wqe_c_set(wqe, 1); /* always report completion */
mlxsw_pci_wqe_lp_set(wqe, 0);
mlxsw_pci_wqe_type_set(wqe, MLXSW_PCI_WQE_TYPE_ETHERNET);

err = mlxsw_pci_wqe_frag_map(mlxsw_pci, wqe, 0, skb->data,
skb_headlen(skb), DMA_TO_DEVICE);
1751 | if (err) |
1752 | goto unlock; |
1753 | |
1754 | for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { |
1755 | const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; |
1756 | |
err = mlxsw_pci_wqe_frag_map(mlxsw_pci, wqe, i + 1,
skb_frag_address(frag),
skb_frag_size(frag),
DMA_TO_DEVICE);
1761 | if (err) |
1762 | goto unmap_frags; |
1763 | } |
1764 | |
1765 | if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) |
1766 | skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; |
1767 | |
1768 | /* Set unused sq entries byte count to zero. */ |
1769 | for (i++; i < MLXSW_PCI_WQE_SG_ENTRIES; i++) |
mlxsw_pci_wqe_byte_count_set(wqe, i, 0);
1771 | |
1772 | /* Everything is set up, ring producer doorbell to get HW going */ |
1773 | q->producer_counter++; |
1774 | mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q); |
1775 | |
1776 | goto unlock; |
1777 | |
1778 | unmap_frags: |
1779 | for (; i >= 0; i--) |
mlxsw_pci_wqe_frag_unmap(mlxsw_pci, wqe, i, DMA_TO_DEVICE);
1781 | unlock: |
spin_unlock_bh(&q->lock);
1783 | return err; |
1784 | } |
1785 | |
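/* Execute a command through the Command Interface Registers (CIR).
 * Mailbox contents are staged in the pre-mapped in/out DMA buffers.
 * Completion is detected either by polling the GO bit or, when the
 * command interface runs in event mode (cmd.nopoll), by waiting for a
 * command completion event.
 */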
1786 | static int mlxsw_pci_cmd_exec(void *bus_priv, u16 opcode, u8 opcode_mod, |
1787 | u32 in_mod, bool out_mbox_direct, |
1788 | char *in_mbox, size_t in_mbox_size, |
1789 | char *out_mbox, size_t out_mbox_size, |
1790 | u8 *p_status) |
1791 | { |
1792 | struct mlxsw_pci *mlxsw_pci = bus_priv; |
1793 | dma_addr_t in_mapaddr = 0, out_mapaddr = 0; |
1794 | bool evreq = mlxsw_pci->cmd.nopoll; |
1795 | unsigned long timeout = msecs_to_jiffies(MLXSW_PCI_CIR_TIMEOUT_MSECS); |
1796 | bool *p_wait_done = &mlxsw_pci->cmd.wait_done; |
1797 | int err; |
1798 | |
1799 | *p_status = MLXSW_CMD_STATUS_OK; |
1800 | |
1801 | err = mutex_lock_interruptible(&mlxsw_pci->cmd.lock); |
1802 | if (err) |
1803 | return err; |
1804 | |
1805 | if (in_mbox) { |
1806 | memcpy(mlxsw_pci->cmd.in_mbox.buf, in_mbox, in_mbox_size); |
1807 | in_mapaddr = mlxsw_pci->cmd.in_mbox.mapaddr; |
1808 | } |
1809 | mlxsw_pci_write32(mlxsw_pci, CIR_IN_PARAM_HI, upper_32_bits(in_mapaddr)); |
1810 | mlxsw_pci_write32(mlxsw_pci, CIR_IN_PARAM_LO, lower_32_bits(in_mapaddr)); |
1811 | |
1812 | if (out_mbox) |
1813 | out_mapaddr = mlxsw_pci->cmd.out_mbox.mapaddr; |
1814 | mlxsw_pci_write32(mlxsw_pci, CIR_OUT_PARAM_HI, upper_32_bits(out_mapaddr)); |
1815 | mlxsw_pci_write32(mlxsw_pci, CIR_OUT_PARAM_LO, lower_32_bits(out_mapaddr)); |
1816 | |
1817 | mlxsw_pci_write32(mlxsw_pci, CIR_IN_MODIFIER, in_mod); |
1818 | mlxsw_pci_write32(mlxsw_pci, CIR_TOKEN, 0); |
1819 | |
1820 | *p_wait_done = false; |
1821 | |
1822 | wmb(); /* all needs to be written before we write control register */ |
1823 | mlxsw_pci_write32(mlxsw_pci, CIR_CTRL, |
1824 | MLXSW_PCI_CIR_CTRL_GO_BIT | |
1825 | (evreq ? MLXSW_PCI_CIR_CTRL_EVREQ_BIT : 0) | |
1826 | (opcode_mod << MLXSW_PCI_CIR_CTRL_OPCODE_MOD_SHIFT) | |
1827 | opcode); |
1828 | |
1829 | if (!evreq) { |
1830 | unsigned long end; |
1831 | |
1832 | end = jiffies + timeout; |
1833 | do { |
1834 | u32 ctrl = mlxsw_pci_read32(mlxsw_pci, CIR_CTRL); |
1835 | |
1836 | if (!(ctrl & MLXSW_PCI_CIR_CTRL_GO_BIT)) { |
1837 | *p_wait_done = true; |
1838 | *p_status = ctrl >> MLXSW_PCI_CIR_CTRL_STATUS_SHIFT; |
1839 | break; |
1840 | } |
1841 | cond_resched(); |
1842 | } while (time_before(jiffies, end)); |
1843 | } else { |
1844 | wait_event_timeout(mlxsw_pci->cmd.wait, *p_wait_done, timeout); |
1845 | *p_status = mlxsw_pci->cmd.comp.status; |
1846 | } |
1847 | |
1848 | err = 0; |
1849 | if (*p_wait_done) { |
1850 | if (*p_status) |
1851 | err = -EIO; |
1852 | } else { |
1853 | err = -ETIMEDOUT; |
1854 | } |
1855 | |
1856 | if (!err && out_mbox && out_mbox_direct) { |
1857 | /* Some commands don't use output param as address to mailbox |
1858 | * but they store output directly into registers. In that case, |
1859 | * copy registers into mbox buffer. |
1860 | */ |
1861 | __be32 tmp; |
1862 | |
1863 | if (!evreq) { |
1864 | tmp = cpu_to_be32(mlxsw_pci_read32(mlxsw_pci, |
1865 | CIR_OUT_PARAM_HI)); |
1866 | memcpy(out_mbox, &tmp, sizeof(tmp)); |
1867 | tmp = cpu_to_be32(mlxsw_pci_read32(mlxsw_pci, |
1868 | CIR_OUT_PARAM_LO)); |
1869 | memcpy(out_mbox + sizeof(tmp), &tmp, sizeof(tmp)); |
1870 | } |
1871 | } else if (!err && out_mbox) { |
1872 | memcpy(out_mbox, mlxsw_pci->cmd.out_mbox.buf, out_mbox_size); |
1873 | } |
1874 | |
mutex_unlock(&mlxsw_pci->cmd.lock);
1876 | |
1877 | return err; |
1878 | } |
1879 | |
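/* The 64-bit free running clock is exposed as two 32-bit reads: the
 * high word at the queried offset, the low word 4 bytes after it.
 */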
1880 | static u32 mlxsw_pci_read_frc_h(void *bus_priv) |
1881 | { |
1882 | struct mlxsw_pci *mlxsw_pci = bus_priv; |
1883 | u64 frc_offset_h; |
1884 | |
1885 | frc_offset_h = mlxsw_pci->free_running_clock_offset; |
return mlxsw_pci_read32_off(mlxsw_pci, frc_offset_h);
1887 | } |
1888 | |
1889 | static u32 mlxsw_pci_read_frc_l(void *bus_priv) |
1890 | { |
1891 | struct mlxsw_pci *mlxsw_pci = bus_priv; |
1892 | u64 frc_offset_l; |
1893 | |
1894 | frc_offset_l = mlxsw_pci->free_running_clock_offset + 4; |
return mlxsw_pci_read32_off(mlxsw_pci, frc_offset_l);
1896 | } |
1897 | |
1898 | static u32 mlxsw_pci_read_utc_sec(void *bus_priv) |
1899 | { |
1900 | struct mlxsw_pci *mlxsw_pci = bus_priv; |
1901 | |
return mlxsw_pci_read32_off(mlxsw_pci, mlxsw_pci->utc_sec_offset);
1903 | } |
1904 | |
1905 | static u32 mlxsw_pci_read_utc_nsec(void *bus_priv) |
1906 | { |
1907 | struct mlxsw_pci *mlxsw_pci = bus_priv; |
1908 | |
return mlxsw_pci_read32_off(mlxsw_pci, mlxsw_pci->utc_nsec_offset);
1910 | } |
1911 | |
1912 | static enum mlxsw_cmd_mbox_config_profile_lag_mode |
1913 | mlxsw_pci_lag_mode(void *bus_priv) |
1914 | { |
1915 | struct mlxsw_pci *mlxsw_pci = bus_priv; |
1916 | |
1917 | return mlxsw_pci->lag_mode; |
1918 | } |
1919 | |
1920 | static const struct mlxsw_bus mlxsw_pci_bus = { |
1921 | .kind = "pci" , |
1922 | .init = mlxsw_pci_init, |
1923 | .fini = mlxsw_pci_fini, |
1924 | .skb_transmit_busy = mlxsw_pci_skb_transmit_busy, |
1925 | .skb_transmit = mlxsw_pci_skb_transmit, |
1926 | .cmd_exec = mlxsw_pci_cmd_exec, |
1927 | .read_frc_h = mlxsw_pci_read_frc_h, |
1928 | .read_frc_l = mlxsw_pci_read_frc_l, |
1929 | .read_utc_sec = mlxsw_pci_read_utc_sec, |
1930 | .read_utc_nsec = mlxsw_pci_read_utc_nsec, |
1931 | .lag_mode = mlxsw_pci_lag_mode, |
1932 | .features = MLXSW_BUS_F_TXRX | MLXSW_BUS_F_RESET, |
1933 | }; |
1934 | |
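/* Allocate the permanently mapped in/out command mailboxes and the
 * serialization primitives used by mlxsw_pci_cmd_exec().
 */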
1935 | static int mlxsw_pci_cmd_init(struct mlxsw_pci *mlxsw_pci) |
1936 | { |
1937 | int err; |
1938 | |
1939 | mutex_init(&mlxsw_pci->cmd.lock); |
1940 | init_waitqueue_head(&mlxsw_pci->cmd.wait); |
1941 | |
err = mlxsw_pci_mbox_alloc(mlxsw_pci, &mlxsw_pci->cmd.in_mbox);
1943 | if (err) |
1944 | goto err_in_mbox_alloc; |
1945 | |
err = mlxsw_pci_mbox_alloc(mlxsw_pci, &mlxsw_pci->cmd.out_mbox);
1947 | if (err) |
1948 | goto err_out_mbox_alloc; |
1949 | |
1950 | return 0; |
1951 | |
1952 | err_out_mbox_alloc: |
mlxsw_pci_mbox_free(mlxsw_pci, &mlxsw_pci->cmd.in_mbox);
err_in_mbox_alloc:
mutex_destroy(&mlxsw_pci->cmd.lock);
1956 | return err; |
1957 | } |
1958 | |
1959 | static void mlxsw_pci_cmd_fini(struct mlxsw_pci *mlxsw_pci) |
1960 | { |
mlxsw_pci_mbox_free(mlxsw_pci, &mlxsw_pci->cmd.out_mbox);
mlxsw_pci_mbox_free(mlxsw_pci, &mlxsw_pci->cmd.in_mbox);
mutex_destroy(&mlxsw_pci->cmd.lock);
1964 | } |
1965 | |
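/* PCI probe: enable the device, map BAR 0, set up the command interface
 * and register the bus device with the mlxsw core.
 */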
1966 | static int mlxsw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) |
1967 | { |
const char *driver_name = dev_driver_string(&pdev->dev);
1969 | struct mlxsw_pci *mlxsw_pci; |
1970 | int err; |
1971 | |
mlxsw_pci = kzalloc(sizeof(*mlxsw_pci), GFP_KERNEL);
1973 | if (!mlxsw_pci) |
1974 | return -ENOMEM; |
1975 | |
err = pci_enable_device(pdev);
if (err) {
dev_err(&pdev->dev, "pci_enable_device failed\n");
1979 | goto err_pci_enable_device; |
1980 | } |
1981 | |
1982 | err = pci_request_regions(pdev, driver_name); |
1983 | if (err) { |
1984 | dev_err(&pdev->dev, "pci_request_regions failed\n" ); |
1985 | goto err_pci_request_regions; |
1986 | } |
1987 | |
err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
if (err) {
err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
if (err) {
dev_err(&pdev->dev, "dma_set_mask failed\n");
1993 | goto err_pci_set_dma_mask; |
1994 | } |
1995 | } |
1996 | |
1997 | if (pci_resource_len(pdev, 0) < MLXSW_PCI_BAR0_SIZE) { |
1998 | dev_err(&pdev->dev, "invalid PCI region size\n" ); |
1999 | err = -EINVAL; |
2000 | goto err_pci_resource_len_check; |
2001 | } |
2002 | |
2003 | mlxsw_pci->hw_addr = ioremap(pci_resource_start(pdev, 0), |
2004 | pci_resource_len(pdev, 0)); |
2005 | if (!mlxsw_pci->hw_addr) { |
2006 | dev_err(&pdev->dev, "ioremap failed\n" ); |
2007 | err = -EIO; |
2008 | goto err_ioremap; |
2009 | } |
pci_set_master(pdev);
2011 | |
2012 | mlxsw_pci->pdev = pdev; |
pci_set_drvdata(pdev, mlxsw_pci);
2014 | |
2015 | err = mlxsw_pci_cmd_init(mlxsw_pci); |
2016 | if (err) |
2017 | goto err_pci_cmd_init; |
2018 | |
2019 | mlxsw_pci->bus_info.device_kind = driver_name; |
mlxsw_pci->bus_info.device_name = pci_name(mlxsw_pci->pdev);
2021 | mlxsw_pci->bus_info.dev = &pdev->dev; |
2022 | mlxsw_pci->bus_info.read_clock_capable = true; |
2023 | mlxsw_pci->id = id; |
2024 | |
err = mlxsw_core_bus_device_register(&mlxsw_pci->bus_info,
&mlxsw_pci_bus, mlxsw_pci, false,
NULL, NULL);
if (err) {
dev_err(&pdev->dev, "cannot register bus device\n");
2030 | goto err_bus_device_register; |
2031 | } |
2032 | |
2033 | return 0; |
2034 | |
2035 | err_bus_device_register: |
2036 | mlxsw_pci_cmd_fini(mlxsw_pci); |
2037 | err_pci_cmd_init: |
iounmap(mlxsw_pci->hw_addr);
2039 | err_ioremap: |
2040 | err_pci_resource_len_check: |
2041 | err_pci_set_dma_mask: |
2042 | pci_release_regions(pdev); |
2043 | err_pci_request_regions: |
pci_disable_device(pdev);
2045 | err_pci_enable_device: |
kfree(mlxsw_pci);
2047 | return err; |
2048 | } |
2049 | |
2050 | static void mlxsw_pci_remove(struct pci_dev *pdev) |
2051 | { |
2052 | struct mlxsw_pci *mlxsw_pci = pci_get_drvdata(pdev); |
2053 | |
mlxsw_core_bus_device_unregister(mlxsw_pci->core, false);
mlxsw_pci_cmd_fini(mlxsw_pci);
iounmap(mlxsw_pci->hw_addr);
pci_release_regions(mlxsw_pci->pdev);
pci_disable_device(mlxsw_pci->pdev);
kfree(mlxsw_pci);
2060 | } |
2061 | |
2062 | int mlxsw_pci_driver_register(struct pci_driver *pci_driver) |
2063 | { |
2064 | pci_driver->probe = mlxsw_pci_probe; |
2065 | pci_driver->remove = mlxsw_pci_remove; |
2066 | pci_driver->shutdown = mlxsw_pci_remove; |
2067 | return pci_register_driver(pci_driver); |
2068 | } |
2069 | EXPORT_SYMBOL(mlxsw_pci_driver_register); |
2070 | |
2071 | void mlxsw_pci_driver_unregister(struct pci_driver *pci_driver) |
2072 | { |
pci_unregister_driver(pci_driver);
2074 | } |
2075 | EXPORT_SYMBOL(mlxsw_pci_driver_unregister); |
2076 | |
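/* All real work happens in the per-driver probe registered through
 * mlxsw_pci_driver_register(); the module hooks are intentionally empty.
 */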
2077 | static int __init mlxsw_pci_module_init(void) |
2078 | { |
2079 | return 0; |
2080 | } |
2081 | |
2082 | static void __exit mlxsw_pci_module_exit(void) |
2083 | { |
2084 | } |
2085 | |
2086 | module_init(mlxsw_pci_module_init); |
2087 | module_exit(mlxsw_pci_module_exit); |
2088 | |
2089 | MODULE_LICENSE("Dual BSD/GPL" ); |
2090 | MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>" ); |
2091 | MODULE_DESCRIPTION("Mellanox switch PCI interface driver" ); |
2092 | |