1 | // SPDX-License-Identifier: GPL-2.0-only |
2 | /* |
3 | * Huawei HiNIC PCI Express Linux driver |
4 | * Copyright(c) 2017 Huawei Technologies Co., Ltd |
5 | */ |
6 | |
7 | #include <linux/kernel.h> |
8 | #include <linux/types.h> |
9 | #include <linux/errno.h> |
10 | #include <linux/pci.h> |
11 | #include <linux/device.h> |
12 | #include <linux/workqueue.h> |
13 | #include <linux/interrupt.h> |
14 | #include <linux/slab.h> |
15 | #include <linux/dma-mapping.h> |
16 | #include <linux/log2.h> |
17 | #include <asm/byteorder.h> |
18 | #include <asm/barrier.h> |
19 | |
20 | #include "hinic_hw_dev.h" |
21 | #include "hinic_hw_csr.h" |
22 | #include "hinic_hw_if.h" |
23 | #include "hinic_hw_eqs.h" |
24 | |
25 | #define HINIC_EQS_WQ_NAME "hinic_eqs" |
26 | |
27 | #define GET_EQ_NUM_PAGES(eq, pg_size) \ |
28 | (ALIGN((eq)->q_len * (eq)->elem_size, pg_size) / (pg_size)) |
29 | |
30 | #define GET_EQ_NUM_ELEMS_IN_PG(eq, pg_size) ((pg_size) / (eq)->elem_size) |
31 | |
32 | #define EQ_CONS_IDX_REG_ADDR(eq) (((eq)->type == HINIC_AEQ) ? \ |
33 | HINIC_CSR_AEQ_CONS_IDX_ADDR((eq)->q_id) : \ |
34 | HINIC_CSR_CEQ_CONS_IDX_ADDR((eq)->q_id)) |
35 | |
36 | #define EQ_PROD_IDX_REG_ADDR(eq) (((eq)->type == HINIC_AEQ) ? \ |
37 | HINIC_CSR_AEQ_PROD_IDX_ADDR((eq)->q_id) : \ |
38 | HINIC_CSR_CEQ_PROD_IDX_ADDR((eq)->q_id)) |
39 | |
40 | #define EQ_HI_PHYS_ADDR_REG(eq, pg_num) (((eq)->type == HINIC_AEQ) ? \ |
41 | HINIC_CSR_AEQ_HI_PHYS_ADDR_REG((eq)->q_id, pg_num) : \ |
42 | HINIC_CSR_CEQ_HI_PHYS_ADDR_REG((eq)->q_id, pg_num)) |
43 | |
44 | #define EQ_LO_PHYS_ADDR_REG(eq, pg_num) (((eq)->type == HINIC_AEQ) ? \ |
45 | HINIC_CSR_AEQ_LO_PHYS_ADDR_REG((eq)->q_id, pg_num) : \ |
46 | HINIC_CSR_CEQ_LO_PHYS_ADDR_REG((eq)->q_id, pg_num)) |
47 | |
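/* Each EQ page holds a power-of-two number of elements, so an element
 * index splits into a page index (idx / num_elem_in_pg) and an offset
 * within that page (idx & (num_elem_in_pg - 1)).
 */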
48 | #define GET_EQ_ELEMENT(eq, idx) \ |
49 | ((eq)->virt_addr[(idx) / (eq)->num_elem_in_pg] + \ |
50 | (((idx) & ((eq)->num_elem_in_pg - 1)) * (eq)->elem_size)) |
51 | |
52 | #define GET_AEQ_ELEM(eq, idx) ((struct hinic_aeq_elem *) \ |
53 | GET_EQ_ELEMENT(eq, idx)) |
54 | |
55 | #define GET_CEQ_ELEM(eq, idx) ((u32 *) \ |
56 | GET_EQ_ELEMENT(eq, idx)) |
57 | |
58 | #define GET_CURR_AEQ_ELEM(eq) GET_AEQ_ELEM(eq, (eq)->cons_idx) |
59 | |
60 | #define GET_CURR_CEQ_ELEM(eq) GET_CEQ_ELEM(eq, (eq)->cons_idx) |
61 | |
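/* HW expects the EQ page size encoded as ilog2(page_size / 4K) */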
62 | #define PAGE_IN_4K(page_size) ((page_size) >> 12) |
63 | #define EQ_SET_HW_PAGE_SIZE_VAL(eq) (ilog2(PAGE_IN_4K((eq)->page_size))) |
64 | |
65 | #define ELEMENT_SIZE_IN_32B(eq) (((eq)->elem_size) >> 5) |
66 | #define EQ_SET_HW_ELEM_SIZE_VAL(eq) (ilog2(ELEMENT_SIZE_IN_32B(eq))) |
67 | |
68 | #define EQ_MAX_PAGES 8 |
69 | |
70 | #define CEQE_TYPE_SHIFT 23 |
71 | #define CEQE_TYPE_MASK 0x7 |
72 | |
73 | #define CEQE_TYPE(ceqe) (((ceqe) >> CEQE_TYPE_SHIFT) & \ |
74 | CEQE_TYPE_MASK) |
75 | |
76 | #define CEQE_DATA_MASK 0x3FFFFFF |
77 | #define CEQE_DATA(ceqe) ((ceqe) & CEQE_DATA_MASK) |
78 | |
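/* An eq is the q_id'th element of the aeq[]/ceq[] array in its parent
 * structure, so stepping back q_id elements yields element 0, from which
 * container_of() recovers the parent hinic_aeqs/hinic_ceqs.
 */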
79 | #define aeq_to_aeqs(eq) \ |
80 | container_of((eq) - (eq)->q_id, struct hinic_aeqs, aeq[0]) |
81 | |
82 | #define ceq_to_ceqs(eq) \ |
83 | container_of((eq) - (eq)->q_id, struct hinic_ceqs, ceq[0]) |
84 | |
85 | #define work_to_aeq_work(work) \ |
86 | container_of(work, struct hinic_eq_work, work) |
87 | |
88 | #define DMA_ATTR_AEQ_DEFAULT 0 |
89 | #define DMA_ATTR_CEQ_DEFAULT 0 |
90 | |
91 | /* No coalescence */ |
92 | #define THRESH_CEQ_DEFAULT 0 |
93 | |
94 | enum eq_int_mode { |
95 | EQ_INT_MODE_ARMED, |
96 | EQ_INT_MODE_ALWAYS |
97 | }; |
98 | |
99 | enum eq_arm_state { |
100 | EQ_NOT_ARMED, |
101 | EQ_ARMED |
102 | }; |
103 | |
104 | /** |
105 | * hinic_aeq_register_hw_cb - register AEQ callback for specific event |
106 | * @aeqs: pointer to Async eqs of the chip |
 * @event: AEQ event to register the callback for
 * @handle: private data to be passed to the callback
109 | * @hwe_handler: callback function |
110 | **/ |
111 | void hinic_aeq_register_hw_cb(struct hinic_aeqs *aeqs, |
112 | enum hinic_aeq_type event, void *handle, |
113 | void (*hwe_handler)(void *handle, void *data, |
114 | u8 size)) |
115 | { |
116 | struct hinic_hw_event_cb *hwe_cb = &aeqs->hwe_cb[event]; |
117 | |
118 | hwe_cb->hwe_handler = hwe_handler; |
119 | hwe_cb->handle = handle; |
120 | hwe_cb->hwe_state = HINIC_EQE_ENABLED; |
121 | } |
122 | |
123 | /** |
124 | * hinic_aeq_unregister_hw_cb - unregister the AEQ callback for specific event |
125 | * @aeqs: pointer to Async eqs of the chip |
 * @event: AEQ event to unregister the callback for
127 | **/ |
128 | void hinic_aeq_unregister_hw_cb(struct hinic_aeqs *aeqs, |
129 | enum hinic_aeq_type event) |
130 | { |
131 | struct hinic_hw_event_cb *hwe_cb = &aeqs->hwe_cb[event]; |
132 | |
133 | hwe_cb->hwe_state &= ~HINIC_EQE_ENABLED; |
134 | |
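	/* wait for a callback that is currently running to complete */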
135 | while (hwe_cb->hwe_state & HINIC_EQE_RUNNING) |
136 | schedule(); |
137 | |
138 | hwe_cb->hwe_handler = NULL; |
139 | } |
140 | |
141 | /** |
142 | * hinic_ceq_register_cb - register CEQ callback for specific event |
143 | * @ceqs: pointer to Completion eqs part of the chip |
 * @event: CEQ event to register the callback for
 * @handle: private data to be passed to the callback
146 | * @handler: callback function |
147 | **/ |
148 | void hinic_ceq_register_cb(struct hinic_ceqs *ceqs, |
149 | enum hinic_ceq_type event, void *handle, |
150 | void (*handler)(void *handle, u32 ceqe_data)) |
151 | { |
152 | struct hinic_ceq_cb *ceq_cb = &ceqs->ceq_cb[event]; |
153 | |
154 | ceq_cb->handler = handler; |
155 | ceq_cb->handle = handle; |
156 | ceq_cb->ceqe_state = HINIC_EQE_ENABLED; |
157 | } |
158 | |
159 | /** |
160 | * hinic_ceq_unregister_cb - unregister the CEQ callback for specific event |
161 | * @ceqs: pointer to Completion eqs part of the chip |
 * @event: CEQ event to unregister the callback for
163 | **/ |
164 | void hinic_ceq_unregister_cb(struct hinic_ceqs *ceqs, |
165 | enum hinic_ceq_type event) |
166 | { |
167 | struct hinic_ceq_cb *ceq_cb = &ceqs->ceq_cb[event]; |
168 | |
169 | ceq_cb->ceqe_state &= ~HINIC_EQE_ENABLED; |
170 | |
171 | while (ceq_cb->ceqe_state & HINIC_EQE_RUNNING) |
172 | schedule(); |
173 | |
174 | ceq_cb->handler = NULL; |
175 | } |
176 | |
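/* XOR checksum of the eight 4-bit nibbles of val, written to the
 * XOR_CHKSUM field of the consumer index register.
 */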
177 | static u8 eq_cons_idx_checksum_set(u32 val) |
178 | { |
179 | u8 checksum = 0; |
180 | int idx; |
181 | |
182 | for (idx = 0; idx < 32; idx += 4) |
183 | checksum ^= ((val >> idx) & 0xF); |
184 | |
185 | return (checksum & 0xF); |
186 | } |
187 | |
188 | /** |
189 | * eq_update_ci - update the HW cons idx of event queue |
190 | * @eq: the event queue to update the cons idx for |
191 | * @arm_state: the arm bit value of eq's interrupt |
192 | **/ |
193 | static void eq_update_ci(struct hinic_eq *eq, u32 arm_state) |
194 | { |
195 | u32 val, addr = EQ_CONS_IDX_REG_ADDR(eq); |
196 | |
197 | /* Read Modify Write */ |
	val = hinic_hwif_read_reg(eq->hwif, addr);
199 | |
200 | val = HINIC_EQ_CI_CLEAR(val, IDX) & |
201 | HINIC_EQ_CI_CLEAR(val, WRAPPED) & |
202 | HINIC_EQ_CI_CLEAR(val, INT_ARMED) & |
203 | HINIC_EQ_CI_CLEAR(val, XOR_CHKSUM); |
204 | |
205 | val |= HINIC_EQ_CI_SET(eq->cons_idx, IDX) | |
206 | HINIC_EQ_CI_SET(eq->wrapped, WRAPPED) | |
207 | HINIC_EQ_CI_SET(arm_state, INT_ARMED); |
208 | |
209 | val |= HINIC_EQ_CI_SET(eq_cons_idx_checksum_set(val), XOR_CHKSUM); |
210 | |
	hinic_hwif_write_reg(eq->hwif, addr, val);
212 | } |
213 | |
214 | /** |
215 | * aeq_irq_handler - handler for the AEQ event |
216 | * @eq: the Async Event Queue that received the event |
217 | **/ |
218 | static void aeq_irq_handler(struct hinic_eq *eq) |
219 | { |
220 | struct hinic_aeqs *aeqs = aeq_to_aeqs(eq); |
221 | struct hinic_hwif *hwif = aeqs->hwif; |
222 | struct pci_dev *pdev = hwif->pdev; |
223 | struct hinic_aeq_elem *aeqe_curr; |
224 | struct hinic_hw_event_cb *hwe_cb; |
225 | enum hinic_aeq_type event; |
226 | unsigned long eqe_state; |
227 | u32 aeqe_desc; |
228 | int i, size; |
229 | |
230 | for (i = 0; i < eq->q_len; i++) { |
231 | aeqe_curr = GET_CURR_AEQ_ELEM(eq); |
232 | |
233 | /* Data in HW is in Big endian Format */ |
234 | aeqe_desc = be32_to_cpu(aeqe_curr->desc); |
235 | |
		/* HW toggles the wrapped bit when it adds an eq element */
237 | if (HINIC_EQ_ELEM_DESC_GET(aeqe_desc, WRAPPED) == eq->wrapped) |
238 | break; |
239 | |
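		/* Ensure the element data is not read before the wrapped
		 * (ownership) bit above has been checked.
		 */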
240 | dma_rmb(); |
241 | |
242 | event = HINIC_EQ_ELEM_DESC_GET(aeqe_desc, TYPE); |
243 | if (event >= HINIC_MAX_AEQ_EVENTS) { |
			dev_err(&pdev->dev, "Unknown AEQ Event %d\n", event);
245 | return; |
246 | } |
247 | |
248 | if (!HINIC_EQ_ELEM_DESC_GET(aeqe_desc, SRC)) { |
249 | hwe_cb = &aeqs->hwe_cb[event]; |
250 | |
251 | size = HINIC_EQ_ELEM_DESC_GET(aeqe_desc, SIZE); |
252 | |
253 | eqe_state = cmpxchg(&hwe_cb->hwe_state, |
254 | HINIC_EQE_ENABLED, |
255 | HINIC_EQE_ENABLED | |
256 | HINIC_EQE_RUNNING); |
257 | if (eqe_state == HINIC_EQE_ENABLED && |
258 | hwe_cb->hwe_handler) |
259 | hwe_cb->hwe_handler(hwe_cb->handle, |
260 | aeqe_curr->data, size); |
261 | else |
				dev_err(&pdev->dev, "Unhandled AEQ Event %d\n",
263 | event); |
264 | |
265 | hwe_cb->hwe_state &= ~HINIC_EQE_RUNNING; |
266 | } |
267 | |
268 | eq->cons_idx++; |
269 | |
270 | if (eq->cons_idx == eq->q_len) { |
271 | eq->cons_idx = 0; |
272 | eq->wrapped = !eq->wrapped; |
273 | } |
274 | } |
275 | } |
276 | |
277 | /** |
278 | * ceq_event_handler - handler for the ceq events |
279 | * @ceqs: ceqs part of the chip |
280 | * @ceqe: ceq element that describes the event |
281 | **/ |
282 | static void ceq_event_handler(struct hinic_ceqs *ceqs, u32 ceqe) |
283 | { |
284 | struct hinic_hwif *hwif = ceqs->hwif; |
285 | struct pci_dev *pdev = hwif->pdev; |
286 | struct hinic_ceq_cb *ceq_cb; |
287 | enum hinic_ceq_type event; |
288 | unsigned long eqe_state; |
289 | |
290 | event = CEQE_TYPE(ceqe); |
291 | if (event >= HINIC_MAX_CEQ_EVENTS) { |
		dev_err(&pdev->dev, "Unknown CEQ event, event = %d\n", event);
293 | return; |
294 | } |
295 | |
296 | ceq_cb = &ceqs->ceq_cb[event]; |
297 | |
298 | eqe_state = cmpxchg(&ceq_cb->ceqe_state, |
299 | HINIC_EQE_ENABLED, |
300 | HINIC_EQE_ENABLED | HINIC_EQE_RUNNING); |
301 | |
302 | if (eqe_state == HINIC_EQE_ENABLED && ceq_cb->handler) |
303 | ceq_cb->handler(ceq_cb->handle, CEQE_DATA(ceqe)); |
304 | else |
		dev_err(&pdev->dev, "Unhandled CEQ Event %d\n", event);
306 | |
307 | ceq_cb->ceqe_state &= ~HINIC_EQE_RUNNING; |
308 | } |
309 | |
310 | /** |
311 | * ceq_irq_handler - handler for the CEQ event |
312 | * @eq: the Completion Event Queue that received the event |
313 | **/ |
314 | static void ceq_irq_handler(struct hinic_eq *eq) |
315 | { |
316 | struct hinic_ceqs *ceqs = ceq_to_ceqs(eq); |
317 | u32 ceqe; |
318 | int i; |
319 | |
320 | for (i = 0; i < eq->q_len; i++) { |
321 | ceqe = *(GET_CURR_CEQ_ELEM(eq)); |
322 | |
323 | /* Data in HW is in Big endian Format */ |
324 | ceqe = be32_to_cpu(ceqe); |
325 | |
		/* HW toggles the wrapped bit when it adds an eq element */
327 | if (HINIC_EQ_ELEM_DESC_GET(ceqe, WRAPPED) == eq->wrapped) |
328 | break; |
329 | |
330 | ceq_event_handler(ceqs, ceqe); |
331 | |
332 | eq->cons_idx++; |
333 | |
334 | if (eq->cons_idx == eq->q_len) { |
335 | eq->cons_idx = 0; |
336 | eq->wrapped = !eq->wrapped; |
337 | } |
338 | } |
339 | } |
340 | |
341 | /** |
342 | * eq_irq_handler - handler for the EQ event |
343 | * @data: the Event Queue that received the event |
344 | **/ |
345 | static void eq_irq_handler(void *data) |
346 | { |
347 | struct hinic_eq *eq = data; |
348 | |
349 | if (eq->type == HINIC_AEQ) |
350 | aeq_irq_handler(eq); |
351 | else if (eq->type == HINIC_CEQ) |
352 | ceq_irq_handler(eq); |
353 | |
	eq_update_ci(eq, EQ_ARMED);
355 | } |
356 | |
357 | /** |
358 | * eq_irq_work - the work of the EQ that received the event |
359 | * @work: the work struct that is associated with the EQ |
360 | **/ |
361 | static void eq_irq_work(struct work_struct *work) |
362 | { |
363 | struct hinic_eq_work *aeq_work = work_to_aeq_work(work); |
364 | struct hinic_eq *aeq; |
365 | |
366 | aeq = aeq_work->data; |
	eq_irq_handler(aeq);
368 | } |
369 | |
370 | /** |
371 | * ceq_tasklet - the tasklet of the EQ that received the event |
372 | * @t: the tasklet struct pointer |
373 | **/ |
374 | static void ceq_tasklet(struct tasklet_struct *t) |
375 | { |
376 | struct hinic_eq *ceq = from_tasklet(ceq, t, ceq_tasklet); |
377 | |
	eq_irq_handler(ceq);
379 | } |
380 | |
381 | /** |
382 | * aeq_interrupt - aeq interrupt handler |
383 | * @irq: irq number |
384 | * @data: the Async Event Queue that collected the event |
385 | **/ |
386 | static irqreturn_t aeq_interrupt(int irq, void *data) |
387 | { |
388 | struct hinic_eq_work *aeq_work; |
389 | struct hinic_eq *aeq = data; |
390 | struct hinic_aeqs *aeqs; |
391 | |
392 | /* clear resend timer cnt register */ |
	hinic_msix_attr_cnt_clear(aeq->hwif, aeq->msix_entry.entry);
394 | |
395 | aeq_work = &aeq->aeq_work; |
396 | aeq_work->data = aeq; |
397 | |
398 | aeqs = aeq_to_aeqs(aeq); |
	queue_work(aeqs->workq, &aeq_work->work);
400 | |
401 | return IRQ_HANDLED; |
402 | } |
403 | |
404 | /** |
405 | * ceq_interrupt - ceq interrupt handler |
406 | * @irq: irq number |
407 | * @data: the Completion Event Queue that collected the event |
408 | **/ |
409 | static irqreturn_t ceq_interrupt(int irq, void *data) |
410 | { |
411 | struct hinic_eq *ceq = data; |
412 | |
413 | /* clear resend timer cnt register */ |
	hinic_msix_attr_cnt_clear(ceq->hwif, ceq->msix_entry.entry);
415 | |
	tasklet_schedule(&ceq->ceq_tasklet);
417 | |
418 | return IRQ_HANDLED; |
419 | } |
420 | |
421 | static u32 get_ctrl0_val(struct hinic_eq *eq, u32 addr) |
422 | { |
423 | struct msix_entry *msix_entry = &eq->msix_entry; |
424 | enum hinic_eq_type type = eq->type; |
425 | u32 val, ctrl0; |
426 | |
427 | if (type == HINIC_AEQ) { |
428 | /* RMW Ctrl0 */ |
429 | addr = HINIC_CSR_AEQ_CTRL_0_ADDR(eq->q_id); |
430 | |
		val = hinic_hwif_read_reg(eq->hwif, addr);
432 | |
433 | val = HINIC_AEQ_CTRL_0_CLEAR(val, INT_IDX) & |
434 | HINIC_AEQ_CTRL_0_CLEAR(val, DMA_ATTR) & |
435 | HINIC_AEQ_CTRL_0_CLEAR(val, PCI_INTF_IDX) & |
436 | HINIC_AEQ_CTRL_0_CLEAR(val, INT_MODE); |
437 | |
438 | ctrl0 = HINIC_AEQ_CTRL_0_SET(msix_entry->entry, INT_IDX) | |
439 | HINIC_AEQ_CTRL_0_SET(DMA_ATTR_AEQ_DEFAULT, DMA_ATTR) | |
440 | HINIC_AEQ_CTRL_0_SET(HINIC_HWIF_PCI_INTF(eq->hwif), |
441 | PCI_INTF_IDX) | |
442 | HINIC_AEQ_CTRL_0_SET(EQ_INT_MODE_ARMED, INT_MODE); |
443 | |
444 | val |= ctrl0; |
445 | } else { |
446 | /* RMW Ctrl0 */ |
447 | addr = HINIC_CSR_CEQ_CTRL_0_ADDR(eq->q_id); |
448 | |
		val = hinic_hwif_read_reg(eq->hwif, addr);
450 | |
451 | val = HINIC_CEQ_CTRL_0_CLEAR(val, INTR_IDX) & |
452 | HINIC_CEQ_CTRL_0_CLEAR(val, DMA_ATTR) & |
453 | HINIC_CEQ_CTRL_0_CLEAR(val, KICK_THRESH) & |
454 | HINIC_CEQ_CTRL_0_CLEAR(val, PCI_INTF_IDX) & |
455 | HINIC_CEQ_CTRL_0_CLEAR(val, INTR_MODE); |
456 | |
457 | ctrl0 = HINIC_CEQ_CTRL_0_SET(msix_entry->entry, INTR_IDX) | |
458 | HINIC_CEQ_CTRL_0_SET(DMA_ATTR_CEQ_DEFAULT, DMA_ATTR) | |
459 | HINIC_CEQ_CTRL_0_SET(THRESH_CEQ_DEFAULT, KICK_THRESH) | |
460 | HINIC_CEQ_CTRL_0_SET(HINIC_HWIF_PCI_INTF(eq->hwif), |
461 | PCI_INTF_IDX) | |
462 | HINIC_CEQ_CTRL_0_SET(EQ_INT_MODE_ARMED, INTR_MODE); |
463 | |
464 | val |= ctrl0; |
465 | } |
466 | return val; |
467 | } |
468 | |
469 | static void set_ctrl0(struct hinic_eq *eq) |
470 | { |
471 | u32 val, addr; |
472 | |
473 | if (eq->type == HINIC_AEQ) |
474 | addr = HINIC_CSR_AEQ_CTRL_0_ADDR(eq->q_id); |
475 | else |
476 | addr = HINIC_CSR_CEQ_CTRL_0_ADDR(eq->q_id); |
477 | |
478 | val = get_ctrl0_val(eq, addr); |
479 | |
	hinic_hwif_write_reg(eq->hwif, addr, val);
481 | } |
482 | |
483 | static u32 get_ctrl1_val(struct hinic_eq *eq, u32 addr) |
484 | { |
485 | u32 page_size_val, elem_size, val, ctrl1; |
486 | enum hinic_eq_type type = eq->type; |
487 | |
488 | if (type == HINIC_AEQ) { |
489 | /* RMW Ctrl1 */ |
490 | addr = HINIC_CSR_AEQ_CTRL_1_ADDR(eq->q_id); |
491 | |
492 | page_size_val = EQ_SET_HW_PAGE_SIZE_VAL(eq); |
493 | elem_size = EQ_SET_HW_ELEM_SIZE_VAL(eq); |
494 | |
		val = hinic_hwif_read_reg(eq->hwif, addr);
496 | |
497 | val = HINIC_AEQ_CTRL_1_CLEAR(val, LEN) & |
498 | HINIC_AEQ_CTRL_1_CLEAR(val, ELEM_SIZE) & |
499 | HINIC_AEQ_CTRL_1_CLEAR(val, PAGE_SIZE); |
500 | |
501 | ctrl1 = HINIC_AEQ_CTRL_1_SET(eq->q_len, LEN) | |
502 | HINIC_AEQ_CTRL_1_SET(elem_size, ELEM_SIZE) | |
503 | HINIC_AEQ_CTRL_1_SET(page_size_val, PAGE_SIZE); |
504 | |
505 | val |= ctrl1; |
506 | } else { |
507 | /* RMW Ctrl1 */ |
508 | addr = HINIC_CSR_CEQ_CTRL_1_ADDR(eq->q_id); |
509 | |
510 | page_size_val = EQ_SET_HW_PAGE_SIZE_VAL(eq); |
511 | |
		val = hinic_hwif_read_reg(eq->hwif, addr);
513 | |
514 | val = HINIC_CEQ_CTRL_1_CLEAR(val, LEN) & |
515 | HINIC_CEQ_CTRL_1_CLEAR(val, PAGE_SIZE); |
516 | |
517 | ctrl1 = HINIC_CEQ_CTRL_1_SET(eq->q_len, LEN) | |
518 | HINIC_CEQ_CTRL_1_SET(page_size_val, PAGE_SIZE); |
519 | |
520 | val |= ctrl1; |
521 | } |
522 | return val; |
523 | } |
524 | |
525 | static void set_ctrl1(struct hinic_eq *eq) |
526 | { |
527 | u32 addr, val; |
528 | |
529 | if (eq->type == HINIC_AEQ) |
530 | addr = HINIC_CSR_AEQ_CTRL_1_ADDR(eq->q_id); |
531 | else |
532 | addr = HINIC_CSR_CEQ_CTRL_1_ADDR(eq->q_id); |
533 | |
534 | val = get_ctrl1_val(eq, addr); |
535 | |
	hinic_hwif_write_reg(eq->hwif, addr, val);
537 | } |
538 | |
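/* A VF cannot write the CEQ ctrl registers directly; the computed
 * ctrl0/ctrl1 values are sent to the management CPU
 * (HINIC_COMM_CMD_CEQ_CTRL_REG_WR_BY_UP), which writes them on the VF's
 * behalf.
 */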
539 | static int set_ceq_ctrl_reg(struct hinic_eq *eq) |
540 | { |
541 | struct hinic_ceq_ctrl_reg ceq_ctrl = {0}; |
542 | struct hinic_hwdev *hwdev = eq->hwdev; |
543 | u16 out_size = sizeof(ceq_ctrl); |
544 | u16 in_size = sizeof(ceq_ctrl); |
545 | struct hinic_pfhwdev *pfhwdev; |
546 | u32 addr; |
547 | int err; |
548 | |
549 | pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev); |
550 | |
551 | addr = HINIC_CSR_CEQ_CTRL_0_ADDR(eq->q_id); |
552 | ceq_ctrl.ctrl0 = get_ctrl0_val(eq, addr); |
553 | addr = HINIC_CSR_CEQ_CTRL_1_ADDR(eq->q_id); |
554 | ceq_ctrl.ctrl1 = get_ctrl1_val(eq, addr); |
555 | |
556 | ceq_ctrl.func_id = HINIC_HWIF_FUNC_IDX(hwdev->hwif); |
557 | ceq_ctrl.q_id = eq->q_id; |
558 | |
	err = hinic_msg_to_mgmt(&pfhwdev->pf_to_mgmt, HINIC_MOD_COMM,
				HINIC_COMM_CMD_CEQ_CTRL_REG_WR_BY_UP,
				&ceq_ctrl, in_size,
				&ceq_ctrl, &out_size, HINIC_MGMT_MSG_SYNC);
563 | if (err || !out_size || ceq_ctrl.status) { |
		dev_err(&hwdev->hwif->pdev->dev,
			"Failed to set ceq %d ctrl reg, err: %d status: 0x%x, out_size: 0x%x\n",
			eq->q_id, err, ceq_ctrl.status, out_size);
567 | return -EFAULT; |
568 | } |
569 | |
570 | return 0; |
571 | } |
572 | |
573 | /** |
 * set_eq_ctrls - set the eq's ctrl registers
 * @eq: the event queue to set the ctrl registers for
576 | **/ |
577 | static int set_eq_ctrls(struct hinic_eq *eq) |
578 | { |
579 | if (HINIC_IS_VF(eq->hwif) && eq->type == HINIC_CEQ) |
580 | return set_ceq_ctrl_reg(eq); |
581 | |
582 | set_ctrl0(eq); |
583 | set_ctrl1(eq); |
584 | return 0; |
585 | } |
586 | |
587 | /** |
588 | * aeq_elements_init - initialize all the elements in the aeq |
589 | * @eq: the Async Event Queue |
 * @init_val: value to initialize the elements with
591 | **/ |
592 | static void aeq_elements_init(struct hinic_eq *eq, u32 init_val) |
593 | { |
594 | struct hinic_aeq_elem *aeqe; |
595 | int i; |
596 | |
597 | for (i = 0; i < eq->q_len; i++) { |
598 | aeqe = GET_AEQ_ELEM(eq, i); |
599 | aeqe->desc = cpu_to_be32(init_val); |
600 | } |
601 | |
	wmb();	/* Write the initialization values */
603 | } |
604 | |
605 | /** |
606 | * ceq_elements_init - Initialize all the elements in the ceq |
607 | * @eq: the event queue |
 * @init_val: value to initialize the elements with
609 | **/ |
610 | static void ceq_elements_init(struct hinic_eq *eq, u32 init_val) |
611 | { |
612 | u32 *ceqe; |
613 | int i; |
614 | |
615 | for (i = 0; i < eq->q_len; i++) { |
616 | ceqe = GET_CEQ_ELEM(eq, i); |
617 | *(ceqe) = cpu_to_be32(init_val); |
618 | } |
619 | |
	wmb();	/* Write the initialization values */
621 | } |
622 | |
623 | /** |
624 | * alloc_eq_pages - allocate the pages for the queue |
625 | * @eq: the event queue |
626 | * |
627 | * Return 0 - Success, Negative - Failure |
628 | **/ |
629 | static int alloc_eq_pages(struct hinic_eq *eq) |
630 | { |
631 | struct hinic_hwif *hwif = eq->hwif; |
632 | struct pci_dev *pdev = hwif->pdev; |
633 | u32 init_val, addr, val; |
634 | int err, pg; |
635 | |
	eq->dma_addr = devm_kcalloc(&pdev->dev, eq->num_pages,
				    sizeof(*eq->dma_addr), GFP_KERNEL);
638 | if (!eq->dma_addr) |
639 | return -ENOMEM; |
640 | |
	eq->virt_addr = devm_kcalloc(&pdev->dev, eq->num_pages,
				     sizeof(*eq->virt_addr), GFP_KERNEL);
643 | if (!eq->virt_addr) { |
644 | err = -ENOMEM; |
645 | goto err_virt_addr_alloc; |
646 | } |
647 | |
648 | for (pg = 0; pg < eq->num_pages; pg++) { |
		eq->virt_addr[pg] = dma_alloc_coherent(&pdev->dev,
						       eq->page_size,
						       &eq->dma_addr[pg],
						       GFP_KERNEL);
653 | if (!eq->virt_addr[pg]) { |
654 | err = -ENOMEM; |
655 | goto err_dma_alloc; |
656 | } |
657 | |
658 | addr = EQ_HI_PHYS_ADDR_REG(eq, pg); |
659 | val = upper_32_bits(eq->dma_addr[pg]); |
660 | |
		hinic_hwif_write_reg(hwif, addr, val);
662 | |
663 | addr = EQ_LO_PHYS_ADDR_REG(eq, pg); |
664 | val = lower_32_bits(eq->dma_addr[pg]); |
665 | |
		hinic_hwif_write_reg(hwif, addr, val);
667 | } |
668 | |
669 | init_val = HINIC_EQ_ELEM_DESC_SET(eq->wrapped, WRAPPED); |
670 | |
671 | if (eq->type == HINIC_AEQ) |
672 | aeq_elements_init(eq, init_val); |
673 | else if (eq->type == HINIC_CEQ) |
674 | ceq_elements_init(eq, init_val); |
675 | |
676 | return 0; |
677 | |
678 | err_dma_alloc: |
679 | while (--pg >= 0) |
		dma_free_coherent(&pdev->dev, eq->page_size,
				  eq->virt_addr[pg],
				  eq->dma_addr[pg]);
683 | |
	devm_kfree(&pdev->dev, eq->virt_addr);
685 | |
686 | err_virt_addr_alloc: |
	devm_kfree(&pdev->dev, eq->dma_addr);
688 | return err; |
689 | } |
690 | |
691 | /** |
692 | * free_eq_pages - free the pages of the queue |
693 | * @eq: the Event Queue |
694 | **/ |
695 | static void free_eq_pages(struct hinic_eq *eq) |
696 | { |
697 | struct hinic_hwif *hwif = eq->hwif; |
698 | struct pci_dev *pdev = hwif->pdev; |
699 | int pg; |
700 | |
701 | for (pg = 0; pg < eq->num_pages; pg++) |
		dma_free_coherent(&pdev->dev, eq->page_size,
				  eq->virt_addr[pg],
				  eq->dma_addr[pg]);
705 | |
	devm_kfree(&pdev->dev, eq->virt_addr);
	devm_kfree(&pdev->dev, eq->dma_addr);
708 | } |
709 | |
710 | /** |
711 | * init_eq - initialize Event Queue |
712 | * @eq: the event queue |
713 | * @hwif: the HW interface of a PCI function device |
714 | * @type: the type of the event queue, aeq or ceq |
715 | * @q_id: Queue id number |
716 | * @q_len: the number of EQ elements |
717 | * @page_size: the page size of the pages in the event queue |
718 | * @entry: msix entry associated with the event queue |
719 | * |
720 | * Return 0 - Success, Negative - Failure |
721 | **/ |
722 | static int init_eq(struct hinic_eq *eq, struct hinic_hwif *hwif, |
723 | enum hinic_eq_type type, int q_id, u32 q_len, u32 page_size, |
724 | struct msix_entry entry) |
725 | { |
726 | struct pci_dev *pdev = hwif->pdev; |
727 | int err; |
728 | |
729 | eq->hwif = hwif; |
730 | eq->type = type; |
731 | eq->q_id = q_id; |
732 | eq->q_len = q_len; |
733 | eq->page_size = page_size; |
734 | |
735 | /* Clear PI and CI, also clear the ARM bit */ |
	hinic_hwif_write_reg(eq->hwif, EQ_CONS_IDX_REG_ADDR(eq), 0);
	hinic_hwif_write_reg(eq->hwif, EQ_PROD_IDX_REG_ADDR(eq), 0);
738 | |
739 | eq->cons_idx = 0; |
740 | eq->wrapped = 0; |
741 | |
742 | if (type == HINIC_AEQ) { |
743 | eq->elem_size = HINIC_AEQE_SIZE; |
744 | } else if (type == HINIC_CEQ) { |
745 | eq->elem_size = HINIC_CEQE_SIZE; |
746 | } else { |
		dev_err(&pdev->dev, "Invalid EQ type\n");
748 | return -EINVAL; |
749 | } |
750 | |
751 | eq->num_pages = GET_EQ_NUM_PAGES(eq, page_size); |
752 | eq->num_elem_in_pg = GET_EQ_NUM_ELEMS_IN_PG(eq, page_size); |
753 | |
754 | eq->msix_entry = entry; |
755 | |
756 | if (eq->num_elem_in_pg & (eq->num_elem_in_pg - 1)) { |
		dev_err(&pdev->dev, "num elements in eq page != power of 2\n");
758 | return -EINVAL; |
759 | } |
760 | |
761 | if (eq->num_pages > EQ_MAX_PAGES) { |
		dev_err(&pdev->dev, "too many pages for eq\n");
763 | return -EINVAL; |
764 | } |
765 | |
766 | err = set_eq_ctrls(eq); |
767 | if (err) { |
		dev_err(&pdev->dev, "Failed to set eq ctrls\n");
769 | return err; |
770 | } |
771 | |
	eq_update_ci(eq, EQ_ARMED);
773 | |
774 | err = alloc_eq_pages(eq); |
775 | if (err) { |
		dev_err(&pdev->dev, "Failed to allocate pages for eq\n");
777 | return err; |
778 | } |
779 | |
780 | if (type == HINIC_AEQ) { |
781 | struct hinic_eq_work *aeq_work = &eq->aeq_work; |
782 | |
783 | INIT_WORK(&aeq_work->work, eq_irq_work); |
784 | } else if (type == HINIC_CEQ) { |
		tasklet_setup(&eq->ceq_tasklet, ceq_tasklet);
786 | } |
787 | |
788 | /* set the attributes of the msix entry */ |
	hinic_msix_attr_set(eq->hwif, eq->msix_entry.entry,
790 | HINIC_EQ_MSIX_PENDING_LIMIT_DEFAULT, |
791 | HINIC_EQ_MSIX_COALESC_TIMER_DEFAULT, |
792 | HINIC_EQ_MSIX_LLI_TIMER_DEFAULT, |
793 | HINIC_EQ_MSIX_LLI_CREDIT_LIMIT_DEFAULT, |
794 | HINIC_EQ_MSIX_RESEND_TIMER_DEFAULT); |
795 | |
796 | if (type == HINIC_AEQ) { |
		snprintf(eq->irq_name, sizeof(eq->irq_name), "hinic_aeq%d@pci:%s",
			 eq->q_id, pci_name(pdev));
		err = request_irq(entry.vector, aeq_interrupt, 0, eq->irq_name, eq);
800 | } else if (type == HINIC_CEQ) { |
		snprintf(eq->irq_name, sizeof(eq->irq_name), "hinic_ceq%d@pci:%s",
			 eq->q_id, pci_name(pdev));
		err = request_irq(entry.vector, ceq_interrupt, 0, eq->irq_name, eq);
804 | } |
805 | |
806 | if (err) { |
		dev_err(&pdev->dev, "Failed to request irq for the EQ\n");
808 | goto err_req_irq; |
809 | } |
810 | |
811 | return 0; |
812 | |
813 | err_req_irq: |
814 | free_eq_pages(eq); |
815 | return err; |
816 | } |
817 | |
818 | /** |
819 | * remove_eq - remove Event Queue |
820 | * @eq: the event queue |
821 | **/ |
822 | static void remove_eq(struct hinic_eq *eq) |
823 | { |
	hinic_set_msix_state(eq->hwif, eq->msix_entry.entry,
			     HINIC_MSIX_DISABLE);
826 | free_irq(eq->msix_entry.vector, eq); |
827 | |
828 | if (eq->type == HINIC_AEQ) { |
829 | struct hinic_eq_work *aeq_work = &eq->aeq_work; |
830 | |
		cancel_work_sync(&aeq_work->work);
		/* clear aeq_len to avoid hw accessing host memory */
		hinic_hwif_write_reg(eq->hwif,
				     HINIC_CSR_AEQ_CTRL_1_ADDR(eq->q_id), 0);
835 | } else if (eq->type == HINIC_CEQ) { |
		tasklet_kill(&eq->ceq_tasklet);
		/* clear ceq_len to avoid hw accessing host memory */
		hinic_hwif_write_reg(eq->hwif,
				     HINIC_CSR_CEQ_CTRL_1_ADDR(eq->q_id), 0);
840 | } |
841 | |
842 | /* update cons_idx to avoid invalid interrupt */ |
	eq->cons_idx = hinic_hwif_read_reg(eq->hwif, EQ_PROD_IDX_REG_ADDR(eq));
	eq_update_ci(eq, EQ_NOT_ARMED);
845 | |
846 | free_eq_pages(eq); |
847 | } |
848 | |
849 | /** |
850 | * hinic_aeqs_init - initialize all the aeqs |
851 | * @aeqs: pointer to Async eqs of the chip |
852 | * @hwif: the HW interface of a PCI function device |
853 | * @num_aeqs: number of AEQs |
854 | * @q_len: number of EQ elements |
855 | * @page_size: the page size of the pages in the event queue |
856 | * @msix_entries: msix entries associated with the event queues |
857 | * |
858 | * Return 0 - Success, negative - Failure |
859 | **/ |
860 | int hinic_aeqs_init(struct hinic_aeqs *aeqs, struct hinic_hwif *hwif, |
861 | int num_aeqs, u32 q_len, u32 page_size, |
862 | struct msix_entry *msix_entries) |
863 | { |
864 | struct pci_dev *pdev = hwif->pdev; |
865 | int err, i, q_id; |
866 | |
867 | aeqs->workq = create_singlethread_workqueue(HINIC_EQS_WQ_NAME); |
868 | if (!aeqs->workq) |
869 | return -ENOMEM; |
870 | |
871 | aeqs->hwif = hwif; |
872 | aeqs->num_aeqs = num_aeqs; |
873 | |
874 | for (q_id = 0; q_id < num_aeqs; q_id++) { |
		err = init_eq(&aeqs->aeq[q_id], hwif, HINIC_AEQ, q_id, q_len,
			      page_size, msix_entries[q_id]);
877 | if (err) { |
			dev_err(&pdev->dev, "Failed to init aeq %d\n", q_id);
879 | goto err_init_aeq; |
880 | } |
881 | } |
882 | |
883 | return 0; |
884 | |
885 | err_init_aeq: |
886 | for (i = 0; i < q_id; i++) |
		remove_eq(&aeqs->aeq[i]);
888 | |
	destroy_workqueue(aeqs->workq);
890 | return err; |
891 | } |
892 | |
893 | /** |
894 | * hinic_aeqs_free - free all the aeqs |
895 | * @aeqs: pointer to Async eqs of the chip |
896 | **/ |
897 | void hinic_aeqs_free(struct hinic_aeqs *aeqs) |
898 | { |
899 | int q_id; |
900 | |
	for (q_id = 0; q_id < aeqs->num_aeqs; q_id++)
		remove_eq(&aeqs->aeq[q_id]);
903 | |
	destroy_workqueue(aeqs->workq);
905 | } |
906 | |
907 | /** |
908 | * hinic_ceqs_init - init all the ceqs |
909 | * @ceqs: ceqs part of the chip |
910 | * @hwif: the hardware interface of a pci function device |
911 | * @num_ceqs: number of CEQs |
912 | * @q_len: number of EQ elements |
913 | * @page_size: the page size of the event queue |
914 | * @msix_entries: msix entries associated with the event queues |
915 | * |
916 | * Return 0 - Success, Negative - Failure |
917 | **/ |
918 | int hinic_ceqs_init(struct hinic_ceqs *ceqs, struct hinic_hwif *hwif, |
919 | int num_ceqs, u32 q_len, u32 page_size, |
920 | struct msix_entry *msix_entries) |
921 | { |
922 | struct pci_dev *pdev = hwif->pdev; |
923 | int i, q_id, err; |
924 | |
925 | ceqs->hwif = hwif; |
926 | ceqs->num_ceqs = num_ceqs; |
927 | |
928 | for (q_id = 0; q_id < num_ceqs; q_id++) { |
929 | ceqs->ceq[q_id].hwdev = ceqs->hwdev; |
		err = init_eq(&ceqs->ceq[q_id], hwif, HINIC_CEQ, q_id, q_len,
			      page_size, msix_entries[q_id]);
932 | if (err) { |
			dev_err(&pdev->dev, "Failed to init ceq %d\n", q_id);
934 | goto err_init_ceq; |
935 | } |
936 | } |
937 | |
938 | return 0; |
939 | |
940 | err_init_ceq: |
941 | for (i = 0; i < q_id; i++) |
		remove_eq(&ceqs->ceq[i]);
943 | |
944 | return err; |
945 | } |
946 | |
947 | /** |
948 | * hinic_ceqs_free - free all the ceqs |
949 | * @ceqs: ceqs part of the chip |
950 | **/ |
951 | void hinic_ceqs_free(struct hinic_ceqs *ceqs) |
952 | { |
953 | int q_id; |
954 | |
955 | for (q_id = 0; q_id < ceqs->num_ceqs; q_id++) |
		remove_eq(&ceqs->ceq[q_id]);
957 | } |
958 | |
959 | void hinic_dump_ceq_info(struct hinic_hwdev *hwdev) |
960 | { |
961 | struct hinic_eq *eq = NULL; |
962 | u32 addr, ci, pi; |
963 | int q_id; |
964 | |
965 | for (q_id = 0; q_id < hwdev->func_to_io.ceqs.num_ceqs; q_id++) { |
966 | eq = &hwdev->func_to_io.ceqs.ceq[q_id]; |
967 | addr = EQ_CONS_IDX_REG_ADDR(eq); |
		ci = hinic_hwif_read_reg(hwdev->hwif, addr);
		addr = EQ_PROD_IDX_REG_ADDR(eq);
		pi = hinic_hwif_read_reg(hwdev->hwif, addr);
		dev_err(&hwdev->hwif->pdev->dev, "Ceq id: %d, ci: 0x%08x, sw_ci: 0x%08x, pi: 0x%x, tasklet_state: 0x%lx, wrap: %d, ceqe: 0x%x\n",
972 | q_id, ci, eq->cons_idx, pi, |
973 | eq->ceq_tasklet.state, |
974 | eq->wrapped, be32_to_cpu(*(__be32 *)(GET_CURR_CEQ_ELEM(eq)))); |
975 | } |
976 | } |
977 | |
978 | void hinic_dump_aeq_info(struct hinic_hwdev *hwdev) |
979 | { |
980 | struct hinic_aeq_elem *aeqe_pos = NULL; |
981 | struct hinic_eq *eq = NULL; |
982 | u32 addr, ci, pi; |
983 | int q_id; |
984 | |
985 | for (q_id = 0; q_id < hwdev->aeqs.num_aeqs; q_id++) { |
986 | eq = &hwdev->aeqs.aeq[q_id]; |
987 | addr = EQ_CONS_IDX_REG_ADDR(eq); |
		ci = hinic_hwif_read_reg(hwdev->hwif, addr);
		addr = EQ_PROD_IDX_REG_ADDR(eq);
		pi = hinic_hwif_read_reg(hwdev->hwif, addr);
		aeqe_pos = GET_CURR_AEQ_ELEM(eq);
		dev_err(&hwdev->hwif->pdev->dev, "Aeq id: %d, ci: 0x%08x, pi: 0x%x, work_state: 0x%x, wrap: %d, desc: 0x%x\n",
993 | q_id, ci, pi, work_busy(&eq->aeq_work.work), |
994 | eq->wrapped, be32_to_cpu(aeqe_pos->desc)); |
995 | } |
996 | } |
997 | |