// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017 QLogic Corporation
 * Copyright (c) 2019-2020 Marvell International Ltd.
 */

#include <linux/types.h>
#include <asm/byteorder.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/vmalloc.h>
#include <linux/etherdevice.h>
#include <linux/qed/qed_chain.h>
#include <linux/qed/qed_if.h>
#include "qed.h"
#include "qed_cxt.h"
#include "qed_dcbx.h"
#include "qed_dev_api.h"
#include "qed_fcoe.h"
#include "qed_hsi.h"
#include "qed_iro_hsi.h"
#include "qed_hw.h"
#include "qed_init_ops.h"
#include "qed_int.h"
#include "qed_iscsi.h"
#include "qed_ll2.h"
#include "qed_mcp.h"
#include "qed_ooo.h"
#include "qed_reg_addr.h"
#include "qed_sp.h"
#include "qed_sriov.h"
#include "qed_vf.h"
#include "qed_rdma.h"
#include "qed_nvmetcp.h"

static DEFINE_SPINLOCK(qm_lock);

/******************** Doorbell Recovery *******************/
/* The doorbell recovery mechanism consists of a list of entries which represent
 * doorbelling entities (l2 queues, roce sq/rq/cqs, the slowpath spq, etc). Each
 * entity needs to register with the mechanism and provide the parameters
 * describing its doorbell, including a location where last used doorbell data
 * can be found. The doorbell execute function will traverse the list and
 * doorbell all of the registered entries.
 */
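/* Illustrative registration flow (a sketch, not a prescribed call sequence;
 * the exact arguments depend on the doorbelling entity):
 *
 *	rc = qed_db_recovery_add(cdev, db_addr, &db_data,
 *				 DB_REC_WIDTH_32B, DB_REC_KERNEL);
 *	...
 *	qed_db_recovery_execute(p_hwfn);	<- e.g. upon a doorbell drop
 *	...
 *	qed_db_recovery_del(cdev, db_addr, &db_data);
 */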
struct qed_db_recovery_entry {
	struct list_head list_entry;
	void __iomem *db_addr;
	void *db_data;
	enum qed_db_rec_width db_width;
	enum qed_db_rec_space db_space;
	u8 hwfn_idx;
};

/* Display a single doorbell recovery entry */
static void qed_db_recovery_dp_entry(struct qed_hwfn *p_hwfn,
				     struct qed_db_recovery_entry *db_entry,
				     char *action)
{
	DP_VERBOSE(p_hwfn,
		   QED_MSG_SPQ,
		   "(%s: db_entry %p, addr %p, data %p, width %s, %s space, hwfn %d)\n",
		   action,
		   db_entry,
		   db_entry->db_addr,
		   db_entry->db_data,
		   db_entry->db_width == DB_REC_WIDTH_32B ? "32b" : "64b",
		   db_entry->db_space == DB_REC_USER ? "user" : "kernel",
		   db_entry->hwfn_idx);
}

/* Doorbell address sanity (address within doorbell bar range) */
static bool qed_db_rec_sanity(struct qed_dev *cdev,
			      void __iomem *db_addr,
			      enum qed_db_rec_width db_width,
			      void *db_data)
{
	u32 width = (db_width == DB_REC_WIDTH_32B) ? 32 : 64;

	/* Make sure doorbell address is within the doorbell bar */
	if (db_addr < cdev->doorbells ||
	    (u8 __iomem *)db_addr + width >
	    (u8 __iomem *)cdev->doorbells + cdev->db_size) {
		WARN(true,
		     "Illegal doorbell address: %p. Legal range for doorbell addresses is [%p..%p]\n",
		     db_addr,
		     cdev->doorbells,
		     (u8 __iomem *)cdev->doorbells + cdev->db_size);
		return false;
	}

	/* Make sure doorbell data pointer is not null */
	if (!db_data) {
		WARN(true, "Illegal doorbell data pointer: %p", db_data);
		return false;
	}

	return true;
}
107 | |
108 | /* Find hwfn according to the doorbell address */ |
109 | static struct qed_hwfn *qed_db_rec_find_hwfn(struct qed_dev *cdev, |
110 | void __iomem *db_addr) |
111 | { |
112 | struct qed_hwfn *p_hwfn; |
113 | |
114 | /* In CMT doorbell bar is split down the middle between engine 0 and enigne 1 */ |
115 | if (cdev->num_hwfns > 1) |
116 | p_hwfn = db_addr < cdev->hwfns[1].doorbells ? |
117 | &cdev->hwfns[0] : &cdev->hwfns[1]; |
118 | else |
119 | p_hwfn = QED_LEADING_HWFN(cdev); |
120 | |
121 | return p_hwfn; |
122 | } |

/* Add a new entry to the doorbell recovery mechanism */
int qed_db_recovery_add(struct qed_dev *cdev,
			void __iomem *db_addr,
			void *db_data,
			enum qed_db_rec_width db_width,
			enum qed_db_rec_space db_space)
{
	struct qed_db_recovery_entry *db_entry;
	struct qed_hwfn *p_hwfn;

	/* Short-circuit VFs, for now */
	if (IS_VF(cdev)) {
		DP_VERBOSE(cdev,
			   QED_MSG_IOV, "db recovery - skipping VF doorbell\n");
		return 0;
	}

	/* Sanitize doorbell address */
	if (!qed_db_rec_sanity(cdev, db_addr, db_width, db_data))
		return -EINVAL;

	/* Obtain hwfn from doorbell address */
	p_hwfn = qed_db_rec_find_hwfn(cdev, db_addr);

	/* Create entry */
	db_entry = kzalloc(sizeof(*db_entry), GFP_KERNEL);
	if (!db_entry) {
		DP_NOTICE(cdev, "Failed to allocate a db recovery entry\n");
		return -ENOMEM;
	}

	/* Populate entry */
	db_entry->db_addr = db_addr;
	db_entry->db_data = db_data;
	db_entry->db_width = db_width;
	db_entry->db_space = db_space;
	db_entry->hwfn_idx = p_hwfn->my_id;

	/* Display */
	qed_db_recovery_dp_entry(p_hwfn, db_entry, "Adding");

	/* Protect the list */
	spin_lock_bh(&p_hwfn->db_recovery_info.lock);
	list_add_tail(&db_entry->list_entry, &p_hwfn->db_recovery_info.list);
	spin_unlock_bh(&p_hwfn->db_recovery_info.lock);

	return 0;
}

/* Remove an entry from the doorbell recovery mechanism */
int qed_db_recovery_del(struct qed_dev *cdev,
			void __iomem *db_addr, void *db_data)
{
	struct qed_db_recovery_entry *db_entry = NULL;
	struct qed_hwfn *p_hwfn;
	int rc = -EINVAL;

	/* Short-circuit VFs, for now */
	if (IS_VF(cdev)) {
		DP_VERBOSE(cdev,
			   QED_MSG_IOV, "db recovery - skipping VF doorbell\n");
		return 0;
	}

	/* Obtain hwfn from doorbell address */
	p_hwfn = qed_db_rec_find_hwfn(cdev, db_addr);

	/* Protect the list */
	spin_lock_bh(&p_hwfn->db_recovery_info.lock);
	list_for_each_entry(db_entry,
			    &p_hwfn->db_recovery_info.list, list_entry) {
		/* search according to db_data addr since db_addr is not unique (roce) */
		if (db_entry->db_data == db_data) {
			qed_db_recovery_dp_entry(p_hwfn, db_entry, "Deleting");
			list_del(&db_entry->list_entry);
			rc = 0;
			break;
		}
	}

	spin_unlock_bh(&p_hwfn->db_recovery_info.lock);

	if (rc == -EINVAL)
		DP_NOTICE(p_hwfn,
			  "Failed to find element in list. Key (db_data addr) was %p. db_addr was %p\n",
			  db_data, db_addr);
	else
		kfree(db_entry);

	return rc;
}

/* Initialize the doorbell recovery mechanism */
static int qed_db_recovery_setup(struct qed_hwfn *p_hwfn)
{
	DP_VERBOSE(p_hwfn, QED_MSG_SPQ, "Setting up db recovery\n");

	/* Make sure db_size was set in cdev */
	if (!p_hwfn->cdev->db_size) {
		DP_ERR(p_hwfn->cdev, "db_size not set\n");
		return -EINVAL;
	}

	INIT_LIST_HEAD(&p_hwfn->db_recovery_info.list);
	spin_lock_init(&p_hwfn->db_recovery_info.lock);
	p_hwfn->db_recovery_info.db_recovery_counter = 0;

	return 0;
}

/* Destroy the doorbell recovery mechanism */
static void qed_db_recovery_teardown(struct qed_hwfn *p_hwfn)
{
	struct qed_db_recovery_entry *db_entry = NULL;

	DP_VERBOSE(p_hwfn, QED_MSG_SPQ, "Tearing down db recovery\n");
	if (!list_empty(&p_hwfn->db_recovery_info.list)) {
		DP_VERBOSE(p_hwfn,
			   QED_MSG_SPQ,
			   "Doorbell Recovery teardown found the doorbell recovery list was not empty (Expected in disorderly driver unload (e.g. recovery) otherwise this probably means some flow forgot to db_recovery_del). Prepare to purge doorbell recovery list...\n");
		while (!list_empty(&p_hwfn->db_recovery_info.list)) {
			db_entry =
			    list_first_entry(&p_hwfn->db_recovery_info.list,
					     struct qed_db_recovery_entry,
					     list_entry);
			qed_db_recovery_dp_entry(p_hwfn, db_entry, "Purging");
			list_del(&db_entry->list_entry);
			kfree(db_entry);
		}
	}
	p_hwfn->db_recovery_info.db_recovery_counter = 0;
}

/* Print the content of the doorbell recovery mechanism */
void qed_db_recovery_dp(struct qed_hwfn *p_hwfn)
{
	struct qed_db_recovery_entry *db_entry = NULL;

	DP_NOTICE(p_hwfn,
		  "Displaying doorbell recovery database. Counter was %d\n",
		  p_hwfn->db_recovery_info.db_recovery_counter);

	/* Protect the list */
	spin_lock_bh(&p_hwfn->db_recovery_info.lock);
	list_for_each_entry(db_entry,
			    &p_hwfn->db_recovery_info.list, list_entry) {
		qed_db_recovery_dp_entry(p_hwfn, db_entry, "Printing");
	}

	spin_unlock_bh(&p_hwfn->db_recovery_info.lock);
}

/* Ring the doorbell of a single doorbell recovery entry */
static void qed_db_recovery_ring(struct qed_hwfn *p_hwfn,
				 struct qed_db_recovery_entry *db_entry)
{
	/* Print according to width */
	if (db_entry->db_width == DB_REC_WIDTH_32B) {
		DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
			   "ringing doorbell address %p data %x\n",
			   db_entry->db_addr,
			   *(u32 *)db_entry->db_data);
	} else {
		DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
			   "ringing doorbell address %p data %llx\n",
			   db_entry->db_addr,
			   *(u64 *)(db_entry->db_data));
	}

	/* Sanity */
	if (!qed_db_rec_sanity(p_hwfn->cdev, db_entry->db_addr,
			       db_entry->db_width, db_entry->db_data))
		return;

	/* Flush the write combined buffer. Since there are multiple doorbelling
	 * entities using the same address, if we don't flush, a transaction
	 * could be lost.
	 */
	wmb();

	/* Ring the doorbell */
	if (db_entry->db_width == DB_REC_WIDTH_32B)
		DIRECT_REG_WR(db_entry->db_addr,
			      *(u32 *)(db_entry->db_data));
	else
		DIRECT_REG_WR64(db_entry->db_addr,
				*(u64 *)(db_entry->db_data));

	/* Flush the write combined buffer. Next doorbell may come from a
	 * different entity to the same address...
	 */
	wmb();
}

/* Traverse the doorbell recovery entry list and ring all the doorbells */
void qed_db_recovery_execute(struct qed_hwfn *p_hwfn)
{
	struct qed_db_recovery_entry *db_entry = NULL;

	DP_NOTICE(p_hwfn, "Executing doorbell recovery. Counter was %d\n",
		  p_hwfn->db_recovery_info.db_recovery_counter);

	/* Track amount of times recovery was executed */
	p_hwfn->db_recovery_info.db_recovery_counter++;

	/* Protect the list */
	spin_lock_bh(&p_hwfn->db_recovery_info.lock);
	list_for_each_entry(db_entry,
			    &p_hwfn->db_recovery_info.list, list_entry)
		qed_db_recovery_ring(p_hwfn, db_entry);
	spin_unlock_bh(&p_hwfn->db_recovery_info.lock);
}

/******************** Doorbell Recovery end ****************/

/********************************** NIG LLH ***********************************/

enum qed_llh_filter_type {
	QED_LLH_FILTER_TYPE_MAC,
	QED_LLH_FILTER_TYPE_PROTOCOL,
};

struct qed_llh_mac_filter {
	u8 addr[ETH_ALEN];
};

struct qed_llh_protocol_filter {
	enum qed_llh_prot_filter_type_t type;
	u16 source_port_or_eth_type;
	u16 dest_port;
};

union qed_llh_filter {
	struct qed_llh_mac_filter mac;
	struct qed_llh_protocol_filter protocol;
};

struct qed_llh_filter_info {
	bool b_enabled;
	u32 ref_cnt;
	enum qed_llh_filter_type type;
	union qed_llh_filter filter;
};

struct qed_llh_info {
	/* Number of LLH filters banks */
	u8 num_ppfid;

#define MAX_NUM_PPFID 8
	u8 ppfid_array[MAX_NUM_PPFID];

	/* Array of filters arrays:
	 * "num_ppfid" elements of filters banks, where each is an array of
	 * "NIG_REG_LLH_FUNC_FILTER_EN_SIZE" filters.
	 */
	struct qed_llh_filter_info **pp_filters;
};
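
/* Indexing sketch (derived from the fields above): a filter shadow entry is
 * addressed as pp_filters[ppfid][filter_idx], where ppfid is a relative
 * index into ppfid_array[] and filter_idx < NIG_REG_LLH_FUNC_FILTER_EN_SIZE.
 */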

static void qed_llh_free(struct qed_dev *cdev)
{
	struct qed_llh_info *p_llh_info = cdev->p_llh_info;
	u32 i;

	if (p_llh_info) {
		if (p_llh_info->pp_filters)
			for (i = 0; i < p_llh_info->num_ppfid; i++)
				kfree(p_llh_info->pp_filters[i]);

		kfree(p_llh_info->pp_filters);
	}

	kfree(p_llh_info);
	cdev->p_llh_info = NULL;
}

static int qed_llh_alloc(struct qed_dev *cdev)
{
	struct qed_llh_info *p_llh_info;
	u32 size, i;

	p_llh_info = kzalloc(sizeof(*p_llh_info), GFP_KERNEL);
	if (!p_llh_info)
		return -ENOMEM;
	cdev->p_llh_info = p_llh_info;

	for (i = 0; i < MAX_NUM_PPFID; i++) {
		if (!(cdev->ppfid_bitmap & (0x1 << i)))
			continue;

		p_llh_info->ppfid_array[p_llh_info->num_ppfid] = i;
		DP_VERBOSE(cdev, QED_MSG_SP, "ppfid_array[%d] = %u\n",
			   p_llh_info->num_ppfid, i);
		p_llh_info->num_ppfid++;
	}

	size = p_llh_info->num_ppfid * sizeof(*p_llh_info->pp_filters);
	p_llh_info->pp_filters = kzalloc(size, GFP_KERNEL);
	if (!p_llh_info->pp_filters)
		return -ENOMEM;

	size = NIG_REG_LLH_FUNC_FILTER_EN_SIZE *
	    sizeof(**p_llh_info->pp_filters);
	for (i = 0; i < p_llh_info->num_ppfid; i++) {
		p_llh_info->pp_filters[i] = kzalloc(size, GFP_KERNEL);
		if (!p_llh_info->pp_filters[i])
			return -ENOMEM;
	}

	return 0;
}

static int qed_llh_shadow_sanity(struct qed_dev *cdev,
				 u8 ppfid, u8 filter_idx, const char *action)
{
	struct qed_llh_info *p_llh_info = cdev->p_llh_info;

	if (ppfid >= p_llh_info->num_ppfid) {
		DP_NOTICE(cdev,
			  "LLH shadow [%s]: using ppfid %d while only %d ppfids are available\n",
			  action, ppfid, p_llh_info->num_ppfid);
		return -EINVAL;
	}

	if (filter_idx >= NIG_REG_LLH_FUNC_FILTER_EN_SIZE) {
		DP_NOTICE(cdev,
			  "LLH shadow [%s]: using filter_idx %d while only %d filters are available\n",
			  action, filter_idx, NIG_REG_LLH_FUNC_FILTER_EN_SIZE);
		return -EINVAL;
	}

	return 0;
}

#define QED_LLH_INVALID_FILTER_IDX 0xff

static int
qed_llh_shadow_search_filter(struct qed_dev *cdev,
			     u8 ppfid,
			     union qed_llh_filter *p_filter, u8 *p_filter_idx)
{
	struct qed_llh_info *p_llh_info = cdev->p_llh_info;
	struct qed_llh_filter_info *p_filters;
	int rc;
	u8 i;

	rc = qed_llh_shadow_sanity(cdev, ppfid, 0, "search");
	if (rc)
		return rc;

	*p_filter_idx = QED_LLH_INVALID_FILTER_IDX;

	p_filters = p_llh_info->pp_filters[ppfid];
	for (i = 0; i < NIG_REG_LLH_FUNC_FILTER_EN_SIZE; i++) {
		if (!memcmp(p_filter, &p_filters[i].filter,
			    sizeof(*p_filter))) {
			*p_filter_idx = i;
			break;
		}
	}

	return 0;
}

static int
qed_llh_shadow_get_free_idx(struct qed_dev *cdev, u8 ppfid, u8 *p_filter_idx)
{
	struct qed_llh_info *p_llh_info = cdev->p_llh_info;
	struct qed_llh_filter_info *p_filters;
	int rc;
	u8 i;

	rc = qed_llh_shadow_sanity(cdev, ppfid, 0, "get_free_idx");
	if (rc)
		return rc;

	*p_filter_idx = QED_LLH_INVALID_FILTER_IDX;

	p_filters = p_llh_info->pp_filters[ppfid];
	for (i = 0; i < NIG_REG_LLH_FUNC_FILTER_EN_SIZE; i++) {
		if (!p_filters[i].b_enabled) {
			*p_filter_idx = i;
			break;
		}
	}

	return 0;
}

static int
__qed_llh_shadow_add_filter(struct qed_dev *cdev,
			    u8 ppfid,
			    u8 filter_idx,
			    enum qed_llh_filter_type type,
			    union qed_llh_filter *p_filter, u32 *p_ref_cnt)
{
	struct qed_llh_info *p_llh_info = cdev->p_llh_info;
	struct qed_llh_filter_info *p_filters;
	int rc;

	rc = qed_llh_shadow_sanity(cdev, ppfid, filter_idx, "add");
	if (rc)
		return rc;

	p_filters = p_llh_info->pp_filters[ppfid];
	if (!p_filters[filter_idx].ref_cnt) {
		p_filters[filter_idx].b_enabled = true;
		p_filters[filter_idx].type = type;
		memcpy(&p_filters[filter_idx].filter, p_filter,
		       sizeof(p_filters[filter_idx].filter));
	}

	*p_ref_cnt = ++p_filters[filter_idx].ref_cnt;

	return 0;
}

static int
qed_llh_shadow_add_filter(struct qed_dev *cdev,
			  u8 ppfid,
			  enum qed_llh_filter_type type,
			  union qed_llh_filter *p_filter,
			  u8 *p_filter_idx, u32 *p_ref_cnt)
{
	int rc;

	/* Check if the same filter already exists */
	rc = qed_llh_shadow_search_filter(cdev, ppfid, p_filter, p_filter_idx);
	if (rc)
		return rc;

	/* Find a new entry in case of a new filter */
	if (*p_filter_idx == QED_LLH_INVALID_FILTER_IDX) {
		rc = qed_llh_shadow_get_free_idx(cdev, ppfid, p_filter_idx);
		if (rc)
			return rc;
	}

	/* No free entry was found */
	if (*p_filter_idx == QED_LLH_INVALID_FILTER_IDX) {
		DP_NOTICE(cdev,
			  "Failed to find an empty LLH filter to utilize [ppfid %d]\n",
			  ppfid);
		return -EINVAL;
	}

	return __qed_llh_shadow_add_filter(cdev, ppfid, *p_filter_idx, type,
					   p_filter, p_ref_cnt);
}

static int
__qed_llh_shadow_remove_filter(struct qed_dev *cdev,
			       u8 ppfid, u8 filter_idx, u32 *p_ref_cnt)
{
	struct qed_llh_info *p_llh_info = cdev->p_llh_info;
	struct qed_llh_filter_info *p_filters;
	int rc;

	rc = qed_llh_shadow_sanity(cdev, ppfid, filter_idx, "remove");
	if (rc)
		return rc;

	p_filters = p_llh_info->pp_filters[ppfid];
	if (!p_filters[filter_idx].ref_cnt) {
		DP_NOTICE(cdev,
			  "LLH shadow: trying to remove a filter with ref_cnt=0\n");
		return -EINVAL;
	}

	*p_ref_cnt = --p_filters[filter_idx].ref_cnt;
	if (!p_filters[filter_idx].ref_cnt)
		memset(&p_filters[filter_idx],
		       0, sizeof(p_filters[filter_idx]));

	return 0;
}

static int
qed_llh_shadow_remove_filter(struct qed_dev *cdev,
			     u8 ppfid,
			     union qed_llh_filter *p_filter,
			     u8 *p_filter_idx, u32 *p_ref_cnt)
{
	int rc;

	rc = qed_llh_shadow_search_filter(cdev, ppfid, p_filter, p_filter_idx);
	if (rc)
		return rc;

	/* No matching filter was found */
	if (*p_filter_idx == QED_LLH_INVALID_FILTER_IDX) {
		DP_NOTICE(cdev, "Failed to find a filter in the LLH shadow\n");
		return -EINVAL;
	}

	return __qed_llh_shadow_remove_filter(cdev, ppfid, *p_filter_idx,
					      p_ref_cnt);
}

static int qed_llh_abs_ppfid(struct qed_dev *cdev, u8 ppfid, u8 *p_abs_ppfid)
{
	struct qed_llh_info *p_llh_info = cdev->p_llh_info;

	if (ppfid >= p_llh_info->num_ppfid) {
		DP_NOTICE(cdev,
			  "ppfid %d is not valid, available indices are 0..%d\n",
			  ppfid, p_llh_info->num_ppfid - 1);
		*p_abs_ppfid = 0;
		return -EINVAL;
	}

	*p_abs_ppfid = p_llh_info->ppfid_array[ppfid];

	return 0;
}

static int
qed_llh_set_engine_affin(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	struct qed_dev *cdev = p_hwfn->cdev;
	enum qed_eng eng;
	u8 ppfid;
	int rc;

	rc = qed_mcp_get_engine_config(p_hwfn, p_ptt);
	if (rc != 0 && rc != -EOPNOTSUPP) {
		DP_NOTICE(p_hwfn,
			  "Failed to get the engine affinity configuration\n");
		return rc;
	}

	/* RoCE PF is bound to a single engine */
	if (QED_IS_ROCE_PERSONALITY(p_hwfn)) {
		eng = cdev->fir_affin ? QED_ENG1 : QED_ENG0;
		rc = qed_llh_set_roce_affinity(cdev, eng);
		if (rc) {
			DP_NOTICE(cdev,
				  "Failed to set the RoCE engine affinity\n");
			return rc;
		}

		DP_VERBOSE(cdev,
			   QED_MSG_SP,
			   "LLH: Set the engine affinity of RoCE packets as %d\n",
			   eng);
	}

	/* Storage PF is bound to a single engine while L2 PF uses both */
	if (QED_IS_FCOE_PERSONALITY(p_hwfn) || QED_IS_ISCSI_PERSONALITY(p_hwfn) ||
	    QED_IS_NVMETCP_PERSONALITY(p_hwfn))
		eng = cdev->fir_affin ? QED_ENG1 : QED_ENG0;
	else			/* L2_PERSONALITY */
		eng = QED_BOTH_ENG;

	for (ppfid = 0; ppfid < cdev->p_llh_info->num_ppfid; ppfid++) {
		rc = qed_llh_set_ppfid_affinity(cdev, ppfid, eng);
		if (rc) {
			DP_NOTICE(cdev,
				  "Failed to set the engine affinity of ppfid %d\n",
				  ppfid);
			return rc;
		}
	}

	DP_VERBOSE(cdev, QED_MSG_SP,
		   "LLH: Set the engine affinity of non-RoCE packets as %d\n",
		   eng);

	return 0;
}

static int qed_llh_hw_init_pf(struct qed_hwfn *p_hwfn,
			      struct qed_ptt *p_ptt)
{
	struct qed_dev *cdev = p_hwfn->cdev;
	u8 ppfid, abs_ppfid;
	int rc;

	for (ppfid = 0; ppfid < cdev->p_llh_info->num_ppfid; ppfid++) {
		u32 addr;

		rc = qed_llh_abs_ppfid(cdev, ppfid, &abs_ppfid);
		if (rc)
			return rc;

		addr = NIG_REG_LLH_PPFID2PFID_TBL_0 + abs_ppfid * 0x4;
		qed_wr(p_hwfn, p_ptt, addr, p_hwfn->rel_pf_id);
	}

	if (test_bit(QED_MF_LLH_MAC_CLSS, &cdev->mf_bits) &&
	    !QED_IS_FCOE_PERSONALITY(p_hwfn)) {
		rc = qed_llh_add_mac_filter(cdev, 0,
					    p_hwfn->hw_info.hw_mac_addr);
		if (rc)
			DP_NOTICE(cdev,
				  "Failed to add an LLH filter with the primary MAC\n");
	}

	if (QED_IS_CMT(cdev)) {
		rc = qed_llh_set_engine_affin(p_hwfn, p_ptt);
		if (rc)
			return rc;
	}

	return 0;
}

u8 qed_llh_get_num_ppfid(struct qed_dev *cdev)
{
	return cdev->p_llh_info->num_ppfid;
}

#define NIG_REG_PPF_TO_ENGINE_SEL_ROCE_MASK 0x3
#define NIG_REG_PPF_TO_ENGINE_SEL_ROCE_SHIFT 0
#define NIG_REG_PPF_TO_ENGINE_SEL_NON_ROCE_MASK 0x3
#define NIG_REG_PPF_TO_ENGINE_SEL_NON_ROCE_SHIFT 2
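/* The _MASK/_SHIFT pairs above are consumed by SET_FIELD()/GET_FIELD().
 * Conceptually (a sketch of the common kernel idiom, not necessarily this
 * driver's exact macro expansion):
 *
 *	val &= ~(NIG_REG_PPF_TO_ENGINE_SEL_NON_ROCE_MASK <<
 *		 NIG_REG_PPF_TO_ENGINE_SEL_NON_ROCE_SHIFT);
 *	val |= eng_sel << NIG_REG_PPF_TO_ENGINE_SEL_NON_ROCE_SHIFT;
 */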

int qed_llh_set_ppfid_affinity(struct qed_dev *cdev, u8 ppfid, enum qed_eng eng)
{
	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn);
	u32 addr, val, eng_sel;
	u8 abs_ppfid;
	int rc = 0;

	if (!p_ptt)
		return -EAGAIN;

	if (!QED_IS_CMT(cdev))
		goto out;

	rc = qed_llh_abs_ppfid(cdev, ppfid, &abs_ppfid);
	if (rc)
		goto out;

	switch (eng) {
	case QED_ENG0:
		eng_sel = 0;
		break;
	case QED_ENG1:
		eng_sel = 1;
		break;
	case QED_BOTH_ENG:
		eng_sel = 2;
		break;
	default:
		DP_NOTICE(cdev, "Invalid affinity value for ppfid [%d]\n", eng);
		rc = -EINVAL;
		goto out;
	}

	addr = NIG_REG_PPF_TO_ENGINE_SEL + abs_ppfid * 0x4;
	val = qed_rd(p_hwfn, p_ptt, addr);
	SET_FIELD(val, NIG_REG_PPF_TO_ENGINE_SEL_NON_ROCE, eng_sel);
	qed_wr(p_hwfn, p_ptt, addr, val);

	/* The iWARP affinity is set as the affinity of ppfid 0 */
	if (!ppfid && QED_IS_IWARP_PERSONALITY(p_hwfn))
		cdev->iwarp_affin = (eng == QED_ENG1) ? 1 : 0;
out:
	qed_ptt_release(p_hwfn, p_ptt);

	return rc;
}

int qed_llh_set_roce_affinity(struct qed_dev *cdev, enum qed_eng eng)
{
	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn);
	u32 addr, val, eng_sel;
	u8 ppfid, abs_ppfid;
	int rc = 0;

	if (!p_ptt)
		return -EAGAIN;

	if (!QED_IS_CMT(cdev))
		goto out;

	switch (eng) {
	case QED_ENG0:
		eng_sel = 0;
		break;
	case QED_ENG1:
		eng_sel = 1;
		break;
	case QED_BOTH_ENG:
		eng_sel = 2;
		qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_ENG_CLS_ROCE_QP_SEL,
		       0xf);	/* QP bit 15 */
		break;
	default:
		DP_NOTICE(cdev, "Invalid affinity value for RoCE [%d]\n", eng);
		rc = -EINVAL;
		goto out;
	}

	for (ppfid = 0; ppfid < cdev->p_llh_info->num_ppfid; ppfid++) {
		rc = qed_llh_abs_ppfid(cdev, ppfid, &abs_ppfid);
		if (rc)
			goto out;

		addr = NIG_REG_PPF_TO_ENGINE_SEL + abs_ppfid * 0x4;
		val = qed_rd(p_hwfn, p_ptt, addr);
		SET_FIELD(val, NIG_REG_PPF_TO_ENGINE_SEL_ROCE, eng_sel);
		qed_wr(p_hwfn, p_ptt, addr, val);
	}
out:
	qed_ptt_release(p_hwfn, p_ptt);

	return rc;
}

struct qed_llh_filter_details {
	u64 value;
	u32 mode;
	u32 protocol_type;
	u32 hdr_sel;
	u32 enable;
};

static int
qed_llh_access_filter(struct qed_hwfn *p_hwfn,
		      struct qed_ptt *p_ptt,
		      u8 abs_ppfid,
		      u8 filter_idx,
		      struct qed_llh_filter_details *p_details)
{
	struct qed_dmae_params params = {0};
	u32 addr;
	u8 pfid;
	int rc;

	/* The NIG/LLH registers that are accessed in this function have only 16
	 * rows which are exposed to a PF, i.e. only the 16 filters of its
	 * default ppfid. Accessing filters of other ppfids requires pretending
	 * to be other PFs.
	 * The calculation of PPFID->PFID in AH is based on the relative index
	 * of a PF on its port.
	 * For BB the pfid is actually the abs_ppfid.
	 */
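	/* Worked example for the AH case (hypothetical values): with two
	 * ports per engine and MFW_PORT(p_hwfn) == 1, abs_ppfid 2 pretends
	 * to be pfid = 2 * 2 + 1 = 5.
	 */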
	if (QED_IS_BB(p_hwfn->cdev))
		pfid = abs_ppfid;
	else
		pfid = abs_ppfid * p_hwfn->cdev->num_ports_in_engine +
		    MFW_PORT(p_hwfn);

	/* Filter enable - should be done first when removing a filter */
	if (!p_details->enable) {
		qed_fid_pretend(p_hwfn, p_ptt,
				pfid << PXP_PRETEND_CONCRETE_FID_PFID_SHIFT);

		addr = NIG_REG_LLH_FUNC_FILTER_EN + filter_idx * 0x4;
		qed_wr(p_hwfn, p_ptt, addr, p_details->enable);

		qed_fid_pretend(p_hwfn, p_ptt,
				p_hwfn->rel_pf_id <<
				PXP_PRETEND_CONCRETE_FID_PFID_SHIFT);
	}

	/* Filter value */
	addr = NIG_REG_LLH_FUNC_FILTER_VALUE + 2 * filter_idx * 0x4;

	SET_FIELD(params.flags, QED_DMAE_PARAMS_DST_PF_VALID, 0x1);
	params.dst_pfid = pfid;
	rc = qed_dmae_host2grc(p_hwfn,
			       p_ptt,
			       (u64)(uintptr_t)&p_details->value,
			       addr, 2 /* size_in_dwords */,
			       &params);
	if (rc)
		return rc;

	qed_fid_pretend(p_hwfn, p_ptt,
			pfid << PXP_PRETEND_CONCRETE_FID_PFID_SHIFT);

	/* Filter mode */
	addr = NIG_REG_LLH_FUNC_FILTER_MODE + filter_idx * 0x4;
	qed_wr(p_hwfn, p_ptt, addr, p_details->mode);

	/* Filter protocol type */
	addr = NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE + filter_idx * 0x4;
	qed_wr(p_hwfn, p_ptt, addr, p_details->protocol_type);

	/* Filter header select */
	addr = NIG_REG_LLH_FUNC_FILTER_HDR_SEL + filter_idx * 0x4;
	qed_wr(p_hwfn, p_ptt, addr, p_details->hdr_sel);

	/* Filter enable - should be done last when adding a filter */
	if (p_details->enable) {
		addr = NIG_REG_LLH_FUNC_FILTER_EN + filter_idx * 0x4;
		qed_wr(p_hwfn, p_ptt, addr, p_details->enable);
	}

	qed_fid_pretend(p_hwfn, p_ptt,
			p_hwfn->rel_pf_id <<
			PXP_PRETEND_CONCRETE_FID_PFID_SHIFT);

	return 0;
}

static int
qed_llh_add_filter(struct qed_hwfn *p_hwfn,
		   struct qed_ptt *p_ptt,
		   u8 abs_ppfid,
		   u8 filter_idx, u8 filter_prot_type, u32 high, u32 low)
{
	struct qed_llh_filter_details filter_details;

	filter_details.enable = 1;
	filter_details.value = ((u64)high << 32) | low;
	filter_details.hdr_sel = 0;
	filter_details.protocol_type = filter_prot_type;
	/* Mode: 0: MAC-address classification 1: protocol classification */
	filter_details.mode = filter_prot_type ? 1 : 0;

	return qed_llh_access_filter(p_hwfn, p_ptt, abs_ppfid, filter_idx,
				     &filter_details);
}

static int
qed_llh_remove_filter(struct qed_hwfn *p_hwfn,
		      struct qed_ptt *p_ptt, u8 abs_ppfid, u8 filter_idx)
{
	struct qed_llh_filter_details filter_details = {0};

	return qed_llh_access_filter(p_hwfn, p_ptt, abs_ppfid, filter_idx,
				     &filter_details);
}

int qed_llh_add_mac_filter(struct qed_dev *cdev,
			   u8 ppfid, const u8 mac_addr[ETH_ALEN])
{
	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn);
	union qed_llh_filter filter = {};
	u8 filter_idx, abs_ppfid = 0;
	u32 high, low, ref_cnt;
	int rc = 0;

	if (!p_ptt)
		return -EAGAIN;

	if (!test_bit(QED_MF_LLH_MAC_CLSS, &cdev->mf_bits))
		goto out;

	memcpy(filter.mac.addr, mac_addr, ETH_ALEN);
	rc = qed_llh_shadow_add_filter(cdev, ppfid,
				       QED_LLH_FILTER_TYPE_MAC,
				       &filter, &filter_idx, &ref_cnt);
	if (rc)
		goto err;

	/* Configure the LLH only in case of a new filter */
	if (ref_cnt == 1) {
		rc = qed_llh_abs_ppfid(cdev, ppfid, &abs_ppfid);
		if (rc)
			goto err;

		high = mac_addr[1] | (mac_addr[0] << 8);
		low = mac_addr[5] | (mac_addr[4] << 8) | (mac_addr[3] << 16) |
		    (mac_addr[2] << 24);
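		/* Packing example (illustrative): for MAC aa:bb:cc:dd:ee:ff
		 * the above yields high = 0x0000aabb, low = 0xccddeeff.
		 */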
		rc = qed_llh_add_filter(p_hwfn, p_ptt, abs_ppfid, filter_idx,
					0, high, low);
		if (rc)
			goto err;
	}

	DP_VERBOSE(cdev,
		   QED_MSG_SP,
		   "LLH: Added MAC filter [%pM] to ppfid %hhd [abs %hhd] at idx %hhd [ref_cnt %d]\n",
		   mac_addr, ppfid, abs_ppfid, filter_idx, ref_cnt);

	goto out;

err:	DP_NOTICE(cdev,
		  "LLH: Failed to add MAC filter [%pM] to ppfid %hhd\n",
		  mac_addr, ppfid);
out:
	qed_ptt_release(p_hwfn, p_ptt);

	return rc;
}

static int
qed_llh_protocol_filter_stringify(struct qed_dev *cdev,
				  enum qed_llh_prot_filter_type_t type,
				  u16 source_port_or_eth_type,
				  u16 dest_port, u8 *str, size_t str_len)
{
	switch (type) {
	case QED_LLH_FILTER_ETHERTYPE:
		snprintf(str, str_len, "Ethertype 0x%04x",
			 source_port_or_eth_type);
		break;
	case QED_LLH_FILTER_TCP_SRC_PORT:
		snprintf(str, str_len, "TCP src port 0x%04x",
			 source_port_or_eth_type);
		break;
	case QED_LLH_FILTER_UDP_SRC_PORT:
		snprintf(str, str_len, "UDP src port 0x%04x",
			 source_port_or_eth_type);
		break;
	case QED_LLH_FILTER_TCP_DEST_PORT:
		snprintf(str, str_len, "TCP dst port 0x%04x", dest_port);
		break;
	case QED_LLH_FILTER_UDP_DEST_PORT:
		snprintf(str, str_len, "UDP dst port 0x%04x", dest_port);
		break;
	case QED_LLH_FILTER_TCP_SRC_AND_DEST_PORT:
		snprintf(str, str_len, "TCP src/dst ports 0x%04x/0x%04x",
			 source_port_or_eth_type, dest_port);
		break;
	case QED_LLH_FILTER_UDP_SRC_AND_DEST_PORT:
		snprintf(str, str_len, "UDP src/dst ports 0x%04x/0x%04x",
			 source_port_or_eth_type, dest_port);
		break;
	default:
		DP_NOTICE(cdev,
			  "Non valid LLH protocol filter type %d\n", type);
		return -EINVAL;
	}

	return 0;
}

static int
qed_llh_protocol_filter_to_hilo(struct qed_dev *cdev,
				enum qed_llh_prot_filter_type_t type,
				u16 source_port_or_eth_type,
				u16 dest_port, u32 *p_high, u32 *p_low)
{
	*p_high = 0;
	*p_low = 0;

	switch (type) {
	case QED_LLH_FILTER_ETHERTYPE:
		*p_high = source_port_or_eth_type;
		break;
	case QED_LLH_FILTER_TCP_SRC_PORT:
	case QED_LLH_FILTER_UDP_SRC_PORT:
		*p_low = source_port_or_eth_type << 16;
		break;
	case QED_LLH_FILTER_TCP_DEST_PORT:
	case QED_LLH_FILTER_UDP_DEST_PORT:
		*p_low = dest_port;
		break;
	case QED_LLH_FILTER_TCP_SRC_AND_DEST_PORT:
	case QED_LLH_FILTER_UDP_SRC_AND_DEST_PORT:
		*p_low = (source_port_or_eth_type << 16) | dest_port;
		break;
	default:
		DP_NOTICE(cdev,
			  "Non valid LLH protocol filter type %d\n", type);
		return -EINVAL;
	}

	return 0;
}
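
/* Worked example (illustrative): QED_LLH_FILTER_TCP_SRC_AND_DEST_PORT with
 * source port 0x1234 and dest port 0x5678 yields high = 0, low = 0x12345678.
 */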

int
qed_llh_add_protocol_filter(struct qed_dev *cdev,
			    u8 ppfid,
			    enum qed_llh_prot_filter_type_t type,
			    u16 source_port_or_eth_type, u16 dest_port)
{
	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn);
	u8 filter_idx, abs_ppfid, str[32], type_bitmap;
	union qed_llh_filter filter = {};
	u32 high, low, ref_cnt;
	int rc = 0;

	if (!p_ptt)
		return -EAGAIN;

	if (!test_bit(QED_MF_LLH_PROTO_CLSS, &cdev->mf_bits))
		goto out;

	rc = qed_llh_protocol_filter_stringify(cdev, type,
					       source_port_or_eth_type,
					       dest_port, str, sizeof(str));
	if (rc)
		goto err;

	filter.protocol.type = type;
	filter.protocol.source_port_or_eth_type = source_port_or_eth_type;
	filter.protocol.dest_port = dest_port;
	rc = qed_llh_shadow_add_filter(cdev,
				       ppfid,
				       QED_LLH_FILTER_TYPE_PROTOCOL,
				       &filter, &filter_idx, &ref_cnt);
	if (rc)
		goto err;

	rc = qed_llh_abs_ppfid(cdev, ppfid, &abs_ppfid);
	if (rc)
		goto err;

	/* Configure the LLH only in case of a new filter */
	if (ref_cnt == 1) {
		rc = qed_llh_protocol_filter_to_hilo(cdev, type,
						     source_port_or_eth_type,
						     dest_port, &high, &low);
		if (rc)
			goto err;

		type_bitmap = 0x1 << type;
		rc = qed_llh_add_filter(p_hwfn, p_ptt, abs_ppfid,
					filter_idx, type_bitmap, high, low);
		if (rc)
			goto err;
	}

	DP_VERBOSE(cdev,
		   QED_MSG_SP,
		   "LLH: Added protocol filter [%s] to ppfid %hhd [abs %hhd] at idx %hhd [ref_cnt %d]\n",
		   str, ppfid, abs_ppfid, filter_idx, ref_cnt);

	goto out;

err:	DP_NOTICE(p_hwfn,
		  "LLH: Failed to add protocol filter [%s] to ppfid %hhd\n",
		  str, ppfid);
out:
	qed_ptt_release(p_hwfn, p_ptt);

	return rc;
}

void qed_llh_remove_mac_filter(struct qed_dev *cdev,
			       u8 ppfid, u8 mac_addr[ETH_ALEN])
{
	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn);
	union qed_llh_filter filter = {};
	u8 filter_idx, abs_ppfid;
	int rc = 0;
	u32 ref_cnt;

	if (!p_ptt)
		return;

	if (!test_bit(QED_MF_LLH_MAC_CLSS, &cdev->mf_bits))
		goto out;

	if (QED_IS_NVMETCP_PERSONALITY(p_hwfn))
		goto out;

	ether_addr_copy(filter.mac.addr, mac_addr);
	rc = qed_llh_shadow_remove_filter(cdev, ppfid, &filter, &filter_idx,
					  &ref_cnt);
	if (rc)
		goto err;

	rc = qed_llh_abs_ppfid(cdev, ppfid, &abs_ppfid);
	if (rc)
		goto err;

	/* Remove from the LLH in case the filter is not in use */
	if (!ref_cnt) {
		rc = qed_llh_remove_filter(p_hwfn, p_ptt, abs_ppfid,
					   filter_idx);
		if (rc)
			goto err;
	}

	DP_VERBOSE(cdev,
		   QED_MSG_SP,
		   "LLH: Removed MAC filter [%pM] from ppfid %hhd [abs %hhd] at idx %hhd [ref_cnt %d]\n",
		   mac_addr, ppfid, abs_ppfid, filter_idx, ref_cnt);

	goto out;

err:	DP_NOTICE(cdev,
		  "LLH: Failed to remove MAC filter [%pM] from ppfid %hhd\n",
		  mac_addr, ppfid);
out:
	qed_ptt_release(p_hwfn, p_ptt);
}

void qed_llh_remove_protocol_filter(struct qed_dev *cdev,
				    u8 ppfid,
				    enum qed_llh_prot_filter_type_t type,
				    u16 source_port_or_eth_type, u16 dest_port)
{
	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn);
	u8 filter_idx, abs_ppfid, str[32];
	union qed_llh_filter filter = {};
	int rc = 0;
	u32 ref_cnt;

	if (!p_ptt)
		return;

	if (!test_bit(QED_MF_LLH_PROTO_CLSS, &cdev->mf_bits))
		goto out;

	rc = qed_llh_protocol_filter_stringify(cdev, type,
					       source_port_or_eth_type,
					       dest_port, str, sizeof(str));
	if (rc)
		goto err;

	filter.protocol.type = type;
	filter.protocol.source_port_or_eth_type = source_port_or_eth_type;
	filter.protocol.dest_port = dest_port;
	rc = qed_llh_shadow_remove_filter(cdev, ppfid, &filter, &filter_idx,
					  &ref_cnt);
	if (rc)
		goto err;

	rc = qed_llh_abs_ppfid(cdev, ppfid, &abs_ppfid);
	if (rc)
		goto err;

	/* Remove from the LLH in case the filter is not in use */
	if (!ref_cnt) {
		rc = qed_llh_remove_filter(p_hwfn, p_ptt, abs_ppfid,
					   filter_idx);
		if (rc)
			goto err;
	}

	DP_VERBOSE(cdev,
		   QED_MSG_SP,
		   "LLH: Removed protocol filter [%s] from ppfid %hhd [abs %hhd] at idx %hhd [ref_cnt %d]\n",
		   str, ppfid, abs_ppfid, filter_idx, ref_cnt);

	goto out;

err:	DP_NOTICE(cdev,
		  "LLH: Failed to remove protocol filter [%s] from ppfid %hhd\n",
		  str, ppfid);
out:
	qed_ptt_release(p_hwfn, p_ptt);
}

/******************************* NIG LLH - End ********************************/

#define QED_MIN_DPIS (4)
#define QED_MIN_PWM_REGION (QED_WID_SIZE * QED_MIN_DPIS)

static u32 qed_hw_bar_size(struct qed_hwfn *p_hwfn,
			   struct qed_ptt *p_ptt, enum BAR_ID bar_id)
{
	u32 bar_reg = (bar_id == BAR_ID_0 ?
		       PGLUE_B_REG_PF_BAR0_SIZE : PGLUE_B_REG_PF_BAR1_SIZE);
	u32 val;

	if (IS_VF(p_hwfn->cdev))
		return qed_vf_hw_bar_size(p_hwfn, bar_id);

	val = qed_rd(p_hwfn, p_ptt, bar_reg);
	if (val)
		return 1 << (val + 15);

	/* Old MFW versions initialized the above register only conditionally */
	if (p_hwfn->cdev->num_hwfns > 1) {
		DP_INFO(p_hwfn,
			"BAR size not configured. Assuming BAR size of 256kB for GRC and 512kB for DB\n");
		return bar_id == BAR_ID_0 ? 256 * 1024 : 512 * 1024;
	} else {
		DP_INFO(p_hwfn,
			"BAR size not configured. Assuming BAR size of 512kB for GRC and 512kB for DB\n");
		return 512 * 1024;
	}
}
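
/* Example of the register encoding above (illustrative): a read value of 1
 * encodes a 64kB BAR (1 << 16), a value of 3 a 256kB BAR (1 << 18).
 */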

void qed_init_dp(struct qed_dev *cdev, u32 dp_module, u8 dp_level)
{
	u32 i;

	cdev->dp_level = dp_level;
	cdev->dp_module = dp_module;
	for (i = 0; i < MAX_HWFNS_PER_DEVICE; i++) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		p_hwfn->dp_level = dp_level;
		p_hwfn->dp_module = dp_module;
	}
}

void qed_init_struct(struct qed_dev *cdev)
{
	u8 i;

	for (i = 0; i < MAX_HWFNS_PER_DEVICE; i++) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		p_hwfn->cdev = cdev;
		p_hwfn->my_id = i;
		p_hwfn->b_active = false;

		mutex_init(&p_hwfn->dmae_info.mutex);
	}

	/* hwfn 0 is always active */
	cdev->hwfns[0].b_active = true;

	/* set the default cache alignment to 128 */
	cdev->cache_shift = 7;
}

static void qed_qm_info_free(struct qed_hwfn *p_hwfn)
{
	struct qed_qm_info *qm_info = &p_hwfn->qm_info;

	kfree(qm_info->qm_pq_params);
	qm_info->qm_pq_params = NULL;
	kfree(qm_info->qm_vport_params);
	qm_info->qm_vport_params = NULL;
	kfree(qm_info->qm_port_params);
	qm_info->qm_port_params = NULL;
	kfree(qm_info->wfq_data);
	qm_info->wfq_data = NULL;
}

static void qed_dbg_user_data_free(struct qed_hwfn *p_hwfn)
{
	kfree(p_hwfn->dbg_user_info);
	p_hwfn->dbg_user_info = NULL;
}

void qed_resc_free(struct qed_dev *cdev)
{
	struct qed_rdma_info *rdma_info;
	struct qed_hwfn *p_hwfn;
	int i;

	if (IS_VF(cdev)) {
		for_each_hwfn(cdev, i)
			qed_l2_free(&cdev->hwfns[i]);
		return;
	}

	kfree(cdev->fw_data);
	cdev->fw_data = NULL;

	kfree(cdev->reset_stats);
	cdev->reset_stats = NULL;

	qed_llh_free(cdev);

	for_each_hwfn(cdev, i) {
		p_hwfn = cdev->hwfns + i;
		rdma_info = p_hwfn->p_rdma_info;

		qed_cxt_mngr_free(p_hwfn);
		qed_qm_info_free(p_hwfn);
		qed_spq_free(p_hwfn);
		qed_eq_free(p_hwfn);
		qed_consq_free(p_hwfn);
		qed_int_free(p_hwfn);
#ifdef CONFIG_QED_LL2
		qed_ll2_free(p_hwfn);
#endif
		if (p_hwfn->hw_info.personality == QED_PCI_FCOE)
			qed_fcoe_free(p_hwfn);

		if (p_hwfn->hw_info.personality == QED_PCI_ISCSI) {
			qed_iscsi_free(p_hwfn);
			qed_ooo_free(p_hwfn);
		}

		if (p_hwfn->hw_info.personality == QED_PCI_NVMETCP) {
			qed_nvmetcp_free(p_hwfn);
			qed_ooo_free(p_hwfn);
		}

		if (QED_IS_RDMA_PERSONALITY(p_hwfn) && rdma_info) {
			qed_spq_unregister_async_cb(p_hwfn, rdma_info->proto);
			qed_rdma_info_free(p_hwfn);
		}

		qed_spq_unregister_async_cb(p_hwfn, PROTOCOLID_COMMON);
		qed_iov_free(p_hwfn);
		qed_l2_free(p_hwfn);
		qed_dmae_info_free(p_hwfn);
		qed_dcbx_info_free(p_hwfn);
		qed_dbg_user_data_free(p_hwfn);
		qed_fw_overlay_mem_free(p_hwfn, &p_hwfn->fw_overlay_mem);

		/* Destroy doorbell recovery mechanism */
		qed_db_recovery_teardown(p_hwfn);
	}
}

/******************** QM initialization *******************/
#define ACTIVE_TCS_BMAP 0x9f
#define ACTIVE_TCS_BMAP_4PORT_K2 0xf

/* determines the physical queue flags for a given PF. */
static u32 qed_get_pq_flags(struct qed_hwfn *p_hwfn)
{
	u32 flags;

	/* common flags */
	flags = PQ_FLAGS_LB;

	/* feature flags */
	if (IS_QED_SRIOV(p_hwfn->cdev))
		flags |= PQ_FLAGS_VFS;

	/* protocol flags */
	switch (p_hwfn->hw_info.personality) {
	case QED_PCI_ETH:
		flags |= PQ_FLAGS_MCOS;
		break;
	case QED_PCI_FCOE:
		flags |= PQ_FLAGS_OFLD;
		break;
	case QED_PCI_ISCSI:
	case QED_PCI_NVMETCP:
		flags |= PQ_FLAGS_ACK | PQ_FLAGS_OOO | PQ_FLAGS_OFLD;
		break;
	case QED_PCI_ETH_ROCE:
		flags |= PQ_FLAGS_MCOS | PQ_FLAGS_OFLD | PQ_FLAGS_LLT;
		if (IS_QED_MULTI_TC_ROCE(p_hwfn))
			flags |= PQ_FLAGS_MTC;
		break;
	case QED_PCI_ETH_IWARP:
		flags |= PQ_FLAGS_MCOS | PQ_FLAGS_ACK | PQ_FLAGS_OOO |
		    PQ_FLAGS_OFLD;
		break;
	default:
		DP_ERR(p_hwfn,
		       "unknown personality %d\n", p_hwfn->hw_info.personality);
		return 0;
	}

	return flags;
}

/* Getters for resource amounts necessary for qm initialization */
static u8 qed_init_qm_get_num_tcs(struct qed_hwfn *p_hwfn)
{
	return p_hwfn->hw_info.num_hw_tc;
}

static u16 qed_init_qm_get_num_vfs(struct qed_hwfn *p_hwfn)
{
	return IS_QED_SRIOV(p_hwfn->cdev) ?
	       p_hwfn->cdev->p_iov_info->total_vfs : 0;
}

static u8 qed_init_qm_get_num_mtc_tcs(struct qed_hwfn *p_hwfn)
{
	u32 pq_flags = qed_get_pq_flags(p_hwfn);

	if (!(PQ_FLAGS_MTC & pq_flags))
		return 1;

	return qed_init_qm_get_num_tcs(p_hwfn);
}

#define NUM_DEFAULT_RLS 1

static u16 qed_init_qm_get_num_pf_rls(struct qed_hwfn *p_hwfn)
{
	u16 num_pf_rls, num_vfs = qed_init_qm_get_num_vfs(p_hwfn);

	/* num RLs can't exceed resource amount of rls or vports */
	num_pf_rls = (u16)min_t(u32, RESC_NUM(p_hwfn, QED_RL),
				RESC_NUM(p_hwfn, QED_VPORT));

	/* Make sure after we reserve there's something left */
	if (num_pf_rls < num_vfs + NUM_DEFAULT_RLS)
		return 0;

	/* subtract rls necessary for VFs and one default one for the PF */
	num_pf_rls -= num_vfs + NUM_DEFAULT_RLS;

	return num_pf_rls;
}

static u16 qed_init_qm_get_num_vports(struct qed_hwfn *p_hwfn)
{
	u32 pq_flags = qed_get_pq_flags(p_hwfn);

	/* all pqs share the same vport, except for vfs and pf_rl pqs */
	return (!!(PQ_FLAGS_RLS & pq_flags)) *
	       qed_init_qm_get_num_pf_rls(p_hwfn) +
	       (!!(PQ_FLAGS_VFS & pq_flags)) *
	       qed_init_qm_get_num_vfs(p_hwfn) + 1;
}

/* calc amount of PQs according to the requested flags */
static u16 qed_init_qm_get_num_pqs(struct qed_hwfn *p_hwfn)
{
	u32 pq_flags = qed_get_pq_flags(p_hwfn);

	return (!!(PQ_FLAGS_RLS & pq_flags)) *
	       qed_init_qm_get_num_pf_rls(p_hwfn) +
	       (!!(PQ_FLAGS_MCOS & pq_flags)) *
	       qed_init_qm_get_num_tcs(p_hwfn) +
	       (!!(PQ_FLAGS_LB & pq_flags)) + (!!(PQ_FLAGS_OOO & pq_flags)) +
	       (!!(PQ_FLAGS_ACK & pq_flags)) +
	       (!!(PQ_FLAGS_OFLD & pq_flags)) *
	       qed_init_qm_get_num_mtc_tcs(p_hwfn) +
	       (!!(PQ_FLAGS_LLT & pq_flags)) *
	       qed_init_qm_get_num_mtc_tcs(p_hwfn) +
	       (!!(PQ_FLAGS_VFS & pq_flags)) * qed_init_qm_get_num_vfs(p_hwfn);
}
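
/* Worked example (illustrative): an L2 PF (PQ_FLAGS_LB | PQ_FLAGS_MCOS) with
 * 4 TCs and SR-IOV disabled gets 4 MCOS PQs + 1 pure-LB PQ = 5 PQs.
 */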
1529 | |
1530 | /* initialize the top level QM params */ |
1531 | static void qed_init_qm_params(struct qed_hwfn *p_hwfn) |
1532 | { |
1533 | struct qed_qm_info *qm_info = &p_hwfn->qm_info; |
1534 | bool four_port; |
1535 | |
1536 | /* pq and vport bases for this PF */ |
1537 | qm_info->start_pq = (u16)RESC_START(p_hwfn, QED_PQ); |
1538 | qm_info->start_vport = (u8)RESC_START(p_hwfn, QED_VPORT); |
1539 | |
1540 | /* rate limiting and weighted fair queueing are always enabled */ |
1541 | qm_info->vport_rl_en = true; |
1542 | qm_info->vport_wfq_en = true; |
1543 | |
1544 | /* TC config is different for AH 4 port */ |
1545 | four_port = p_hwfn->cdev->num_ports_in_engine == MAX_NUM_PORTS_K2; |
1546 | |
1547 | /* in AH 4 port we have fewer TCs per port */ |
1548 | qm_info->max_phys_tcs_per_port = four_port ? NUM_PHYS_TCS_4PORT_K2 : |
1549 | NUM_OF_PHYS_TCS; |
1550 | |
1551 | /* unless MFW indicated otherwise, ooo_tc == 3 for |
1552 | * AH 4-port and 4 otherwise. |
1553 | */ |
1554 | if (!qm_info->ooo_tc) |
1555 | qm_info->ooo_tc = four_port ? DCBX_TCP_OOO_K2_4PORT_TC : |
1556 | DCBX_TCP_OOO_TC; |
1557 | } |
1558 | |
1559 | /* initialize qm vport params */ |
1560 | static void qed_init_qm_vport_params(struct qed_hwfn *p_hwfn) |
1561 | { |
1562 | struct qed_qm_info *qm_info = &p_hwfn->qm_info; |
1563 | u8 i; |
1564 | |
1565 | /* all vports participate in weighted fair queueing */ |
1566 | for (i = 0; i < qed_init_qm_get_num_vports(p_hwfn); i++) |
1567 | qm_info->qm_vport_params[i].wfq = 1; |
1568 | } |
1569 | |
1570 | /* initialize qm port params */ |
1571 | static void qed_init_qm_port_params(struct qed_hwfn *p_hwfn) |
1572 | { |
1573 | /* Initialize qm port parameters */ |
1574 | u8 i, active_phys_tcs, num_ports = p_hwfn->cdev->num_ports_in_engine; |
1575 | struct qed_dev *cdev = p_hwfn->cdev; |
1576 | |
1577 | /* indicate how ooo and high pri traffic is dealt with */ |
1578 | active_phys_tcs = num_ports == MAX_NUM_PORTS_K2 ? |
1579 | ACTIVE_TCS_BMAP_4PORT_K2 : |
1580 | ACTIVE_TCS_BMAP; |
1581 | |
1582 | for (i = 0; i < num_ports; i++) { |
1583 | struct init_qm_port_params *p_qm_port = |
1584 | &p_hwfn->qm_info.qm_port_params[i]; |
1585 | u16 pbf_max_cmd_lines; |
1586 | |
1587 | p_qm_port->active = 1; |
1588 | p_qm_port->active_phys_tcs = active_phys_tcs; |
1589 | pbf_max_cmd_lines = (u16)NUM_OF_PBF_CMD_LINES(cdev); |
1590 | p_qm_port->num_pbf_cmd_lines = pbf_max_cmd_lines / num_ports; |
1591 | p_qm_port->num_btb_blocks = NUM_OF_BTB_BLOCKS(cdev) / num_ports; |
1592 | } |
1593 | } |
1594 | |
1595 | /* Reset the params which must be reset for qm init. QM init may be called as |
1596 | * a result of flows other than driver load (e.g. dcbx renegotiation). Other |
1597 | * params may be affected by the init but would simply recalculate to the same |
1598 | * values. The allocations made for QM init, ports, vports, pqs and vfqs are not |
1599 | * affected as these amounts stay the same. |
1600 | */ |
1601 | static void qed_init_qm_reset_params(struct qed_hwfn *p_hwfn) |
1602 | { |
1603 | struct qed_qm_info *qm_info = &p_hwfn->qm_info; |
1604 | |
1605 | qm_info->num_pqs = 0; |
1606 | qm_info->num_vports = 0; |
1607 | qm_info->num_pf_rls = 0; |
1608 | qm_info->num_vf_pqs = 0; |
1609 | qm_info->first_vf_pq = 0; |
1610 | qm_info->first_mcos_pq = 0; |
1611 | qm_info->first_rl_pq = 0; |
1612 | } |
1613 | |
1614 | static void qed_init_qm_advance_vport(struct qed_hwfn *p_hwfn) |
1615 | { |
1616 | struct qed_qm_info *qm_info = &p_hwfn->qm_info; |
1617 | |
1618 | qm_info->num_vports++; |
1619 | |
1620 | if (qm_info->num_vports > qed_init_qm_get_num_vports(p_hwfn)) |
1621 | DP_ERR(p_hwfn, |
1622 | "vport overflow! qm_info->num_vports %d, qm_init_get_num_vports() %d\n" , |
1623 | qm_info->num_vports, qed_init_qm_get_num_vports(p_hwfn)); |
1624 | } |
1625 | |
1626 | /* initialize a single pq and manage qm_info resources accounting. |
1627 | * The pq_init_flags param determines whether the PQ is rate limited |
1628 | * (for VF or PF) and whether a new vport is allocated to the pq or not |
1629 | * (i.e. vport will be shared). |
1630 | */ |
1631 | |
1632 | /* flags for pq init */ |
1633 | #define PQ_INIT_SHARE_VPORT BIT(0) |
1634 | #define PQ_INIT_PF_RL BIT(1) |
1635 | #define PQ_INIT_VF_RL BIT(2) |
1636 | |
1637 | /* defines for pq init */ |
1638 | #define PQ_INIT_DEFAULT_WRR_GROUP 1 |
1639 | #define PQ_INIT_DEFAULT_TC 0 |

void qed_hw_info_set_offload_tc(struct qed_hw_info *p_info, u8 tc)
{
	p_info->offload_tc = tc;
	p_info->offload_tc_set = true;
}

static bool qed_is_offload_tc_set(struct qed_hwfn *p_hwfn)
{
	return p_hwfn->hw_info.offload_tc_set;
}

static u32 qed_get_offload_tc(struct qed_hwfn *p_hwfn)
{
	if (qed_is_offload_tc_set(p_hwfn))
		return p_hwfn->hw_info.offload_tc;

	return PQ_INIT_DEFAULT_TC;
}

static void qed_init_qm_pq(struct qed_hwfn *p_hwfn,
			   struct qed_qm_info *qm_info,
			   u8 tc, u32 pq_init_flags)
{
	u16 pq_idx = qm_info->num_pqs, max_pq = qed_init_qm_get_num_pqs(p_hwfn);

	if (pq_idx > max_pq)
		DP_ERR(p_hwfn,
		       "pq overflow! pq %d, max pq %d\n", pq_idx, max_pq);

	/* init pq params */
	qm_info->qm_pq_params[pq_idx].port_id = p_hwfn->port_id;
	qm_info->qm_pq_params[pq_idx].vport_id = qm_info->start_vport +
	    qm_info->num_vports;
	qm_info->qm_pq_params[pq_idx].tc_id = tc;
	qm_info->qm_pq_params[pq_idx].wrr_group = PQ_INIT_DEFAULT_WRR_GROUP;
	qm_info->qm_pq_params[pq_idx].rl_valid =
	    (pq_init_flags & PQ_INIT_PF_RL || pq_init_flags & PQ_INIT_VF_RL);

	/* qm params accounting */
	qm_info->num_pqs++;
	if (!(pq_init_flags & PQ_INIT_SHARE_VPORT))
		qm_info->num_vports++;

	if (pq_init_flags & PQ_INIT_PF_RL)
		qm_info->num_pf_rls++;

	if (qm_info->num_vports > qed_init_qm_get_num_vports(p_hwfn))
		DP_ERR(p_hwfn,
		       "vport overflow! qm_info->num_vports %d, qm_init_get_num_vports() %d\n",
		       qm_info->num_vports, qed_init_qm_get_num_vports(p_hwfn));

	if (qm_info->num_pf_rls > qed_init_qm_get_num_pf_rls(p_hwfn))
		DP_ERR(p_hwfn,
		       "rl overflow! qm_info->num_pf_rls %d, qm_init_get_num_pf_rls() %d\n",
		       qm_info->num_pf_rls, qed_init_qm_get_num_pf_rls(p_hwfn));
}

/* get pq index according to PQ_FLAGS */
static u16 *qed_init_qm_get_idx_from_flags(struct qed_hwfn *p_hwfn,
					   unsigned long pq_flags)
{
	struct qed_qm_info *qm_info = &p_hwfn->qm_info;

	/* Can't have multiple flags set here */
	if (bitmap_weight(&pq_flags,
			  sizeof(pq_flags) * BITS_PER_BYTE) > 1) {
		DP_ERR(p_hwfn, "requested multiple pq flags 0x%lx\n", pq_flags);
		goto err;
	}

	if (!(qed_get_pq_flags(p_hwfn) & pq_flags)) {
		DP_ERR(p_hwfn, "pq flag 0x%lx is not set\n", pq_flags);
		goto err;
	}

	switch (pq_flags) {
	case PQ_FLAGS_RLS:
		return &qm_info->first_rl_pq;
	case PQ_FLAGS_MCOS:
		return &qm_info->first_mcos_pq;
	case PQ_FLAGS_LB:
		return &qm_info->pure_lb_pq;
	case PQ_FLAGS_OOO:
		return &qm_info->ooo_pq;
	case PQ_FLAGS_ACK:
		return &qm_info->pure_ack_pq;
	case PQ_FLAGS_OFLD:
		return &qm_info->first_ofld_pq;
	case PQ_FLAGS_LLT:
		return &qm_info->first_llt_pq;
	case PQ_FLAGS_VFS:
		return &qm_info->first_vf_pq;
	default:
		goto err;
	}

err:
	return &qm_info->start_pq;
}

/* save pq index in qm info */
static void qed_init_qm_set_idx(struct qed_hwfn *p_hwfn,
				u32 pq_flags, u16 pq_val)
{
	u16 *base_pq_idx = qed_init_qm_get_idx_from_flags(p_hwfn, pq_flags);

	*base_pq_idx = p_hwfn->qm_info.start_pq + pq_val;
}

/* get tx pq index, with the PQ TX base already set (ready for context init) */
u16 qed_get_cm_pq_idx(struct qed_hwfn *p_hwfn, u32 pq_flags)
{
	u16 *base_pq_idx = qed_init_qm_get_idx_from_flags(p_hwfn, pq_flags);

	return *base_pq_idx + CM_TX_PQ_BASE;
}

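/* Map a traffic class to its multi-CoS TX PQ. Out-of-range TCs wrap via
 * (tc % max_tc); e.g. with max_tc of 4, tc 6 resolves to first_mcos_pq + 2.
 */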
u16 qed_get_cm_pq_idx_mcos(struct qed_hwfn *p_hwfn, u8 tc)
{
	u8 max_tc = qed_init_qm_get_num_tcs(p_hwfn);

	if (max_tc == 0) {
		DP_ERR(p_hwfn, "pq with flag 0x%lx does not exist\n",
		       PQ_FLAGS_MCOS);
		return p_hwfn->qm_info.start_pq;
	}

	if (tc > max_tc)
		DP_ERR(p_hwfn, "tc %d must be smaller than %d\n", tc, max_tc);

	return qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_MCOS) + (tc % max_tc);
}

u16 qed_get_cm_pq_idx_vf(struct qed_hwfn *p_hwfn, u16 vf)
{
	u16 max_vf = qed_init_qm_get_num_vfs(p_hwfn);

	if (max_vf == 0) {
		DP_ERR(p_hwfn, "pq with flag 0x%lx does not exist\n",
		       PQ_FLAGS_VFS);
		return p_hwfn->qm_info.start_pq;
	}

	if (vf > max_vf)
		DP_ERR(p_hwfn, "vf %d must be smaller than %d\n", vf, max_vf);

	return qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_VFS) + (vf % max_vf);
}

u16 qed_get_cm_pq_idx_ofld_mtc(struct qed_hwfn *p_hwfn, u8 tc)
{
	u16 first_ofld_pq, pq_offset;

	first_ofld_pq = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OFLD);
	pq_offset = (tc < qed_init_qm_get_num_mtc_tcs(p_hwfn)) ?
		    tc : PQ_INIT_DEFAULT_TC;

	return first_ofld_pq + pq_offset;
}

u16 qed_get_cm_pq_idx_llt_mtc(struct qed_hwfn *p_hwfn, u8 tc)
{
	u16 first_llt_pq, pq_offset;

	first_llt_pq = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_LLT);
	pq_offset = (tc < qed_init_qm_get_num_mtc_tcs(p_hwfn)) ?
		    tc : PQ_INIT_DEFAULT_TC;

	return first_llt_pq + pq_offset;
}

/* Functions for creating specific types of pqs */
static void qed_init_qm_lb_pq(struct qed_hwfn *p_hwfn)
{
	struct qed_qm_info *qm_info = &p_hwfn->qm_info;

	if (!(qed_get_pq_flags(p_hwfn) & PQ_FLAGS_LB))
		return;

	qed_init_qm_set_idx(p_hwfn, PQ_FLAGS_LB, qm_info->num_pqs);
	qed_init_qm_pq(p_hwfn, qm_info, PURE_LB_TC, PQ_INIT_SHARE_VPORT);
}

static void qed_init_qm_ooo_pq(struct qed_hwfn *p_hwfn)
{
	struct qed_qm_info *qm_info = &p_hwfn->qm_info;

	if (!(qed_get_pq_flags(p_hwfn) & PQ_FLAGS_OOO))
		return;

	qed_init_qm_set_idx(p_hwfn, PQ_FLAGS_OOO, qm_info->num_pqs);
	qed_init_qm_pq(p_hwfn, qm_info, qm_info->ooo_tc, PQ_INIT_SHARE_VPORT);
}

static void qed_init_qm_pure_ack_pq(struct qed_hwfn *p_hwfn)
{
	struct qed_qm_info *qm_info = &p_hwfn->qm_info;

	if (!(qed_get_pq_flags(p_hwfn) & PQ_FLAGS_ACK))
		return;

	qed_init_qm_set_idx(p_hwfn, PQ_FLAGS_ACK, qm_info->num_pqs);
	qed_init_qm_pq(p_hwfn, qm_info, qed_get_offload_tc(p_hwfn),
		       PQ_INIT_SHARE_VPORT);
}

static void qed_init_qm_mtc_pqs(struct qed_hwfn *p_hwfn)
{
	u8 num_tcs = qed_init_qm_get_num_mtc_tcs(p_hwfn);
	struct qed_qm_info *qm_info = &p_hwfn->qm_info;
	u8 tc;

	/* override pq's TC if offload TC is set */
	for (tc = 0; tc < num_tcs; tc++)
		qed_init_qm_pq(p_hwfn, qm_info,
			       qed_is_offload_tc_set(p_hwfn) ?
			       p_hwfn->hw_info.offload_tc : tc,
			       PQ_INIT_SHARE_VPORT);
}

static void qed_init_qm_offload_pq(struct qed_hwfn *p_hwfn)
{
	struct qed_qm_info *qm_info = &p_hwfn->qm_info;

	if (!(qed_get_pq_flags(p_hwfn) & PQ_FLAGS_OFLD))
		return;

	qed_init_qm_set_idx(p_hwfn, PQ_FLAGS_OFLD, qm_info->num_pqs);
	qed_init_qm_mtc_pqs(p_hwfn);
}

static void qed_init_qm_low_latency_pq(struct qed_hwfn *p_hwfn)
{
	struct qed_qm_info *qm_info = &p_hwfn->qm_info;

	if (!(qed_get_pq_flags(p_hwfn) & PQ_FLAGS_LLT))
		return;

	qed_init_qm_set_idx(p_hwfn, PQ_FLAGS_LLT, qm_info->num_pqs);
	qed_init_qm_mtc_pqs(p_hwfn);
}

static void qed_init_qm_mcos_pqs(struct qed_hwfn *p_hwfn)
{
	struct qed_qm_info *qm_info = &p_hwfn->qm_info;
	u8 tc_idx;

	if (!(qed_get_pq_flags(p_hwfn) & PQ_FLAGS_MCOS))
		return;

	qed_init_qm_set_idx(p_hwfn, PQ_FLAGS_MCOS, qm_info->num_pqs);
	for (tc_idx = 0; tc_idx < qed_init_qm_get_num_tcs(p_hwfn); tc_idx++)
		qed_init_qm_pq(p_hwfn, qm_info, tc_idx, PQ_INIT_SHARE_VPORT);
}

static void qed_init_qm_vf_pqs(struct qed_hwfn *p_hwfn)
{
	struct qed_qm_info *qm_info = &p_hwfn->qm_info;
	u16 vf_idx, num_vfs = qed_init_qm_get_num_vfs(p_hwfn);

	if (!(qed_get_pq_flags(p_hwfn) & PQ_FLAGS_VFS))
		return;

	qed_init_qm_set_idx(p_hwfn, PQ_FLAGS_VFS, qm_info->num_pqs);
	qm_info->num_vf_pqs = num_vfs;
	for (vf_idx = 0; vf_idx < num_vfs; vf_idx++)
		qed_init_qm_pq(p_hwfn,
			       qm_info, PQ_INIT_DEFAULT_TC, PQ_INIT_VF_RL);
}

static void qed_init_qm_rl_pqs(struct qed_hwfn *p_hwfn)
{
	u16 pf_rls_idx, num_pf_rls = qed_init_qm_get_num_pf_rls(p_hwfn);
	struct qed_qm_info *qm_info = &p_hwfn->qm_info;

	if (!(qed_get_pq_flags(p_hwfn) & PQ_FLAGS_RLS))
		return;

	qed_init_qm_set_idx(p_hwfn, PQ_FLAGS_RLS, qm_info->num_pqs);
	for (pf_rls_idx = 0; pf_rls_idx < num_pf_rls; pf_rls_idx++)
		qed_init_qm_pq(p_hwfn, qm_info, qed_get_offload_tc(p_hwfn),
			       PQ_INIT_PF_RL);
}

static void qed_init_qm_pq_params(struct qed_hwfn *p_hwfn)
{
	/* rate limited pqs, must come first (FW assumption) */
	qed_init_qm_rl_pqs(p_hwfn);

	/* pqs for multi cos */
	qed_init_qm_mcos_pqs(p_hwfn);

	/* pure loopback pq */
	qed_init_qm_lb_pq(p_hwfn);

	/* out of order pq */
	qed_init_qm_ooo_pq(p_hwfn);

	/* pure ack pq */
	qed_init_qm_pure_ack_pq(p_hwfn);

	/* pq for offloaded protocol */
	qed_init_qm_offload_pq(p_hwfn);

	/* low latency pq */
	qed_init_qm_low_latency_pq(p_hwfn);

	/* done sharing vports */
	qed_init_qm_advance_vport(p_hwfn);

	/* pqs for vfs */
	qed_init_qm_vf_pqs(p_hwfn);
}
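
/* The resulting PQ array layout therefore is (flags permitting):
 * [PF RL PQs][multi-CoS PQs][pure LB][OOO][pure ACK][OFLD][LLT][VF PQs],
 * with the rate-limited PQs first, as the FW requires.
 */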

/* compare values of getters against resources amounts */
static int qed_init_qm_sanity(struct qed_hwfn *p_hwfn)
{
	if (qed_init_qm_get_num_vports(p_hwfn) > RESC_NUM(p_hwfn, QED_VPORT)) {
		DP_ERR(p_hwfn, "requested amount of vports exceeds resource\n");
		return -EINVAL;
	}

	if (qed_init_qm_get_num_pqs(p_hwfn) <= RESC_NUM(p_hwfn, QED_PQ))
		return 0;

	if (QED_IS_ROCE_PERSONALITY(p_hwfn)) {
		p_hwfn->hw_info.multi_tc_roce_en = false;
		DP_NOTICE(p_hwfn,
			  "multi-tc roce was disabled to reduce requested amount of pqs\n");
		if (qed_init_qm_get_num_pqs(p_hwfn) <= RESC_NUM(p_hwfn, QED_PQ))
			return 0;
	}

	DP_ERR(p_hwfn, "requested amount of pqs exceeds resource\n");
	return -EINVAL;
}

static void qed_dp_init_qm_params(struct qed_hwfn *p_hwfn)
{
	struct qed_qm_info *qm_info = &p_hwfn->qm_info;
	struct init_qm_vport_params *vport;
	struct init_qm_port_params *port;
	struct init_qm_pq_params *pq;
	int i, tc;

	/* top level params */
	DP_VERBOSE(p_hwfn,
		   NETIF_MSG_HW,
		   "qm init top level params: start_pq %d, start_vport %d, pure_lb_pq %d, offload_pq %d, llt_pq %d, pure_ack_pq %d\n",
		   qm_info->start_pq,
		   qm_info->start_vport,
		   qm_info->pure_lb_pq,
		   qm_info->first_ofld_pq,
		   qm_info->first_llt_pq,
		   qm_info->pure_ack_pq);
	DP_VERBOSE(p_hwfn,
		   NETIF_MSG_HW,
		   "ooo_pq %d, first_vf_pq %d, num_pqs %d, num_vf_pqs %d, num_vports %d, max_phys_tcs_per_port %d\n",
		   qm_info->ooo_pq,
		   qm_info->first_vf_pq,
		   qm_info->num_pqs,
		   qm_info->num_vf_pqs,
		   qm_info->num_vports, qm_info->max_phys_tcs_per_port);
	DP_VERBOSE(p_hwfn,
		   NETIF_MSG_HW,
		   "pf_rl_en %d, pf_wfq_en %d, vport_rl_en %d, vport_wfq_en %d, pf_wfq %d, pf_rl %d, num_pf_rls %d, pq_flags %x\n",
		   qm_info->pf_rl_en,
		   qm_info->pf_wfq_en,
		   qm_info->vport_rl_en,
		   qm_info->vport_wfq_en,
		   qm_info->pf_wfq,
		   qm_info->pf_rl,
		   qm_info->num_pf_rls, qed_get_pq_flags(p_hwfn));

	/* port table */
	for (i = 0; i < p_hwfn->cdev->num_ports_in_engine; i++) {
		port = &(qm_info->qm_port_params[i]);
		DP_VERBOSE(p_hwfn,
			   NETIF_MSG_HW,
			   "port idx %d, active %d, active_phys_tcs %d, num_pbf_cmd_lines %d, num_btb_blocks %d, reserved %d\n",
			   i,
			   port->active,
			   port->active_phys_tcs,
			   port->num_pbf_cmd_lines,
			   port->num_btb_blocks, port->reserved);
	}

	/* vport table */
	for (i = 0; i < qm_info->num_vports; i++) {
		vport = &(qm_info->qm_vport_params[i]);
		DP_VERBOSE(p_hwfn,
			   NETIF_MSG_HW,
			   "vport idx %d, wfq %d, first_tx_pq_id [ ",
			   qm_info->start_vport + i, vport->wfq);
		for (tc = 0; tc < NUM_OF_TCS; tc++)
			DP_VERBOSE(p_hwfn,
				   NETIF_MSG_HW,
				   "%d ", vport->first_tx_pq_id[tc]);
		DP_VERBOSE(p_hwfn, NETIF_MSG_HW, "]\n");
	}

	/* pq table */
	for (i = 0; i < qm_info->num_pqs; i++) {
		pq = &(qm_info->qm_pq_params[i]);
		DP_VERBOSE(p_hwfn,
			   NETIF_MSG_HW,
			   "pq idx %d, port %d, vport_id %d, tc %d, wrr_grp %d, rl_valid %d rl_id %d\n",
			   qm_info->start_pq + i,
			   pq->port_id,
			   pq->vport_id,
			   pq->tc_id, pq->wrr_group, pq->rl_valid, pq->rl_id);
	}
}

static void qed_init_qm_info(struct qed_hwfn *p_hwfn)
{
	/* reset params required for init run */
	qed_init_qm_reset_params(p_hwfn);

	/* init QM top level params */
	qed_init_qm_params(p_hwfn);

	/* init QM port params */
	qed_init_qm_port_params(p_hwfn);

	/* init QM vport params */
	qed_init_qm_vport_params(p_hwfn);

	/* init QM physical queue params */
	qed_init_qm_pq_params(p_hwfn);

	/* display all that init */
	qed_dp_init_qm_params(p_hwfn);
}

/* This function reconfigures the QM pf on the fly.
 * For this purpose we:
 * 1. reconfigure the QM database
 * 2. set new values to runtime array
 * 3. send an sdm_qm_cmd through the rbc interface to stop the QM
 * 4. activate init tool in QM_PF stage
 * 5. send an sdm_qm_cmd through rbc interface to release the QM
 */
int qed_qm_reconf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	struct qed_qm_info *qm_info = &p_hwfn->qm_info;
	bool b_rc;
	int rc;

	/* initialize qed's qm data structure */
	qed_init_qm_info(p_hwfn);

	/* stop PF's qm queues */
	spin_lock_bh(&qm_lock);
	b_rc = qed_send_qm_stop_cmd(p_hwfn, p_ptt, false, true,
				    qm_info->start_pq, qm_info->num_pqs);
	spin_unlock_bh(&qm_lock);
	if (!b_rc)
		return -EINVAL;

	/* prepare QM portion of runtime array */
	qed_qm_init_pf(p_hwfn, p_ptt, false);

	/* activate init tool on runtime array */
	rc = qed_init_run(p_hwfn, p_ptt, PHASE_QM_PF, p_hwfn->rel_pf_id,
			  p_hwfn->hw_info.hw_mode);
	if (rc)
		return rc;

	/* start PF's qm queues */
	spin_lock_bh(&qm_lock);
	b_rc = qed_send_qm_stop_cmd(p_hwfn, p_ptt, true, true,
				    qm_info->start_pq, qm_info->num_pqs);
	spin_unlock_bh(&qm_lock);
	if (!b_rc)
		return -EINVAL;

	return 0;
}

static int qed_alloc_qm_data(struct qed_hwfn *p_hwfn)
{
	struct qed_qm_info *qm_info = &p_hwfn->qm_info;
	int rc;

	rc = qed_init_qm_sanity(p_hwfn);
	if (rc)
		goto alloc_err;

	qm_info->qm_pq_params = kcalloc(qed_init_qm_get_num_pqs(p_hwfn),
					sizeof(*qm_info->qm_pq_params),
					GFP_KERNEL);
	if (!qm_info->qm_pq_params)
		goto alloc_err;

	qm_info->qm_vport_params = kcalloc(qed_init_qm_get_num_vports(p_hwfn),
					   sizeof(*qm_info->qm_vport_params),
					   GFP_KERNEL);
	if (!qm_info->qm_vport_params)
		goto alloc_err;

	qm_info->qm_port_params = kcalloc(p_hwfn->cdev->num_ports_in_engine,
					  sizeof(*qm_info->qm_port_params),
					  GFP_KERNEL);
	if (!qm_info->qm_port_params)
		goto alloc_err;

	qm_info->wfq_data = kcalloc(qed_init_qm_get_num_vports(p_hwfn),
				    sizeof(*qm_info->wfq_data),
				    GFP_KERNEL);
	if (!qm_info->wfq_data)
		goto alloc_err;

	return 0;

alloc_err:
	DP_NOTICE(p_hwfn, "Failed to allocate memory for QM params\n");
	qed_qm_info_free(p_hwfn);
	return -ENOMEM;
}

int qed_resc_alloc(struct qed_dev *cdev)
{
	u32 rdma_tasks, excess_tasks;
	u32 line_count;
	int i, rc = 0;

	if (IS_VF(cdev)) {
		for_each_hwfn(cdev, i) {
			rc = qed_l2_alloc(&cdev->hwfns[i]);
			if (rc)
				return rc;
		}
		return rc;
	}

	cdev->fw_data = kzalloc(sizeof(*cdev->fw_data), GFP_KERNEL);
	if (!cdev->fw_data)
		return -ENOMEM;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
		u32 n_eqes, num_cons;

		/* Initialize the doorbell recovery mechanism */
		rc = qed_db_recovery_setup(p_hwfn);
		if (rc)
			goto alloc_err;

		/* First allocate the context manager structure */
		rc = qed_cxt_mngr_alloc(p_hwfn);
		if (rc)
			goto alloc_err;

		/* Set the HW cid/tid numbers (in the context manager)
		 * Must be done prior to any further computations.
		 */
		rc = qed_cxt_set_pf_params(p_hwfn, RDMA_MAX_TIDS);
		if (rc)
			goto alloc_err;

		rc = qed_alloc_qm_data(p_hwfn);
		if (rc)
			goto alloc_err;

		/* init qm info */
		qed_init_qm_info(p_hwfn);

		/* Compute the ILT client partition */
		rc = qed_cxt_cfg_ilt_compute(p_hwfn, &line_count);
		if (rc) {
			DP_NOTICE(p_hwfn,
				  "too many ILT lines; re-computing with fewer lines\n");
			/* In case there are not enough ILT lines we reduce the
			 * number of RDMA tasks and re-compute.
			 */
			excess_tasks =
			    qed_cxt_cfg_ilt_compute_excess(p_hwfn, line_count);
			if (!excess_tasks)
				goto alloc_err;

			rdma_tasks = RDMA_MAX_TIDS - excess_tasks;
			rc = qed_cxt_set_pf_params(p_hwfn, rdma_tasks);
			if (rc)
				goto alloc_err;

			rc = qed_cxt_cfg_ilt_compute(p_hwfn, &line_count);
			if (rc) {
				DP_ERR(p_hwfn,
				       "failed ILT compute. Requested too many lines: %u\n",
				       line_count);

				goto alloc_err;
			}
		}

		/* CID map / ILT shadow table / T2
		 * The tables' sizes are determined by the computations above
		 */
		rc = qed_cxt_tables_alloc(p_hwfn);
		if (rc)
			goto alloc_err;

		/* SPQ, must follow ILT because it initializes the SPQ context */
		rc = qed_spq_alloc(p_hwfn);
		if (rc)
			goto alloc_err;

		/* SP status block allocation */
		p_hwfn->p_dpc_ptt = qed_get_reserved_ptt(p_hwfn,
							 RESERVED_PTT_DPC);

		rc = qed_int_alloc(p_hwfn, p_hwfn->p_main_ptt);
		if (rc)
			goto alloc_err;

		rc = qed_iov_alloc(p_hwfn);
		if (rc)
			goto alloc_err;

		/* EQ */
		n_eqes = qed_chain_get_capacity(&p_hwfn->p_spq->chain);
		if (QED_IS_RDMA_PERSONALITY(p_hwfn)) {
			u32 n_srq = qed_cxt_get_total_srq_count(p_hwfn);
			enum protocol_type rdma_proto;

			if (QED_IS_ROCE_PERSONALITY(p_hwfn))
				rdma_proto = PROTOCOLID_ROCE;
			else
				rdma_proto = PROTOCOLID_IWARP;

			num_cons = qed_cxt_get_proto_cid_count(p_hwfn,
							       rdma_proto,
							       NULL) * 2;
			/* EQ should be able to get events from all SRQ's
			 * at the same time
			 */
			n_eqes += num_cons + 2 * MAX_NUM_VFS_BB + n_srq;
		} else if (p_hwfn->hw_info.personality == QED_PCI_ISCSI ||
			   p_hwfn->hw_info.personality == QED_PCI_NVMETCP) {
			num_cons =
			    qed_cxt_get_proto_cid_count(p_hwfn,
							PROTOCOLID_TCP_ULP,
							NULL);
			n_eqes += 2 * num_cons;
		}

		if (n_eqes > 0xFFFF) {
			DP_ERR(p_hwfn,
			       "Cannot allocate 0x%x EQ elements. The maximum of a u16 chain is 0x%x\n",
			       n_eqes, 0xFFFF);
			goto alloc_no_mem;
		}

		rc = qed_eq_alloc(p_hwfn, (u16)n_eqes);
		if (rc)
			goto alloc_err;

		rc = qed_consq_alloc(p_hwfn);
		if (rc)
			goto alloc_err;

		rc = qed_l2_alloc(p_hwfn);
		if (rc)
			goto alloc_err;

#ifdef CONFIG_QED_LL2
		if (p_hwfn->using_ll2) {
			rc = qed_ll2_alloc(p_hwfn);
			if (rc)
				goto alloc_err;
		}
#endif

		if (p_hwfn->hw_info.personality == QED_PCI_FCOE) {
			rc = qed_fcoe_alloc(p_hwfn);
			if (rc)
				goto alloc_err;
		}

		if (p_hwfn->hw_info.personality == QED_PCI_ISCSI) {
			rc = qed_iscsi_alloc(p_hwfn);
			if (rc)
				goto alloc_err;
			rc = qed_ooo_alloc(p_hwfn);
			if (rc)
				goto alloc_err;
		}

		if (p_hwfn->hw_info.personality == QED_PCI_NVMETCP) {
			rc = qed_nvmetcp_alloc(p_hwfn);
			if (rc)
				goto alloc_err;
			rc = qed_ooo_alloc(p_hwfn);
			if (rc)
				goto alloc_err;
		}

		if (QED_IS_RDMA_PERSONALITY(p_hwfn)) {
			rc = qed_rdma_info_alloc(p_hwfn);
			if (rc)
				goto alloc_err;
		}

		/* DMA info initialization */
		rc = qed_dmae_info_alloc(p_hwfn);
		if (rc)
			goto alloc_err;

		/* DCBX initialization */
		rc = qed_dcbx_info_alloc(p_hwfn);
		if (rc)
			goto alloc_err;

		rc = qed_dbg_alloc_user_data(p_hwfn, &p_hwfn->dbg_user_info);
		if (rc)
			goto alloc_err;
	}

	rc = qed_llh_alloc(cdev);
	if (rc) {
		DP_NOTICE(cdev,
			  "Failed to allocate memory for the llh_info structure\n");
		goto alloc_err;
	}

	cdev->reset_stats = kzalloc(sizeof(*cdev->reset_stats), GFP_KERNEL);
	if (!cdev->reset_stats)
		goto alloc_no_mem;

	return 0;

alloc_no_mem:
	rc = -ENOMEM;
alloc_err:
	qed_resc_free(cdev);
	return rc;
}

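/* FW error events with a function-wide scope and an entity id beyond the PF
 * range are taken to originate from a VF and are handed to the SR-IOV code;
 * anything else is logged and rejected.
 */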
static int qed_fw_err_handler(struct qed_hwfn *p_hwfn,
			      u8 opcode,
			      u16 echo,
			      union event_ring_data *data, u8 fw_return_code)
{
	if (fw_return_code != COMMON_ERR_CODE_ERROR)
		goto eqe_unexpected;

	if (data->err_data.recovery_scope == ERR_SCOPE_FUNC &&
	    le16_to_cpu(data->err_data.entity_id) >= MAX_NUM_PFS) {
		qed_sriov_vfpf_malicious(p_hwfn, &data->err_data);
		return 0;
	}

eqe_unexpected:
	DP_ERR(p_hwfn,
	       "Skipping unexpected eqe 0x%02x, FW return code 0x%x, echo 0x%x\n",
	       opcode, fw_return_code, echo);
	return -EINVAL;
}

static int qed_common_eqe_event(struct qed_hwfn *p_hwfn,
				u8 opcode,
				__le16 echo,
				union event_ring_data *data,
				u8 fw_return_code)
{
	switch (opcode) {
	case COMMON_EVENT_VF_PF_CHANNEL:
	case COMMON_EVENT_VF_FLR:
		return qed_sriov_eqe_event(p_hwfn, opcode, echo, data,
					   fw_return_code);
	case COMMON_EVENT_FW_ERROR:
		return qed_fw_err_handler(p_hwfn, opcode,
					  le16_to_cpu(echo), data,
					  fw_return_code);
	default:
		DP_INFO(p_hwfn->cdev, "Unknown eqe event 0x%02x, echo 0x%x\n",
			opcode, echo);
		return -EINVAL;
	}
}

void qed_resc_setup(struct qed_dev *cdev)
{
	int i;

	if (IS_VF(cdev)) {
		for_each_hwfn(cdev, i)
			qed_l2_setup(&cdev->hwfns[i]);
		return;
	}

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		qed_cxt_mngr_setup(p_hwfn);
		qed_spq_setup(p_hwfn);
		qed_eq_setup(p_hwfn);
		qed_consq_setup(p_hwfn);

		/* Read shadow of current MFW mailbox */
		qed_mcp_read_mb(p_hwfn, p_hwfn->p_main_ptt);
		memcpy(p_hwfn->mcp_info->mfw_mb_shadow,
		       p_hwfn->mcp_info->mfw_mb_cur,
		       p_hwfn->mcp_info->mfw_mb_length);

		qed_int_setup(p_hwfn, p_hwfn->p_main_ptt);

		qed_l2_setup(p_hwfn);
		qed_iov_setup(p_hwfn);
		qed_spq_register_async_cb(p_hwfn, PROTOCOLID_COMMON,
					  qed_common_eqe_event);
#ifdef CONFIG_QED_LL2
		if (p_hwfn->using_ll2)
			qed_ll2_setup(p_hwfn);
#endif
		if (p_hwfn->hw_info.personality == QED_PCI_FCOE)
			qed_fcoe_setup(p_hwfn);

		if (p_hwfn->hw_info.personality == QED_PCI_ISCSI) {
			qed_iscsi_setup(p_hwfn);
			qed_ooo_setup(p_hwfn);
		}

		if (p_hwfn->hw_info.personality == QED_PCI_NVMETCP) {
			qed_nvmetcp_setup(p_hwfn);
			qed_ooo_setup(p_hwfn);
		}
	}
}

#define FINAL_CLEANUP_POLL_CNT          (100)
#define FINAL_CLEANUP_POLL_TIME         (10)
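
/* Worst-case wait below is FINAL_CLEANUP_POLL_CNT * FINAL_CLEANUP_POLL_TIME
 * msec, i.e. roughly one second.
 */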
int qed_final_cleanup(struct qed_hwfn *p_hwfn,
		      struct qed_ptt *p_ptt, u16 id, bool is_vf)
{
	u32 command = 0, addr, count = FINAL_CLEANUP_POLL_CNT;
	int rc = -EBUSY;

	addr = GET_GTT_REG_ADDR(GTT_BAR0_MAP_REG_USDM_RAM,
				USTORM_FLR_FINAL_ACK, p_hwfn->rel_pf_id);
	if (is_vf)
		id += 0x10;

	command |= X_FINAL_CLEANUP_AGG_INT <<
	    SDM_AGG_INT_COMP_PARAMS_AGG_INT_INDEX_SHIFT;
	command |= 1 << SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_ENABLE_SHIFT;
	command |= id << SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_BIT_SHIFT;
	command |= SDM_COMP_TYPE_AGG_INT << SDM_OP_GEN_COMP_TYPE_SHIFT;

	/* Make sure notification is not set before initiating final cleanup */
	if (REG_RD(p_hwfn, addr)) {
		DP_NOTICE(p_hwfn,
			  "Unexpected; Found final cleanup notification before initiating final cleanup\n");
		REG_WR(p_hwfn, addr, 0);
	}

	DP_VERBOSE(p_hwfn, QED_MSG_IOV,
		   "Sending final cleanup for PFVF[%d] [Command %08x]\n",
		   id, command);

	qed_wr(p_hwfn, p_ptt, XSDM_REG_OPERATION_GEN, command);

	/* Poll until completion */
	while (!REG_RD(p_hwfn, addr) && count--)
		msleep(FINAL_CLEANUP_POLL_TIME);

	if (REG_RD(p_hwfn, addr))
		rc = 0;
	else
		DP_NOTICE(p_hwfn,
			  "Failed to receive FW final cleanup notification\n");

	/* Cleanup afterwards */
	REG_WR(p_hwfn, addr, 0);

	return rc;
}

static int qed_calc_hw_mode(struct qed_hwfn *p_hwfn)
{
	int hw_mode = 0;

	if (QED_IS_BB_B0(p_hwfn->cdev)) {
		hw_mode |= 1 << MODE_BB;
	} else if (QED_IS_AH(p_hwfn->cdev)) {
		hw_mode |= 1 << MODE_K2;
	} else {
		DP_NOTICE(p_hwfn, "Unknown chip type %#x\n",
			  p_hwfn->cdev->type);
		return -EINVAL;
	}

	switch (p_hwfn->cdev->num_ports_in_engine) {
	case 1:
		hw_mode |= 1 << MODE_PORTS_PER_ENG_1;
		break;
	case 2:
		hw_mode |= 1 << MODE_PORTS_PER_ENG_2;
		break;
	case 4:
		hw_mode |= 1 << MODE_PORTS_PER_ENG_4;
		break;
	default:
		DP_NOTICE(p_hwfn, "num_ports_in_engine = %d not supported\n",
			  p_hwfn->cdev->num_ports_in_engine);
		return -EINVAL;
	}

	if (test_bit(QED_MF_OVLAN_CLSS, &p_hwfn->cdev->mf_bits))
		hw_mode |= 1 << MODE_MF_SD;
	else
		hw_mode |= 1 << MODE_MF_SI;

	hw_mode |= 1 << MODE_ASIC;

	if (p_hwfn->cdev->num_hwfns > 1)
		hw_mode |= 1 << MODE_100G;

	p_hwfn->hw_info.hw_mode = hw_mode;

	DP_VERBOSE(p_hwfn, (NETIF_MSG_PROBE | NETIF_MSG_IFUP),
		   "Configuring function for hw_mode: 0x%08x\n",
		   p_hwfn->hw_info.hw_mode);

	return 0;
}

/* Init run time data for all PFs on an engine. */
static void qed_init_cau_rt_data(struct qed_dev *cdev)
{
	u32 offset = CAU_REG_SB_VAR_MEMORY_RT_OFFSET;
	int i, igu_sb_id;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
		struct qed_igu_info *p_igu_info;
		struct qed_igu_block *p_block;
		struct cau_sb_entry sb_entry;

		p_igu_info = p_hwfn->hw_info.p_igu_info;

		for (igu_sb_id = 0;
		     igu_sb_id < QED_MAPPING_MEMORY_SIZE(cdev); igu_sb_id++) {
			p_block = &p_igu_info->entry[igu_sb_id];

			if (!p_block->is_pf)
				continue;

			qed_init_cau_sb_entry(p_hwfn, &sb_entry,
					      p_block->function_id, 0, 0);
			STORE_RT_REG_AGG(p_hwfn, offset + igu_sb_id * 2,
					 sb_entry);
		}
	}
}

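/* Worked example: with PSWRQ2_REG_WR_MBS0 reading 2 (wr_mbs = 512) and an OS
 * cache line of 64 bytes, cache_line_size becomes min(64, 512) = 64 and val 1
 * is programmed; the "suboptimal" note only fires when wr_mbs < L1_CACHE_BYTES.
 */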
static void qed_init_cache_line_size(struct qed_hwfn *p_hwfn,
				     struct qed_ptt *p_ptt)
{
	u32 val, wr_mbs, cache_line_size;

	val = qed_rd(p_hwfn, p_ptt, PSWRQ2_REG_WR_MBS0);
	switch (val) {
	case 0:
		wr_mbs = 128;
		break;
	case 1:
		wr_mbs = 256;
		break;
	case 2:
		wr_mbs = 512;
		break;
	default:
		DP_INFO(p_hwfn,
			"Unexpected value of PSWRQ2_REG_WR_MBS0 [0x%x]. Avoid configuring PGLUE_B_REG_CACHE_LINE_SIZE.\n",
			val);
		return;
	}

	cache_line_size = min_t(u32, L1_CACHE_BYTES, wr_mbs);
	switch (cache_line_size) {
	case 32:
		val = 0;
		break;
	case 64:
		val = 1;
		break;
	case 128:
		val = 2;
		break;
	case 256:
		val = 3;
		break;
	default:
		DP_INFO(p_hwfn,
			"Unexpected value of cache line size [0x%x]. Avoid configuring PGLUE_B_REG_CACHE_LINE_SIZE.\n",
			cache_line_size);
	}

	if (wr_mbs < L1_CACHE_BYTES)
		DP_INFO(p_hwfn,
			"The cache line size for padding is suboptimal for performance [OS cache line size 0x%x, wr mbs 0x%x]\n",
			L1_CACHE_BYTES, wr_mbs);

	STORE_RT_REG(p_hwfn, PGLUE_REG_B_CACHE_LINE_SIZE_RT_OFFSET, val);
	if (val > 0) {
		STORE_RT_REG(p_hwfn, PSWRQ2_REG_DRAM_ALIGN_WR_RT_OFFSET, val);
		STORE_RT_REG(p_hwfn, PSWRQ2_REG_DRAM_ALIGN_RD_RT_OFFSET, val);
	}
}

static int qed_hw_init_common(struct qed_hwfn *p_hwfn,
			      struct qed_ptt *p_ptt, int hw_mode)
{
	struct qed_qm_info *qm_info = &p_hwfn->qm_info;
	struct qed_qm_common_rt_init_params *params;
	struct qed_dev *cdev = p_hwfn->cdev;
	u8 vf_id, max_num_vfs;
	u16 num_pfs, pf_id;
	u32 concrete_fid;
	int rc = 0;

	params = kzalloc(sizeof(*params), GFP_KERNEL);
	if (!params) {
		DP_NOTICE(p_hwfn->cdev,
			  "Failed to allocate common init params\n");

		return -ENOMEM;
	}

	qed_init_cau_rt_data(cdev);

	/* Program GTT windows */
	qed_gtt_init(p_hwfn);

	if (p_hwfn->mcp_info) {
		if (p_hwfn->mcp_info->func_info.bandwidth_max)
			qm_info->pf_rl_en = true;
		if (p_hwfn->mcp_info->func_info.bandwidth_min)
			qm_info->pf_wfq_en = true;
	}

	params->max_ports_per_engine = p_hwfn->cdev->num_ports_in_engine;
	params->max_phys_tcs_per_port = qm_info->max_phys_tcs_per_port;
	params->pf_rl_en = qm_info->pf_rl_en;
	params->pf_wfq_en = qm_info->pf_wfq_en;
	params->global_rl_en = qm_info->vport_rl_en;
	params->vport_wfq_en = qm_info->vport_wfq_en;
	params->port_params = qm_info->qm_port_params;

	qed_qm_common_rt_init(p_hwfn, params);

	qed_cxt_hw_init_common(p_hwfn);

	qed_init_cache_line_size(p_hwfn, p_ptt);

	rc = qed_init_run(p_hwfn, p_ptt, PHASE_ENGINE, ANY_PHASE_ID, hw_mode);
	if (rc)
		goto out;

	qed_wr(p_hwfn, p_ptt, PSWRQ2_REG_L2P_VALIDATE_VFID, 0);
	qed_wr(p_hwfn, p_ptt, PGLUE_B_REG_USE_CLIENTID_IN_TAG, 1);

	if (QED_IS_BB(p_hwfn->cdev)) {
		num_pfs = NUM_OF_ENG_PFS(p_hwfn->cdev);
		for (pf_id = 0; pf_id < num_pfs; pf_id++) {
			qed_fid_pretend(p_hwfn, p_ptt, pf_id);
			qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_ROCE, 0x0);
			qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TCP, 0x0);
		}
		/* pretend to original PF */
		qed_fid_pretend(p_hwfn, p_ptt, p_hwfn->rel_pf_id);
	}

	max_num_vfs = QED_IS_AH(cdev) ? MAX_NUM_VFS_K2 : MAX_NUM_VFS_BB;
	for (vf_id = 0; vf_id < max_num_vfs; vf_id++) {
		concrete_fid = qed_vfid_to_concrete(p_hwfn, vf_id);
		qed_fid_pretend(p_hwfn, p_ptt, (u16)concrete_fid);
		qed_wr(p_hwfn, p_ptt, CCFC_REG_STRONG_ENABLE_VF, 0x1);
		qed_wr(p_hwfn, p_ptt, CCFC_REG_WEAK_ENABLE_VF, 0x0);
		qed_wr(p_hwfn, p_ptt, TCFC_REG_STRONG_ENABLE_VF, 0x1);
		qed_wr(p_hwfn, p_ptt, TCFC_REG_WEAK_ENABLE_VF, 0x0);
	}
	/* pretend to original PF */
	qed_fid_pretend(p_hwfn, p_ptt, p_hwfn->rel_pf_id);

out:
	kfree(params);

	return rc;
}

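/* DPI sizing sketch (illustrative): for n_cpus = 8, and assuming
 * QED_MIN_WIDS <= 8, n_wids is 8 and dpi_page_size is QED_WID_SIZE * 8
 * rounded up to a page; the DPI count is then simply the PWM region size
 * divided by that page size.
 */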
static int
qed_hw_init_dpi_size(struct qed_hwfn *p_hwfn,
		     struct qed_ptt *p_ptt, u32 pwm_region_size, u32 n_cpus)
{
	u32 dpi_bit_shift, dpi_count, dpi_page_size;
	u32 min_dpis;
	u32 n_wids;

	/* Calculate DPI size */
	n_wids = max_t(u32, QED_MIN_WIDS, n_cpus);
	dpi_page_size = QED_WID_SIZE * roundup_pow_of_two(n_wids);
	dpi_page_size = (dpi_page_size + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1);
	dpi_bit_shift = ilog2(dpi_page_size / 4096);
	dpi_count = pwm_region_size / dpi_page_size;

	min_dpis = p_hwfn->pf_params.rdma_pf_params.min_dpis;
	min_dpis = max_t(u32, QED_MIN_DPIS, min_dpis);

	p_hwfn->dpi_size = dpi_page_size;
	p_hwfn->dpi_count = dpi_count;

	qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_DPI_BIT_SHIFT, dpi_bit_shift);

	if (dpi_count < min_dpis)
		return -EINVAL;

	return 0;
}

enum QED_ROCE_EDPM_MODE {
	QED_ROCE_EDPM_MODE_ENABLE = 0,
	QED_ROCE_EDPM_MODE_FORCE_ON = 1,
	QED_ROCE_EDPM_MODE_DISABLE = 2,
};

bool qed_edpm_enabled(struct qed_hwfn *p_hwfn)
{
	if (p_hwfn->dcbx_no_edpm || p_hwfn->db_bar_no_edpm)
		return false;

	return true;
}

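/* The doorbell BAR is split into a "normal" region, sized to roughly cover
 * the non-PWM (CORE + ETH) connections at QED_PF_DEMS_SIZE each (page
 * aligned), and a PWM region that takes whatever remains of the BAR.
 */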
static int
qed_hw_init_pf_doorbell_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	u32 pwm_regsize, norm_regsize;
	u32 non_pwm_conn, min_addr_reg1;
	u32 db_bar_size, n_cpus = 1;
	u32 roce_edpm_mode;
	u32 pf_dems_shift;
	int rc = 0;
	u8 cond;

	db_bar_size = qed_hw_bar_size(p_hwfn, p_ptt, BAR_ID_1);
	if (p_hwfn->cdev->num_hwfns > 1)
		db_bar_size /= 2;

	/* Calculate doorbell regions */
	non_pwm_conn = qed_cxt_get_proto_cid_start(p_hwfn, PROTOCOLID_CORE) +
		       qed_cxt_get_proto_cid_count(p_hwfn, PROTOCOLID_CORE,
						   NULL) +
		       qed_cxt_get_proto_cid_count(p_hwfn, PROTOCOLID_ETH,
						   NULL);
	norm_regsize = roundup(QED_PF_DEMS_SIZE * non_pwm_conn, PAGE_SIZE);
	min_addr_reg1 = norm_regsize / 4096;
	pwm_regsize = db_bar_size - norm_regsize;

	/* Check that the normal and PWM sizes are valid */
	if (db_bar_size < norm_regsize) {
		DP_ERR(p_hwfn->cdev,
		       "Doorbell BAR size 0x%x is too small (normal region is 0x%0x )\n",
		       db_bar_size, norm_regsize);
		return -EINVAL;
	}

	if (pwm_regsize < QED_MIN_PWM_REGION) {
		DP_ERR(p_hwfn->cdev,
		       "PWM region size 0x%0x is too small. Should be at least 0x%0x (Doorbell BAR size is 0x%x and normal region size is 0x%0x)\n",
		       pwm_regsize,
		       QED_MIN_PWM_REGION, db_bar_size, norm_regsize);
		return -EINVAL;
	}

	/* Calculate number of DPIs */
	roce_edpm_mode = p_hwfn->pf_params.rdma_pf_params.roce_edpm_mode;
	if (roce_edpm_mode == QED_ROCE_EDPM_MODE_ENABLE ||
	    roce_edpm_mode == QED_ROCE_EDPM_MODE_FORCE_ON) {
		/* Either EDPM is mandatory, or we are attempting to allocate a
		 * WID per CPU.
		 */
		n_cpus = num_present_cpus();
		rc = qed_hw_init_dpi_size(p_hwfn, p_ptt, pwm_regsize, n_cpus);
	}

	cond = (rc && (roce_edpm_mode == QED_ROCE_EDPM_MODE_ENABLE)) ||
	       (roce_edpm_mode == QED_ROCE_EDPM_MODE_DISABLE);
	if (cond || p_hwfn->dcbx_no_edpm) {
		/* Either EDPM is disabled from user configuration, or it is
		 * disabled via DCBx, or it is not mandatory and we failed to
		 * allocate a WID per CPU.
		 */
		n_cpus = 1;
		rc = qed_hw_init_dpi_size(p_hwfn, p_ptt, pwm_regsize, n_cpus);

		if (cond)
			qed_rdma_dpm_bar(p_hwfn, p_ptt);
	}

	p_hwfn->wid_count = (u16)n_cpus;

	DP_INFO(p_hwfn,
		"doorbell bar: normal_region_size=%d, pwm_region_size=%d, dpi_size=%d, dpi_count=%d, roce_edpm=%s, page_size=%lu\n",
		norm_regsize,
		pwm_regsize,
		p_hwfn->dpi_size,
		p_hwfn->dpi_count,
		(!qed_edpm_enabled(p_hwfn)) ?
		"disabled" : "enabled", PAGE_SIZE);

	if (rc) {
		DP_ERR(p_hwfn,
		       "Failed to allocate enough DPIs. Allocated %d but the current minimum is %d.\n",
		       p_hwfn->dpi_count,
		       p_hwfn->pf_params.rdma_pf_params.min_dpis);
		return -EINVAL;
	}

	p_hwfn->dpi_start_offset = norm_regsize;

	/* DEMS size is configured log2 of DWORDs, hence the division by 4 */
	pf_dems_shift = ilog2(QED_PF_DEMS_SIZE / 4);
	qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_ICID_BIT_SHIFT_NORM, pf_dems_shift);
	qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_MIN_ADDR_REG1, min_addr_reg1);

	return 0;
}

static int qed_hw_init_port(struct qed_hwfn *p_hwfn,
			    struct qed_ptt *p_ptt, int hw_mode)
{
	int rc = 0;

	/* In CMT the gate should be cleared by the 2nd hwfn */
	if (!QED_IS_CMT(p_hwfn->cdev) || !IS_LEAD_HWFN(p_hwfn))
		STORE_RT_REG(p_hwfn, NIG_REG_BRB_GATE_DNTFWD_PORT_RT_OFFSET, 0);

	rc = qed_init_run(p_hwfn, p_ptt, PHASE_PORT, p_hwfn->port_id, hw_mode);
	if (rc)
		return rc;

	qed_wr(p_hwfn, p_ptt, PGLUE_B_REG_MASTER_WRITE_PAD_ENABLE, 0);

	return 0;
}

static int qed_hw_init_pf(struct qed_hwfn *p_hwfn,
			  struct qed_ptt *p_ptt,
			  struct qed_tunnel_info *p_tunn,
			  int hw_mode,
			  bool b_hw_start,
			  enum qed_int_mode int_mode,
			  bool allow_npar_tx_switch)
{
	u8 rel_pf_id = p_hwfn->rel_pf_id;
	int rc = 0;

	if (p_hwfn->mcp_info) {
		struct qed_mcp_function_info *p_info;

		p_info = &p_hwfn->mcp_info->func_info;
		if (p_info->bandwidth_min)
			p_hwfn->qm_info.pf_wfq = p_info->bandwidth_min;

		/* Update rate limit once we'll actually have a link */
		p_hwfn->qm_info.pf_rl = 100000;
	}

	qed_cxt_hw_init_pf(p_hwfn, p_ptt);

	qed_int_igu_init_rt(p_hwfn);

	/* Set VLAN in NIG if needed */
	if (hw_mode & BIT(MODE_MF_SD)) {
		DP_VERBOSE(p_hwfn, NETIF_MSG_HW, "Configuring LLH_FUNC_TAG\n");
		STORE_RT_REG(p_hwfn, NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET, 1);
		STORE_RT_REG(p_hwfn, NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET,
			     p_hwfn->hw_info.ovlan);

		DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
			   "Configuring LLH_FUNC_FILTER_HDR_SEL\n");
		STORE_RT_REG(p_hwfn, NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_OFFSET,
			     1);
	}

	/* Enable classification by MAC if needed */
	if (hw_mode & BIT(MODE_MF_SI)) {
		DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
			   "Configuring TAGMAC_CLS_TYPE\n");
		STORE_RT_REG(p_hwfn,
			     NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET, 1);
	}

	/* Protocol Configuration */
	STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_TCP_RT_OFFSET,
		     ((p_hwfn->hw_info.personality == QED_PCI_ISCSI) ||
		      (p_hwfn->hw_info.personality == QED_PCI_NVMETCP)) ? 1 : 0);
	STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_FCOE_RT_OFFSET,
		     (p_hwfn->hw_info.personality == QED_PCI_FCOE) ? 1 : 0);
	STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_ROCE_RT_OFFSET, 0);

	/* Sanity check before the PF init sequence that uses DMAE */
	rc = qed_dmae_sanity(p_hwfn, p_ptt, "pf_phase");
	if (rc)
		return rc;

	/* PF Init sequence */
	rc = qed_init_run(p_hwfn, p_ptt, PHASE_PF, rel_pf_id, hw_mode);
	if (rc)
		return rc;

	/* QM_PF Init sequence (may be invoked separately e.g. for DCB) */
	rc = qed_init_run(p_hwfn, p_ptt, PHASE_QM_PF, rel_pf_id, hw_mode);
	if (rc)
		return rc;

	qed_fw_overlay_init_ram(p_hwfn, p_ptt, p_hwfn->fw_overlay_mem);

	/* Pure runtime initializations - directly to the HW */
	qed_int_igu_init_pure_rt(p_hwfn, p_ptt, true, true);

	rc = qed_hw_init_pf_doorbell_bar(p_hwfn, p_ptt);
	if (rc)
		return rc;

	/* Use the leading hwfn since in CMT only NIG #0 is operational */
	if (IS_LEAD_HWFN(p_hwfn)) {
		rc = qed_llh_hw_init_pf(p_hwfn, p_ptt);
		if (rc)
			return rc;
	}

	if (b_hw_start) {
		/* enable interrupts */
		qed_int_igu_enable(p_hwfn, p_ptt, int_mode);

		/* send function start command */
		rc = qed_sp_pf_start(p_hwfn, p_ptt, p_tunn,
				     allow_npar_tx_switch);
		if (rc) {
			DP_NOTICE(p_hwfn, "Function start ramrod failed\n");
			return rc;
		}
		if (p_hwfn->hw_info.personality == QED_PCI_FCOE) {
			qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TAG1, BIT(2));
			qed_wr(p_hwfn, p_ptt,
			       PRS_REG_PKT_LEN_STAT_TAGS_NOT_COUNTED_FIRST,
			       0x100);
		}
	}
	return rc;
}

int qed_pglueb_set_pfid_enable(struct qed_hwfn *p_hwfn,
			       struct qed_ptt *p_ptt, bool b_enable)
{
	u32 delay_idx = 0, val, set_val = b_enable ? 1 : 0;

	/* Configure the PF's internal FID_enable for master transactions */
	qed_wr(p_hwfn, p_ptt, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, set_val);

	/* Wait until value is set - try for 1 second every 50us */
	for (delay_idx = 0; delay_idx < 20000; delay_idx++) {
		val = qed_rd(p_hwfn, p_ptt,
			     PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER);
		if (val == set_val)
			break;

		usleep_range(50, 60);
	}

	if (val != set_val) {
		DP_NOTICE(p_hwfn,
			  "PFID_ENABLE_MASTER wasn't changed after a second\n");
		return -EAGAIN;
	}

	return 0;
}

static void qed_reset_mb_shadow(struct qed_hwfn *p_hwfn,
				struct qed_ptt *p_main_ptt)
{
	/* Read shadow of current MFW mailbox */
	qed_mcp_read_mb(p_hwfn, p_main_ptt);
	memcpy(p_hwfn->mcp_info->mfw_mb_shadow,
	       p_hwfn->mcp_info->mfw_mb_cur, p_hwfn->mcp_info->mfw_mb_length);
}

static void
qed_fill_load_req_params(struct qed_load_req_params *p_load_req,
			 struct qed_drv_load_params *p_drv_load)
{
	memset(p_load_req, 0, sizeof(*p_load_req));

	p_load_req->drv_role = p_drv_load->is_crash_kernel ?
			       QED_DRV_ROLE_KDUMP : QED_DRV_ROLE_OS;
	p_load_req->timeout_val = p_drv_load->mfw_timeout_val;
	p_load_req->avoid_eng_reset = p_drv_load->avoid_eng_reset;
	p_load_req->override_force_load = p_drv_load->override_force_load;
}

static int qed_vf_start(struct qed_hwfn *p_hwfn,
			struct qed_hw_init_params *p_params)
{
	if (p_params->p_tunn) {
		qed_vf_set_vf_start_tunn_update_param(p_params->p_tunn);
		qed_vf_pf_tunnel_param_update(p_hwfn, p_params->p_tunn);
	}

	p_hwfn->b_int_enabled = true;

	return 0;
}

static void qed_pglueb_clear_err(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	qed_wr(p_hwfn, p_ptt, PGLUE_B_REG_WAS_ERROR_PF_31_0_CLR,
	       BIT(p_hwfn->abs_pf_id));
}

int qed_hw_init(struct qed_dev *cdev, struct qed_hw_init_params *p_params)
{
	struct qed_load_req_params load_req_params;
	u32 load_code, resp, param, drv_mb_param;
	bool b_default_mtu = true;
	struct qed_hwfn *p_hwfn;
	const u32 *fw_overlays;
	u32 fw_overlays_len;
	u16 ether_type;
	int rc = 0, i;

	if ((p_params->int_mode == QED_INT_MODE_MSI) && (cdev->num_hwfns > 1)) {
		DP_NOTICE(cdev, "MSI mode is not supported for CMT devices\n");
		return -EINVAL;
	}

	if (IS_PF(cdev)) {
		rc = qed_init_fw_data(cdev, p_params->bin_fw_data);
		if (rc)
			return rc;
	}

	for_each_hwfn(cdev, i) {
		p_hwfn = &cdev->hwfns[i];

		/* If management didn't provide a default, set one of our own */
		if (!p_hwfn->hw_info.mtu) {
			p_hwfn->hw_info.mtu = 1500;
			b_default_mtu = false;
		}

		if (IS_VF(cdev)) {
			qed_vf_start(p_hwfn, p_params);
			continue;
		}

		/* Some flows may keep variable set */
		p_hwfn->mcp_info->mcp_handling_status = 0;

		rc = qed_calc_hw_mode(p_hwfn);
		if (rc)
			return rc;

		if (IS_PF(cdev) && (test_bit(QED_MF_8021Q_TAGGING,
					     &cdev->mf_bits) ||
				    test_bit(QED_MF_8021AD_TAGGING,
					     &cdev->mf_bits))) {
			if (test_bit(QED_MF_8021Q_TAGGING, &cdev->mf_bits))
				ether_type = ETH_P_8021Q;
			else
				ether_type = ETH_P_8021AD;
			STORE_RT_REG(p_hwfn, PRS_REG_TAG_ETHERTYPE_0_RT_OFFSET,
				     ether_type);
			STORE_RT_REG(p_hwfn, NIG_REG_TAG_ETHERTYPE_0_RT_OFFSET,
				     ether_type);
			STORE_RT_REG(p_hwfn, PBF_REG_TAG_ETHERTYPE_0_RT_OFFSET,
				     ether_type);
			STORE_RT_REG(p_hwfn, DORQ_REG_TAG1_ETHERTYPE_RT_OFFSET,
				     ether_type);
		}

		qed_fill_load_req_params(&load_req_params,
					 p_params->p_drv_load_params);
		rc = qed_mcp_load_req(p_hwfn, p_hwfn->p_main_ptt,
				      &load_req_params);
		if (rc) {
			DP_NOTICE(p_hwfn, "Failed sending a LOAD_REQ command\n");
			return rc;
		}

		load_code = load_req_params.load_code;
		DP_VERBOSE(p_hwfn, QED_MSG_SP,
			   "Load request was sent. Load code: 0x%x\n",
			   load_code);

		/* Only relevant for recovery:
		 * Clear the indication after LOAD_REQ is responded by the MFW.
		 */
		cdev->recov_in_prog = false;

		qed_mcp_set_capabilities(p_hwfn, p_hwfn->p_main_ptt);

		qed_reset_mb_shadow(p_hwfn, p_hwfn->p_main_ptt);

		/* Clean up chip from previous driver if such remains exist.
		 * This is not needed when the PF is the first one on the
		 * engine, since afterwards we are going to init the FW.
		 */
		if (load_code != FW_MSG_CODE_DRV_LOAD_ENGINE) {
			rc = qed_final_cleanup(p_hwfn, p_hwfn->p_main_ptt,
					       p_hwfn->rel_pf_id, false);
			if (rc) {
				qed_hw_err_notify(p_hwfn, p_hwfn->p_main_ptt,
						  QED_HW_ERR_RAMROD_FAIL,
						  "Final cleanup failed\n");
				goto load_err;
			}
		}

		/* Log and clear previous pglue_b errors if such exist */
		qed_pglueb_rbc_attn_handler(p_hwfn, p_hwfn->p_main_ptt, true);

		/* Enable the PF's internal FID_enable in the PXP */
		rc = qed_pglueb_set_pfid_enable(p_hwfn, p_hwfn->p_main_ptt,
						true);
		if (rc)
			goto load_err;

		/* Clear the pglue_b was_error indication.
		 * In E4 it must be done after the BME and the internal
		 * FID_enable for the PF are set, since VDMs may cause the
		 * indication to be set again.
		 */
		qed_pglueb_clear_err(p_hwfn, p_hwfn->p_main_ptt);

		fw_overlays = cdev->fw_data->fw_overlays;
		fw_overlays_len = cdev->fw_data->fw_overlays_len;
		p_hwfn->fw_overlay_mem =
		    qed_fw_overlay_mem_alloc(p_hwfn, fw_overlays,
					     fw_overlays_len);
		if (!p_hwfn->fw_overlay_mem) {
			DP_NOTICE(p_hwfn,
				  "Failed to allocate fw overlay memory\n");
			rc = -ENOMEM;
			goto load_err;
		}

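		/* The load-code ladder below intentionally falls through:
		 * an ENGINE load runs the common, port and function init
		 * phases, a PORT load runs port and function, and a FUNCTION
		 * load runs the function phase alone.
		 */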
		switch (load_code) {
		case FW_MSG_CODE_DRV_LOAD_ENGINE:
			rc = qed_hw_init_common(p_hwfn, p_hwfn->p_main_ptt,
						p_hwfn->hw_info.hw_mode);
			if (rc)
				break;
			fallthrough;
		case FW_MSG_CODE_DRV_LOAD_PORT:
			rc = qed_hw_init_port(p_hwfn, p_hwfn->p_main_ptt,
					      p_hwfn->hw_info.hw_mode);
			if (rc)
				break;

			fallthrough;
		case FW_MSG_CODE_DRV_LOAD_FUNCTION:
			rc = qed_hw_init_pf(p_hwfn, p_hwfn->p_main_ptt,
					    p_params->p_tunn,
					    p_hwfn->hw_info.hw_mode,
					    p_params->b_hw_start,
					    p_params->int_mode,
					    p_params->allow_npar_tx_switch);
			break;
		default:
			DP_NOTICE(p_hwfn,
				  "Unexpected load code [0x%08x]", load_code);
			rc = -EINVAL;
			break;
		}

		if (rc) {
			DP_NOTICE(p_hwfn,
				  "init phase failed for loadcode 0x%x (rc %d)\n",
				  load_code, rc);
			goto load_err;
		}

		rc = qed_mcp_load_done(p_hwfn, p_hwfn->p_main_ptt);
		if (rc)
			return rc;

		/* send DCBX attention request command */
		DP_VERBOSE(p_hwfn,
			   QED_MSG_DCB,
			   "sending phony dcbx set command to trigger DCBx attention handling\n");
		rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
				 DRV_MSG_CODE_SET_DCBX,
				 1 << DRV_MB_PARAM_DCBX_NOTIFY_SHIFT,
				 &resp, &param);
		if (rc) {
			DP_NOTICE(p_hwfn,
				  "Failed to send DCBX attention request\n");
			return rc;
		}

		p_hwfn->hw_init_done = true;
	}

	if (IS_PF(cdev)) {
		p_hwfn = QED_LEADING_HWFN(cdev);

		/* Get pre-negotiated values for stag, bandwidth etc. */
		DP_VERBOSE(p_hwfn,
			   QED_MSG_SPQ,
			   "Sending GET_OEM_UPDATES command to trigger stag/bandwidth attention handling\n");
		drv_mb_param = 1 << DRV_MB_PARAM_DUMMY_OEM_UPDATES_OFFSET;
		rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
				 DRV_MSG_CODE_GET_OEM_UPDATES,
				 drv_mb_param, &resp, &param);
		if (rc)
			DP_NOTICE(p_hwfn,
				  "Failed to send GET_OEM_UPDATES attention request\n");

		drv_mb_param = STORM_FW_VERSION;
		rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
				 DRV_MSG_CODE_OV_UPDATE_STORM_FW_VER,
				 drv_mb_param, &load_code, &param);
		if (rc)
			DP_INFO(p_hwfn, "Failed to update firmware version\n");

		if (!b_default_mtu) {
			rc = qed_mcp_ov_update_mtu(p_hwfn, p_hwfn->p_main_ptt,
						   p_hwfn->hw_info.mtu);
			if (rc)
				DP_INFO(p_hwfn,
					"Failed to update default mtu\n");
		}

		rc = qed_mcp_ov_update_driver_state(p_hwfn,
						    p_hwfn->p_main_ptt,
						    QED_OV_DRIVER_STATE_DISABLED);
		if (rc)
			DP_INFO(p_hwfn, "Failed to update driver state\n");

		rc = qed_mcp_ov_update_eswitch(p_hwfn, p_hwfn->p_main_ptt,
					       QED_OV_ESWITCH_NONE);
		if (rc)
			DP_INFO(p_hwfn, "Failed to update eswitch mode\n");
	}

	return 0;

load_err:
	/* The MFW load lock should be released also when initialization fails.
	 */
	qed_mcp_load_done(p_hwfn, p_hwfn->p_main_ptt);
	return rc;
}

#define QED_HW_STOP_RETRY_LIMIT (10)
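/* With QED_HW_STOP_RETRY_LIMIT polls of 1-2 msec each, the loop below gives
 * the timers' linear scans roughly 10-20 msec to complete.
 */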
3301 | static void qed_hw_timers_stop(struct qed_dev *cdev, |
3302 | struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) |
3303 | { |
3304 | int i; |
3305 | |
3306 | /* close timers */ |
3307 | qed_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_CONN, val: 0x0); |
3308 | qed_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_TASK, val: 0x0); |
3309 | |
3310 | if (cdev->recov_in_prog) |
3311 | return; |
3312 | |
3313 | for (i = 0; i < QED_HW_STOP_RETRY_LIMIT; i++) { |
3314 | if ((!qed_rd(p_hwfn, p_ptt, |
3315 | TM_REG_PF_SCAN_ACTIVE_CONN)) && |
3316 | (!qed_rd(p_hwfn, p_ptt, TM_REG_PF_SCAN_ACTIVE_TASK))) |
3317 | break; |
3318 | |
3319 | /* Dependent on number of connection/tasks, possibly |
3320 | * 1ms sleep is required between polls |
3321 | */ |
3322 | usleep_range(min: 1000, max: 2000); |
3323 | } |
3324 | |
3325 | if (i < QED_HW_STOP_RETRY_LIMIT) |
3326 | return; |
3327 | |
3328 | DP_NOTICE(p_hwfn, |
3329 | "Timers linear scans are not over [Connection %02x Tasks %02x]\n" , |
3330 | (u8)qed_rd(p_hwfn, p_ptt, TM_REG_PF_SCAN_ACTIVE_CONN), |
3331 | (u8)qed_rd(p_hwfn, p_ptt, TM_REG_PF_SCAN_ACTIVE_TASK)); |
3332 | } |
3333 | |
3334 | void qed_hw_timers_stop_all(struct qed_dev *cdev) |
3335 | { |
3336 | int j; |
3337 | |
3338 | for_each_hwfn(cdev, j) { |
3339 | struct qed_hwfn *p_hwfn = &cdev->hwfns[j]; |
3340 | struct qed_ptt *p_ptt = p_hwfn->p_main_ptt; |
3341 | |
3342 | qed_hw_timers_stop(cdev, p_hwfn, p_ptt); |
3343 | } |
3344 | } |
3345 | |
3346 | int qed_hw_stop(struct qed_dev *cdev) |
3347 | { |
3348 | struct qed_hwfn *p_hwfn; |
3349 | struct qed_ptt *p_ptt; |
3350 | int rc, rc2 = 0; |
3351 | int j; |
3352 | |
3353 | for_each_hwfn(cdev, j) { |
3354 | p_hwfn = &cdev->hwfns[j]; |
3355 | p_ptt = p_hwfn->p_main_ptt; |
3356 | |
3357 | DP_VERBOSE(p_hwfn, NETIF_MSG_IFDOWN, "Stopping hw/fw\n" ); |
3358 | |
3359 | if (IS_VF(cdev)) { |
3360 | qed_vf_pf_int_cleanup(p_hwfn); |
3361 | rc = qed_vf_pf_reset(p_hwfn); |
3362 | if (rc) { |
3363 | DP_NOTICE(p_hwfn, |
3364 | "qed_vf_pf_reset failed. rc = %d.\n" , |
3365 | rc); |
3366 | rc2 = -EINVAL; |
3367 | } |
3368 | continue; |
3369 | } |
3370 | |
3371 | /* mark the hw as uninitialized... */ |
3372 | p_hwfn->hw_init_done = false; |
3373 | |
3374 | /* Send unload command to MCP */ |
3375 | if (!cdev->recov_in_prog) { |
3376 | rc = qed_mcp_unload_req(p_hwfn, p_ptt); |
3377 | if (rc) { |
3378 | DP_NOTICE(p_hwfn, |
3379 | "Failed sending a UNLOAD_REQ command. rc = %d.\n" , |
3380 | rc); |
3381 | rc2 = -EINVAL; |
3382 | } |
3383 | } |
3384 | |
3385 | qed_slowpath_irq_sync(p_hwfn); |
3386 | |
		/* After this point no MFW attentions are expected, e.g., this
		 * prevents a race between pf stop and the dcbx pf update.
		 */
3390 | rc = qed_sp_pf_stop(p_hwfn); |
3391 | if (rc) { |
3392 | DP_NOTICE(p_hwfn, |
				  "Failed to close PF against FW [rc = %d]. Continue to stop HW to prevent illegal host access by the device.\n",
3394 | rc); |
3395 | rc2 = -EINVAL; |
3396 | } |
3397 | |
		qed_wr(p_hwfn, p_ptt,
		       NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x1);

		qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TCP, 0x0);
		qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_UDP, 0x0);
		qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_FCOE, 0x0);
		qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_ROCE, 0x0);
		qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_OPENFLOW, 0x0);
3406 | |
3407 | qed_hw_timers_stop(cdev, p_hwfn, p_ptt); |
3408 | |
3409 | /* Disable Attention Generation */ |
3410 | qed_int_igu_disable_int(p_hwfn, p_ptt); |
3411 | |
		qed_wr(p_hwfn, p_ptt, IGU_REG_LEADING_EDGE_LATCH, 0);
		qed_wr(p_hwfn, p_ptt, IGU_REG_TRAILING_EDGE_LATCH, 0);

		qed_int_igu_init_pure_rt(p_hwfn, p_ptt, false, true);

		/* Need to wait 1ms to guarantee SBs are cleared */
		usleep_range(1000, 2000);
3419 | |
3420 | /* Disable PF in HW blocks */ |
		qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_DB_ENABLE, 0);
		qed_wr(p_hwfn, p_ptt, QM_REG_PF_EN, 0);
3423 | |
3424 | if (IS_LEAD_HWFN(p_hwfn) && |
3425 | test_bit(QED_MF_LLH_MAC_CLSS, &cdev->mf_bits) && |
3426 | !QED_IS_FCOE_PERSONALITY(p_hwfn)) |
			qed_llh_remove_mac_filter(cdev, 0,
						  p_hwfn->hw_info.hw_mac_addr);
3429 | |
3430 | if (!cdev->recov_in_prog) { |
3431 | rc = qed_mcp_unload_done(p_hwfn, p_ptt); |
3432 | if (rc) { |
3433 | DP_NOTICE(p_hwfn, |
					  "Failed sending a UNLOAD_DONE command. rc = %d.\n",
3435 | rc); |
3436 | rc2 = -EINVAL; |
3437 | } |
3438 | } |
3439 | } |
3440 | |
3441 | if (IS_PF(cdev) && !cdev->recov_in_prog) { |
3442 | p_hwfn = QED_LEADING_HWFN(cdev); |
3443 | p_ptt = QED_LEADING_HWFN(cdev)->p_main_ptt; |
3444 | |
3445 | /* Clear the PF's internal FID_enable in the PXP. |
3446 | * In CMT this should only be done for first hw-function, and |
3447 | * only after all transactions have stopped for all active |
3448 | * hw-functions. |
3449 | */ |
		rc = qed_pglueb_set_pfid_enable(p_hwfn, p_ptt, false);
		if (rc) {
			DP_NOTICE(p_hwfn,
				  "qed_pglueb_set_pfid_enable() failed. rc = %d.\n",
				  rc);
3455 | rc2 = -EINVAL; |
3456 | } |
3457 | } |
3458 | |
3459 | return rc2; |
3460 | } |
3461 | |
3462 | int qed_hw_stop_fastpath(struct qed_dev *cdev) |
3463 | { |
3464 | int j; |
3465 | |
3466 | for_each_hwfn(cdev, j) { |
3467 | struct qed_hwfn *p_hwfn = &cdev->hwfns[j]; |
3468 | struct qed_ptt *p_ptt; |
3469 | |
3470 | if (IS_VF(cdev)) { |
3471 | qed_vf_pf_int_cleanup(p_hwfn); |
3472 | continue; |
3473 | } |
3474 | p_ptt = qed_ptt_acquire(p_hwfn); |
3475 | if (!p_ptt) |
3476 | return -EAGAIN; |
3477 | |
		DP_VERBOSE(p_hwfn,
			   NETIF_MSG_IFDOWN, "Shutting down the fastpath\n");

		qed_wr(p_hwfn, p_ptt,
		       NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x1);

		qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TCP, 0x0);
		qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_UDP, 0x0);
		qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_FCOE, 0x0);
		qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_ROCE, 0x0);
		qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_OPENFLOW, 0x0);

		qed_int_igu_init_pure_rt(p_hwfn, p_ptt, false, false);

		/* Need to wait 1ms to guarantee SBs are cleared */
		usleep_range(1000, 2000);
3494 | qed_ptt_release(p_hwfn, p_ptt); |
3495 | } |
3496 | |
3497 | return 0; |
3498 | } |
3499 | |
3500 | int qed_hw_start_fastpath(struct qed_hwfn *p_hwfn) |
3501 | { |
3502 | struct qed_ptt *p_ptt; |
3503 | |
3504 | if (IS_VF(p_hwfn->cdev)) |
3505 | return 0; |
3506 | |
3507 | p_ptt = qed_ptt_acquire(p_hwfn); |
3508 | if (!p_ptt) |
3509 | return -EAGAIN; |
3510 | |
3511 | if (p_hwfn->p_rdma_info && |
3512 | p_hwfn->p_rdma_info->active && p_hwfn->b_rdma_enabled_in_prs) |
		qed_wr(p_hwfn, p_ptt, p_hwfn->rdma_prs_search_reg, 0x1);
3514 | |
3515 | /* Re-open incoming traffic */ |
	qed_wr(p_hwfn, p_ptt, NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x0);
3517 | qed_ptt_release(p_hwfn, p_ptt); |
3518 | |
3519 | return 0; |
3520 | } |
3521 | |
3522 | /* Free hwfn memory and resources acquired in hw_hwfn_prepare */ |
3523 | static void qed_hw_hwfn_free(struct qed_hwfn *p_hwfn) |
3524 | { |
3525 | qed_ptt_pool_free(p_hwfn); |
	kfree(p_hwfn->hw_info.p_igu_info);
3527 | p_hwfn->hw_info.p_igu_info = NULL; |
3528 | } |
3529 | |
3530 | /* Setup bar access */ |
3531 | static void qed_hw_hwfn_prepare(struct qed_hwfn *p_hwfn) |
3532 | { |
3533 | /* clear indirect access */ |
3534 | if (QED_IS_AH(p_hwfn->cdev)) { |
		qed_wr(p_hwfn, p_hwfn->p_main_ptt,
		       PGLUE_B_REG_PGL_ADDR_E8_F0_K2, 0);
		qed_wr(p_hwfn, p_hwfn->p_main_ptt,
		       PGLUE_B_REG_PGL_ADDR_EC_F0_K2, 0);
		qed_wr(p_hwfn, p_hwfn->p_main_ptt,
		       PGLUE_B_REG_PGL_ADDR_F0_F0_K2, 0);
		qed_wr(p_hwfn, p_hwfn->p_main_ptt,
		       PGLUE_B_REG_PGL_ADDR_F4_F0_K2, 0);
	} else {
		qed_wr(p_hwfn, p_hwfn->p_main_ptt,
		       PGLUE_B_REG_PGL_ADDR_88_F0_BB, 0);
		qed_wr(p_hwfn, p_hwfn->p_main_ptt,
		       PGLUE_B_REG_PGL_ADDR_8C_F0_BB, 0);
		qed_wr(p_hwfn, p_hwfn->p_main_ptt,
		       PGLUE_B_REG_PGL_ADDR_90_F0_BB, 0);
		qed_wr(p_hwfn, p_hwfn->p_main_ptt,
		       PGLUE_B_REG_PGL_ADDR_94_F0_BB, 0);
3552 | } |
3553 | |
3554 | /* Clean previous pglue_b errors if such exist */ |
	qed_pglueb_clear_err(p_hwfn, p_hwfn->p_main_ptt);

	/* enable internal target-read */
	qed_wr(p_hwfn, p_hwfn->p_main_ptt,
	       PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);
3560 | } |
3561 | |
3562 | static void get_function_id(struct qed_hwfn *p_hwfn) |
3563 | { |
3564 | /* ME Register */ |
3565 | p_hwfn->hw_info.opaque_fid = (u16)REG_RD(p_hwfn, |
3566 | PXP_PF_ME_OPAQUE_ADDR); |
3567 | |
3568 | p_hwfn->hw_info.concrete_fid = REG_RD(p_hwfn, PXP_PF_ME_CONCRETE_ADDR); |
3569 | |
3570 | p_hwfn->abs_pf_id = (p_hwfn->hw_info.concrete_fid >> 16) & 0xf; |
3571 | p_hwfn->rel_pf_id = GET_FIELD(p_hwfn->hw_info.concrete_fid, |
3572 | PXP_CONCRETE_FID_PFID); |
3573 | p_hwfn->port_id = GET_FIELD(p_hwfn->hw_info.concrete_fid, |
3574 | PXP_CONCRETE_FID_PORT); |
3575 | |
3576 | DP_VERBOSE(p_hwfn, NETIF_MSG_PROBE, |
		   "Read ME register: Concrete 0x%08x Opaque 0x%04x\n",
3578 | p_hwfn->hw_info.concrete_fid, p_hwfn->hw_info.opaque_fid); |
3579 | } |
3580 | |
3581 | static void qed_hw_set_feat(struct qed_hwfn *p_hwfn) |
3582 | { |
3583 | u32 *feat_num = p_hwfn->hw_info.feat_num; |
3584 | struct qed_sb_cnt_info sb_cnt; |
3585 | u32 non_l2_sbs = 0; |
3586 | |
3587 | memset(&sb_cnt, 0, sizeof(sb_cnt)); |
	qed_int_get_num_sbs(p_hwfn, &sb_cnt);
3589 | |
3590 | if (IS_ENABLED(CONFIG_QED_RDMA) && |
3591 | QED_IS_RDMA_PERSONALITY(p_hwfn)) { |
		/* Each RoCE CNQ requires one status block and one CNQ. We
		 * divide the status blocks equally between L2 and RoCE, but
		 * take into account how many L2 queues / CNQs we have.
		 */
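		/* e.g., with 16 SBs and at least 8 CNQ RAM resources, 8 SBs
		 * end up serving RoCE CNQs and the rest remain for L2.
		 */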
3596 | feat_num[QED_RDMA_CNQ] = |
3597 | min_t(u32, sb_cnt.cnt / 2, |
3598 | RESC_NUM(p_hwfn, QED_RDMA_CNQ_RAM)); |
3599 | |
3600 | non_l2_sbs = feat_num[QED_RDMA_CNQ]; |
3601 | } |
3602 | if (QED_IS_L2_PERSONALITY(p_hwfn)) { |
3603 | /* Start by allocating VF queues, then PF's */ |
3604 | feat_num[QED_VF_L2_QUE] = min_t(u32, |
3605 | RESC_NUM(p_hwfn, QED_L2_QUEUE), |
3606 | sb_cnt.iov_cnt); |
3607 | feat_num[QED_PF_L2_QUE] = min_t(u32, |
3608 | sb_cnt.cnt - non_l2_sbs, |
3609 | RESC_NUM(p_hwfn, |
3610 | QED_L2_QUEUE) - |
3611 | FEAT_NUM(p_hwfn, |
3612 | QED_VF_L2_QUE)); |
3613 | } |
3614 | |
3615 | if (QED_IS_FCOE_PERSONALITY(p_hwfn)) |
3616 | feat_num[QED_FCOE_CQ] = min_t(u32, sb_cnt.cnt, |
3617 | RESC_NUM(p_hwfn, |
3618 | QED_CMDQS_CQS)); |
3619 | |
3620 | if (QED_IS_ISCSI_PERSONALITY(p_hwfn)) |
3621 | feat_num[QED_ISCSI_CQ] = min_t(u32, sb_cnt.cnt, |
3622 | RESC_NUM(p_hwfn, |
3623 | QED_CMDQS_CQS)); |
3624 | |
3625 | if (QED_IS_NVMETCP_PERSONALITY(p_hwfn)) |
3626 | feat_num[QED_NVMETCP_CQ] = min_t(u32, sb_cnt.cnt, |
3627 | RESC_NUM(p_hwfn, |
3628 | QED_CMDQS_CQS)); |
3629 | |
3630 | DP_VERBOSE(p_hwfn, |
3631 | NETIF_MSG_PROBE, |
		   "#PF_L2_QUEUES=%d VF_L2_QUEUES=%d #ROCE_CNQ=%d FCOE_CQ=%d ISCSI_CQ=%d NVMETCP_CQ=%d #SBS=%d\n",
3633 | (int)FEAT_NUM(p_hwfn, QED_PF_L2_QUE), |
3634 | (int)FEAT_NUM(p_hwfn, QED_VF_L2_QUE), |
3635 | (int)FEAT_NUM(p_hwfn, QED_RDMA_CNQ), |
3636 | (int)FEAT_NUM(p_hwfn, QED_FCOE_CQ), |
3637 | (int)FEAT_NUM(p_hwfn, QED_ISCSI_CQ), |
3638 | (int)FEAT_NUM(p_hwfn, QED_NVMETCP_CQ), |
3639 | (int)sb_cnt.cnt); |
3640 | } |
3641 | |
3642 | const char *qed_hw_get_resc_name(enum qed_resources res_id) |
3643 | { |
3644 | switch (res_id) { |
	case QED_L2_QUEUE:
		return "L2_QUEUE";
	case QED_VPORT:
		return "VPORT";
	case QED_RSS_ENG:
		return "RSS_ENG";
	case QED_PQ:
		return "PQ";
	case QED_RL:
		return "RL";
	case QED_MAC:
		return "MAC";
	case QED_VLAN:
		return "VLAN";
	case QED_RDMA_CNQ_RAM:
		return "RDMA_CNQ_RAM";
	case QED_ILT:
		return "ILT";
	case QED_LL2_RAM_QUEUE:
		return "LL2_RAM_QUEUE";
	case QED_LL2_CTX_QUEUE:
		return "LL2_CTX_QUEUE";
	case QED_CMDQS_CQS:
		return "CMDQS_CQS";
	case QED_RDMA_STATS_QUEUE:
		return "RDMA_STATS_QUEUE";
	case QED_BDQ:
		return "BDQ";
	case QED_SB:
		return "SB";
	default:
		return "UNKNOWN_RESOURCE";
3677 | } |
3678 | } |
3679 | |
3680 | static int |
3681 | __qed_hw_set_soft_resc_size(struct qed_hwfn *p_hwfn, |
3682 | struct qed_ptt *p_ptt, |
3683 | enum qed_resources res_id, |
3684 | u32 resc_max_val, u32 *p_mcp_resp) |
3685 | { |
3686 | int rc; |
3687 | |
3688 | rc = qed_mcp_set_resc_max_val(p_hwfn, p_ptt, res_id, |
3689 | resc_max_val, p_mcp_resp); |
3690 | if (rc) { |
3691 | DP_NOTICE(p_hwfn, |
			  "MFW response failure for a max value setting of resource %d [%s]\n",
3693 | res_id, qed_hw_get_resc_name(res_id)); |
3694 | return rc; |
3695 | } |
3696 | |
3697 | if (*p_mcp_resp != FW_MSG_CODE_RESOURCE_ALLOC_OK) |
3698 | DP_INFO(p_hwfn, |
			"Failed to set the max value of resource %d [%s]. mcp_resp = 0x%08x.\n",
3700 | res_id, qed_hw_get_resc_name(res_id), *p_mcp_resp); |
3701 | |
3702 | return 0; |
3703 | } |
3704 | |
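/* Rows are indexed by enum qed_hsi_def_type, in order; columns hold the
 * per-chip (BB, K2) values returned by qed_get_hsi_def_val() below.
 */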
3705 | static u32 qed_hsi_def_val[][MAX_CHIP_IDS] = { |
3706 | {MAX_NUM_VFS_BB, MAX_NUM_VFS_K2}, |
3707 | {MAX_NUM_L2_QUEUES_BB, MAX_NUM_L2_QUEUES_K2}, |
3708 | {MAX_NUM_PORTS_BB, MAX_NUM_PORTS_K2}, |
	{MAX_SB_PER_PATH_BB, MAX_SB_PER_PATH_K2},
3710 | {MAX_NUM_PFS_BB, MAX_NUM_PFS_K2}, |
3711 | {MAX_NUM_VPORTS_BB, MAX_NUM_VPORTS_K2}, |
3712 | {ETH_RSS_ENGINE_NUM_BB, ETH_RSS_ENGINE_NUM_K2}, |
3713 | {MAX_QM_TX_QUEUES_BB, MAX_QM_TX_QUEUES_K2}, |
3714 | {PXP_NUM_ILT_RECORDS_BB, PXP_NUM_ILT_RECORDS_K2}, |
3715 | {RDMA_NUM_STATISTIC_COUNTERS_BB, RDMA_NUM_STATISTIC_COUNTERS_K2}, |
3716 | {MAX_QM_GLOBAL_RLS, MAX_QM_GLOBAL_RLS}, |
3717 | {PBF_MAX_CMD_LINES, PBF_MAX_CMD_LINES}, |
3718 | {BTB_MAX_BLOCKS_BB, BTB_MAX_BLOCKS_K2}, |
3719 | }; |
3720 | |
3721 | u32 qed_get_hsi_def_val(struct qed_dev *cdev, enum qed_hsi_def_type type) |
3722 | { |
3723 | enum chip_ids chip_id = QED_IS_BB(cdev) ? CHIP_BB : CHIP_K2; |
3724 | |
3725 | if (type >= QED_NUM_HSI_DEFS) { |
		DP_ERR(cdev, "Unexpected HSI definition type [%d]\n", type);
3727 | return 0; |
3728 | } |
3729 | |
3730 | return qed_hsi_def_val[type][chip_id]; |
3731 | } |
3732 | |
3733 | static int |
3734 | qed_hw_set_soft_resc_size(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) |
3735 | { |
3736 | u32 resc_max_val, mcp_resp; |
3737 | u8 res_id; |
3738 | int rc; |
3739 | |
3740 | for (res_id = 0; res_id < QED_MAX_RESC; res_id++) { |
3741 | switch (res_id) { |
3742 | case QED_LL2_RAM_QUEUE: |
3743 | resc_max_val = MAX_NUM_LL2_RX_RAM_QUEUES; |
3744 | break; |
3745 | case QED_LL2_CTX_QUEUE: |
3746 | resc_max_val = MAX_NUM_LL2_RX_CTX_QUEUES; |
3747 | break; |
3748 | case QED_RDMA_CNQ_RAM: |
3749 | /* No need for a case for QED_CMDQS_CQS since |
3750 | * CNQ/CMDQS are the same resource. |
3751 | */ |
3752 | resc_max_val = NUM_OF_GLOBAL_QUEUES; |
3753 | break; |
3754 | case QED_RDMA_STATS_QUEUE: |
3755 | resc_max_val = |
3756 | NUM_OF_RDMA_STATISTIC_COUNTERS(p_hwfn->cdev); |
3757 | break; |
3758 | case QED_BDQ: |
3759 | resc_max_val = BDQ_NUM_RESOURCES; |
3760 | break; |
3761 | default: |
3762 | continue; |
3763 | } |
3764 | |
3765 | rc = __qed_hw_set_soft_resc_size(p_hwfn, p_ptt, res_id, |
						resc_max_val, &mcp_resp);
3767 | if (rc) |
3768 | return rc; |
3769 | |
3770 | /* There's no point to continue to the next resource if the |
3771 | * command is not supported by the MFW. |
3772 | * We do continue if the command is supported but the resource |
3773 | * is unknown to the MFW. Such a resource will be later |
3774 | * configured with the default allocation values. |
3775 | */ |
3776 | if (mcp_resp == FW_MSG_CODE_UNSUPPORTED) |
3777 | return -EINVAL; |
3778 | } |
3779 | |
3780 | return 0; |
3781 | } |
3782 | |
3783 | static |
3784 | int qed_hw_get_dflt_resc(struct qed_hwfn *p_hwfn, |
3785 | enum qed_resources res_id, |
3786 | u32 *p_resc_num, u32 *p_resc_start) |
3787 | { |
3788 | u8 num_funcs = p_hwfn->num_funcs_on_engine; |
3789 | struct qed_dev *cdev = p_hwfn->cdev; |
3790 | |
3791 | switch (res_id) { |
3792 | case QED_L2_QUEUE: |
3793 | *p_resc_num = NUM_OF_L2_QUEUES(cdev) / num_funcs; |
3794 | break; |
3795 | case QED_VPORT: |
3796 | *p_resc_num = NUM_OF_VPORTS(cdev) / num_funcs; |
3797 | break; |
3798 | case QED_RSS_ENG: |
3799 | *p_resc_num = NUM_OF_RSS_ENGINES(cdev) / num_funcs; |
3800 | break; |
3801 | case QED_PQ: |
3802 | *p_resc_num = NUM_OF_QM_TX_QUEUES(cdev) / num_funcs; |
3803 | *p_resc_num &= ~0x7; /* The granularity of the PQs is 8 */ |
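		/* e.g., 128 QM TX queues shared by 3 PFs: 42 each, rounded
		 * down to 40.
		 */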
3804 | break; |
3805 | case QED_RL: |
3806 | *p_resc_num = NUM_OF_QM_GLOBAL_RLS(cdev) / num_funcs; |
3807 | break; |
3808 | case QED_MAC: |
3809 | case QED_VLAN: |
3810 | /* Each VFC resource can accommodate both a MAC and a VLAN */ |
3811 | *p_resc_num = ETH_NUM_MAC_FILTERS / num_funcs; |
3812 | break; |
3813 | case QED_ILT: |
3814 | *p_resc_num = NUM_OF_PXP_ILT_RECORDS(cdev) / num_funcs; |
3815 | break; |
3816 | case QED_LL2_RAM_QUEUE: |
3817 | *p_resc_num = MAX_NUM_LL2_RX_RAM_QUEUES / num_funcs; |
3818 | break; |
3819 | case QED_LL2_CTX_QUEUE: |
3820 | *p_resc_num = MAX_NUM_LL2_RX_CTX_QUEUES / num_funcs; |
3821 | break; |
3822 | case QED_RDMA_CNQ_RAM: |
3823 | case QED_CMDQS_CQS: |
3824 | /* CNQ/CMDQS are the same resource */ |
3825 | *p_resc_num = NUM_OF_GLOBAL_QUEUES / num_funcs; |
3826 | break; |
3827 | case QED_RDMA_STATS_QUEUE: |
3828 | *p_resc_num = NUM_OF_RDMA_STATISTIC_COUNTERS(cdev) / num_funcs; |
3829 | break; |
3830 | case QED_BDQ: |
3831 | if (p_hwfn->hw_info.personality != QED_PCI_ISCSI && |
3832 | p_hwfn->hw_info.personality != QED_PCI_FCOE && |
3833 | p_hwfn->hw_info.personality != QED_PCI_NVMETCP) |
3834 | *p_resc_num = 0; |
3835 | else |
3836 | *p_resc_num = 1; |
3837 | break; |
3838 | case QED_SB: |
3839 | /* Since we want its value to reflect whether MFW supports |
3840 | * the new scheme, have a default of 0. |
3841 | */ |
3842 | *p_resc_num = 0; |
3843 | break; |
3844 | default: |
3845 | return -EINVAL; |
3846 | } |
3847 | |
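	/* Resource start offsets: BDQ IDs are assigned per port (e.g., on a
	 * 2-port engine, iSCSI/NVMeTCP use BDQ IDs 0-1 while FCoE uses 2-3);
	 * all other resources are laid out contiguously per enabled function.
	 */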
3848 | switch (res_id) { |
3849 | case QED_BDQ: |
3850 | if (!*p_resc_num) |
3851 | *p_resc_start = 0; |
3852 | else if (p_hwfn->cdev->num_ports_in_engine == 4) |
3853 | *p_resc_start = p_hwfn->port_id; |
3854 | else if (p_hwfn->hw_info.personality == QED_PCI_ISCSI || |
3855 | p_hwfn->hw_info.personality == QED_PCI_NVMETCP) |
3856 | *p_resc_start = p_hwfn->port_id; |
3857 | else if (p_hwfn->hw_info.personality == QED_PCI_FCOE) |
3858 | *p_resc_start = p_hwfn->port_id + 2; |
3859 | break; |
3860 | default: |
3861 | *p_resc_start = *p_resc_num * p_hwfn->enabled_func_idx; |
3862 | break; |
3863 | } |
3864 | |
3865 | return 0; |
3866 | } |
3867 | |
3868 | static int __qed_hw_set_resc_info(struct qed_hwfn *p_hwfn, |
3869 | enum qed_resources res_id) |
3870 | { |
3871 | u32 dflt_resc_num = 0, dflt_resc_start = 0; |
3872 | u32 mcp_resp, *p_resc_num, *p_resc_start; |
3873 | int rc; |
3874 | |
3875 | p_resc_num = &RESC_NUM(p_hwfn, res_id); |
3876 | p_resc_start = &RESC_START(p_hwfn, res_id); |
3877 | |
	rc = qed_hw_get_dflt_resc(p_hwfn, res_id, &dflt_resc_num,
				  &dflt_resc_start);
3880 | if (rc) { |
3881 | DP_ERR(p_hwfn, |
		       "Failed to get default amount for resource %d [%s]\n",
3883 | res_id, qed_hw_get_resc_name(res_id)); |
3884 | return rc; |
3885 | } |
3886 | |
	rc = qed_mcp_get_resc_info(p_hwfn, p_hwfn->p_main_ptt, res_id,
				   &mcp_resp, p_resc_num, p_resc_start);
3889 | if (rc) { |
3890 | DP_NOTICE(p_hwfn, |
			  "MFW response failure for an allocation request for resource %d [%s]\n",
3892 | res_id, qed_hw_get_resc_name(res_id)); |
3893 | return rc; |
3894 | } |
3895 | |
3896 | /* Default driver values are applied in the following cases: |
3897 | * - The resource allocation MB command is not supported by the MFW |
3898 | * - There is an internal error in the MFW while processing the request |
3899 | * - The resource ID is unknown to the MFW |
3900 | */ |
3901 | if (mcp_resp != FW_MSG_CODE_RESOURCE_ALLOC_OK) { |
3902 | DP_INFO(p_hwfn, |
			"Failed to receive allocation info for resource %d [%s]. mcp_resp = 0x%x. Applying default values [%d,%d].\n",
3904 | res_id, |
3905 | qed_hw_get_resc_name(res_id), |
3906 | mcp_resp, dflt_resc_num, dflt_resc_start); |
3907 | *p_resc_num = dflt_resc_num; |
3908 | *p_resc_start = dflt_resc_start; |
3909 | goto out; |
3910 | } |
3911 | |
3912 | out: |
	/* PQ counts must be a multiple of 8 [that's the HW granularity].
	 * Reduce the number and start so they fit.
	 */
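	/* e.g., num 13, start 12 are reduced to num 8, start 8 */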
3916 | if ((res_id == QED_PQ) && ((*p_resc_num % 8) || (*p_resc_start % 8))) { |
3917 | DP_INFO(p_hwfn, |
			"PQs need to align by 8; Number %08x --> %08x, Start %08x --> %08x\n",
3919 | *p_resc_num, |
3920 | (*p_resc_num) & ~0x7, |
3921 | *p_resc_start, (*p_resc_start) & ~0x7); |
3922 | *p_resc_num &= ~0x7; |
3923 | *p_resc_start &= ~0x7; |
3924 | } |
3925 | |
3926 | return 0; |
3927 | } |
3928 | |
3929 | static int qed_hw_set_resc_info(struct qed_hwfn *p_hwfn) |
3930 | { |
3931 | int rc; |
3932 | u8 res_id; |
3933 | |
3934 | for (res_id = 0; res_id < QED_MAX_RESC; res_id++) { |
3935 | rc = __qed_hw_set_resc_info(p_hwfn, res_id); |
3936 | if (rc) |
3937 | return rc; |
3938 | } |
3939 | |
3940 | return 0; |
3941 | } |
3942 | |
3943 | static int qed_hw_get_ppfid_bitmap(struct qed_hwfn *p_hwfn, |
3944 | struct qed_ptt *p_ptt) |
3945 | { |
3946 | struct qed_dev *cdev = p_hwfn->cdev; |
3947 | u8 native_ppfid_idx; |
3948 | int rc; |
3949 | |
3950 | /* Calculation of BB/AH is different for native_ppfid_idx */ |
3951 | if (QED_IS_BB(cdev)) |
3952 | native_ppfid_idx = p_hwfn->rel_pf_id; |
3953 | else |
3954 | native_ppfid_idx = p_hwfn->rel_pf_id / |
3955 | cdev->num_ports_in_engine; |
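	/* e.g., on an AH with 2 ports per engine, rel_pf_id 3 maps to
	 * native PPFID 1 (3 / 2).
	 */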
3956 | |
3957 | rc = qed_mcp_get_ppfid_bitmap(p_hwfn, p_ptt); |
3958 | if (rc != 0 && rc != -EOPNOTSUPP) |
3959 | return rc; |
3960 | else if (rc == -EOPNOTSUPP) |
3961 | cdev->ppfid_bitmap = 0x1 << native_ppfid_idx; |
3962 | |
3963 | if (!(cdev->ppfid_bitmap & (0x1 << native_ppfid_idx))) { |
3964 | DP_INFO(p_hwfn, |
			"Fix the PPFID bitmap to include the native PPFID [native_ppfid_idx %hhd, orig_bitmap 0x%hhx]\n",
3966 | native_ppfid_idx, cdev->ppfid_bitmap); |
3967 | cdev->ppfid_bitmap = 0x1 << native_ppfid_idx; |
3968 | } |
3969 | |
3970 | return 0; |
3971 | } |
3972 | |
3973 | static int qed_hw_get_resc(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) |
3974 | { |
3975 | struct qed_resc_unlock_params resc_unlock_params; |
3976 | struct qed_resc_lock_params resc_lock_params; |
3977 | bool b_ah = QED_IS_AH(p_hwfn->cdev); |
3978 | u8 res_id; |
3979 | int rc; |
3980 | |
3981 | /* Setting the max values of the soft resources and the following |
3982 | * resources allocation queries should be atomic. Since several PFs can |
3983 | * run in parallel - a resource lock is needed. |
3984 | * If either the resource lock or resource set value commands are not |
3985 | * supported - skip the max values setting, release the lock if |
3986 | * needed, and proceed to the queries. Other failures, including a |
3987 | * failure to acquire the lock, will cause this function to fail. |
3988 | */ |
	qed_mcp_resc_lock_default_init(&resc_lock_params, &resc_unlock_params,
				       QED_RESC_LOCK_RESC_ALLOC, false);
3991 | |
	rc = qed_mcp_resc_lock(p_hwfn, p_ptt, &resc_lock_params);
3993 | if (rc && rc != -EINVAL) { |
3994 | return rc; |
3995 | } else if (rc == -EINVAL) { |
		DP_INFO(p_hwfn,
			"Skip the max values setting of the soft resources since the resource lock is not supported by the MFW\n");
	} else if (!resc_lock_params.b_granted) {
		DP_NOTICE(p_hwfn,
			  "Failed to acquire the resource lock for the resource allocation commands\n");
		return -EBUSY;
	} else {
		rc = qed_hw_set_soft_resc_size(p_hwfn, p_ptt);
		if (rc && rc != -EINVAL) {
			DP_NOTICE(p_hwfn,
				  "Failed to set the max values of the soft resources\n");
			goto unlock_and_exit;
		} else if (rc == -EINVAL) {
			DP_INFO(p_hwfn,
				"Skip the max values setting of the soft resources since it is not supported by the MFW\n");
			rc = qed_mcp_resc_unlock(p_hwfn, p_ptt,
						 &resc_unlock_params);
			if (rc)
				DP_INFO(p_hwfn,
					"Failed to release the resource lock for the resource allocation commands\n");
4016 | } |
4017 | } |
4018 | |
4019 | rc = qed_hw_set_resc_info(p_hwfn); |
4020 | if (rc) |
4021 | goto unlock_and_exit; |
4022 | |
4023 | if (resc_lock_params.b_granted && !resc_unlock_params.b_released) { |
		rc = qed_mcp_resc_unlock(p_hwfn, p_ptt, &resc_unlock_params);
		if (rc)
			DP_INFO(p_hwfn,
				"Failed to release the resource lock for the resource allocation commands\n");
4028 | } |
4029 | |
4030 | /* PPFID bitmap */ |
4031 | if (IS_LEAD_HWFN(p_hwfn)) { |
4032 | rc = qed_hw_get_ppfid_bitmap(p_hwfn, p_ptt); |
4033 | if (rc) |
4034 | return rc; |
4035 | } |
4036 | |
4037 | /* Sanity for ILT */ |
4038 | if ((b_ah && (RESC_END(p_hwfn, QED_ILT) > PXP_NUM_ILT_RECORDS_K2)) || |
4039 | (!b_ah && (RESC_END(p_hwfn, QED_ILT) > PXP_NUM_ILT_RECORDS_BB))) { |
		DP_NOTICE(p_hwfn, "Can't assign ILT pages [%08x,...,%08x]\n",
4041 | RESC_START(p_hwfn, QED_ILT), |
4042 | RESC_END(p_hwfn, QED_ILT) - 1); |
4043 | return -EINVAL; |
4044 | } |
4045 | |
4046 | /* This will also learn the number of SBs from MFW */ |
4047 | if (qed_int_igu_reset_cam(p_hwfn, p_ptt)) |
4048 | return -EINVAL; |
4049 | |
4050 | qed_hw_set_feat(p_hwfn); |
4051 | |
4052 | for (res_id = 0; res_id < QED_MAX_RESC; res_id++) |
		DP_VERBOSE(p_hwfn, NETIF_MSG_PROBE, "%s = %d start = %d\n",
4054 | qed_hw_get_resc_name(res_id), |
4055 | RESC_NUM(p_hwfn, res_id), |
4056 | RESC_START(p_hwfn, res_id)); |
4057 | |
4058 | return 0; |
4059 | |
4060 | unlock_and_exit: |
4061 | if (resc_lock_params.b_granted && !resc_unlock_params.b_released) |
		qed_mcp_resc_unlock(p_hwfn, p_ptt, &resc_unlock_params);
4063 | return rc; |
4064 | } |
4065 | |
4066 | static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) |
4067 | { |
4068 | u32 port_cfg_addr, link_temp, nvm_cfg_addr, device_capabilities, fld; |
4069 | u32 nvm_cfg1_offset, mf_mode, addr, generic_cont0, core_cfg; |
4070 | struct qed_mcp_link_speed_params *ext_speed; |
4071 | struct qed_mcp_link_capabilities *p_caps; |
4072 | struct qed_mcp_link_params *link; |
4073 | int i; |
4074 | |
4075 | /* Read global nvm_cfg address */ |
4076 | nvm_cfg_addr = qed_rd(p_hwfn, p_ptt, MISC_REG_GEN_PURP_CR0); |
4077 | |
4078 | /* Verify MCP has initialized it */ |
4079 | if (!nvm_cfg_addr) { |
		DP_NOTICE(p_hwfn, "Shared memory not initialized\n");
4081 | return -EINVAL; |
4082 | } |
4083 | |
	/* Read nvm_cfg1 (notice this is just an offset, not the offsize (TBD)) */
	nvm_cfg1_offset = qed_rd(p_hwfn, p_ptt, nvm_cfg_addr + 4);
4086 | |
4087 | addr = MCP_REG_SCRATCH + nvm_cfg1_offset + |
4088 | offsetof(struct nvm_cfg1, glob) + |
4089 | offsetof(struct nvm_cfg1_glob, core_cfg); |
4090 | |
	core_cfg = qed_rd(p_hwfn, p_ptt, addr);
4092 | |
4093 | switch ((core_cfg & NVM_CFG1_GLOB_NETWORK_PORT_MODE_MASK) >> |
4094 | NVM_CFG1_GLOB_NETWORK_PORT_MODE_OFFSET) { |
4095 | case NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_2X40G: |
4096 | case NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X50G: |
4097 | case NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_1X100G: |
4098 | case NVM_CFG1_GLOB_NETWORK_PORT_MODE_4X10G_F: |
4099 | case NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_4X10G_E: |
4100 | case NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_4X20G: |
4101 | case NVM_CFG1_GLOB_NETWORK_PORT_MODE_1X40G: |
4102 | case NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X25G: |
4103 | case NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X10G: |
4104 | case NVM_CFG1_GLOB_NETWORK_PORT_MODE_1X25G: |
4105 | case NVM_CFG1_GLOB_NETWORK_PORT_MODE_4X25G: |
4106 | case NVM_CFG1_GLOB_NETWORK_PORT_MODE_AHP_2X50G_R1: |
4107 | case NVM_CFG1_GLOB_NETWORK_PORT_MODE_AHP_4X50G_R1: |
4108 | case NVM_CFG1_GLOB_NETWORK_PORT_MODE_AHP_1X100G_R2: |
4109 | case NVM_CFG1_GLOB_NETWORK_PORT_MODE_AHP_2X100G_R2: |
4110 | case NVM_CFG1_GLOB_NETWORK_PORT_MODE_AHP_1X100G_R4: |
4111 | break; |
4112 | default: |
		DP_NOTICE(p_hwfn, "Unknown port mode in 0x%08x\n", core_cfg);
4114 | break; |
4115 | } |
4116 | |
4117 | /* Read default link configuration */ |
4118 | link = &p_hwfn->mcp_info->link_input; |
4119 | p_caps = &p_hwfn->mcp_info->link_capabilities; |
4120 | port_cfg_addr = MCP_REG_SCRATCH + nvm_cfg1_offset + |
4121 | offsetof(struct nvm_cfg1, port[MFW_PORT(p_hwfn)]); |
	link_temp = qed_rd(p_hwfn, p_ptt,
			   port_cfg_addr +
			   offsetof(struct nvm_cfg1_port, speed_cap_mask));
4125 | link_temp &= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_MASK; |
4126 | link->speed.advertised_speeds = link_temp; |
4127 | |
4128 | p_caps->speed_capabilities = link->speed.advertised_speeds; |
4129 | |
	link_temp = qed_rd(p_hwfn, p_ptt,
			   port_cfg_addr +
			   offsetof(struct nvm_cfg1_port, link_settings));
4133 | switch ((link_temp & NVM_CFG1_PORT_DRV_LINK_SPEED_MASK) >> |
4134 | NVM_CFG1_PORT_DRV_LINK_SPEED_OFFSET) { |
4135 | case NVM_CFG1_PORT_DRV_LINK_SPEED_AUTONEG: |
4136 | link->speed.autoneg = true; |
4137 | break; |
4138 | case NVM_CFG1_PORT_DRV_LINK_SPEED_1G: |
4139 | link->speed.forced_speed = 1000; |
4140 | break; |
4141 | case NVM_CFG1_PORT_DRV_LINK_SPEED_10G: |
4142 | link->speed.forced_speed = 10000; |
4143 | break; |
4144 | case NVM_CFG1_PORT_DRV_LINK_SPEED_20G: |
4145 | link->speed.forced_speed = 20000; |
4146 | break; |
4147 | case NVM_CFG1_PORT_DRV_LINK_SPEED_25G: |
4148 | link->speed.forced_speed = 25000; |
4149 | break; |
4150 | case NVM_CFG1_PORT_DRV_LINK_SPEED_40G: |
4151 | link->speed.forced_speed = 40000; |
4152 | break; |
4153 | case NVM_CFG1_PORT_DRV_LINK_SPEED_50G: |
4154 | link->speed.forced_speed = 50000; |
4155 | break; |
4156 | case NVM_CFG1_PORT_DRV_LINK_SPEED_BB_100G: |
4157 | link->speed.forced_speed = 100000; |
4158 | break; |
4159 | default: |
		DP_NOTICE(p_hwfn, "Unknown Speed in 0x%08x\n", link_temp);
4161 | } |
4162 | |
4163 | p_caps->default_speed_autoneg = link->speed.autoneg; |
4164 | |
4165 | fld = GET_MFW_FIELD(link_temp, NVM_CFG1_PORT_DRV_FLOW_CONTROL); |
4166 | link->pause.autoneg = !!(fld & NVM_CFG1_PORT_DRV_FLOW_CONTROL_AUTONEG); |
4167 | link->pause.forced_rx = !!(fld & NVM_CFG1_PORT_DRV_FLOW_CONTROL_RX); |
4168 | link->pause.forced_tx = !!(fld & NVM_CFG1_PORT_DRV_FLOW_CONTROL_TX); |
4169 | link->loopback_mode = 0; |
4170 | |
4171 | if (p_hwfn->mcp_info->capabilities & |
4172 | FW_MB_PARAM_FEATURE_SUPPORT_FEC_CONTROL) { |
4173 | switch (GET_MFW_FIELD(link_temp, |
4174 | NVM_CFG1_PORT_FEC_FORCE_MODE)) { |
4175 | case NVM_CFG1_PORT_FEC_FORCE_MODE_NONE: |
4176 | p_caps->fec_default |= QED_FEC_MODE_NONE; |
4177 | break; |
4178 | case NVM_CFG1_PORT_FEC_FORCE_MODE_FIRECODE: |
4179 | p_caps->fec_default |= QED_FEC_MODE_FIRECODE; |
4180 | break; |
4181 | case NVM_CFG1_PORT_FEC_FORCE_MODE_RS: |
4182 | p_caps->fec_default |= QED_FEC_MODE_RS; |
4183 | break; |
4184 | case NVM_CFG1_PORT_FEC_FORCE_MODE_AUTO: |
4185 | p_caps->fec_default |= QED_FEC_MODE_AUTO; |
4186 | break; |
4187 | default: |
4188 | DP_VERBOSE(p_hwfn, NETIF_MSG_LINK, |
				   "unknown FEC mode in 0x%08x\n", link_temp);
4190 | } |
4191 | } else { |
4192 | p_caps->fec_default = QED_FEC_MODE_UNSUPPORTED; |
4193 | } |
4194 | |
4195 | link->fec = p_caps->fec_default; |
4196 | |
4197 | if (p_hwfn->mcp_info->capabilities & FW_MB_PARAM_FEATURE_SUPPORT_EEE) { |
		link_temp = qed_rd(p_hwfn, p_ptt, port_cfg_addr +
4199 | offsetof(struct nvm_cfg1_port, ext_phy)); |
4200 | link_temp &= NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_MASK; |
4201 | link_temp >>= NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_OFFSET; |
4202 | p_caps->default_eee = QED_MCP_EEE_ENABLED; |
4203 | link->eee.enable = true; |
4204 | switch (link_temp) { |
4205 | case NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_DISABLED: |
4206 | p_caps->default_eee = QED_MCP_EEE_DISABLED; |
4207 | link->eee.enable = false; |
4208 | break; |
4209 | case NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_BALANCED: |
4210 | p_caps->eee_lpi_timer = EEE_TX_TIMER_USEC_BALANCED_TIME; |
4211 | break; |
4212 | case NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_AGGRESSIVE: |
4213 | p_caps->eee_lpi_timer = |
4214 | EEE_TX_TIMER_USEC_AGGRESSIVE_TIME; |
4215 | break; |
4216 | case NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_LOW_LATENCY: |
4217 | p_caps->eee_lpi_timer = EEE_TX_TIMER_USEC_LATENCY_TIME; |
4218 | break; |
4219 | } |
4220 | |
4221 | link->eee.tx_lpi_timer = p_caps->eee_lpi_timer; |
4222 | link->eee.tx_lpi_enable = link->eee.enable; |
4223 | link->eee.adv_caps = QED_EEE_1G_ADV | QED_EEE_10G_ADV; |
4224 | } else { |
4225 | p_caps->default_eee = QED_MCP_EEE_UNSUPPORTED; |
4226 | } |
4227 | |
4228 | if (p_hwfn->mcp_info->capabilities & |
4229 | FW_MB_PARAM_FEATURE_SUPPORT_EXT_SPEED_FEC_CONTROL) { |
4230 | ext_speed = &link->ext_speed; |
4231 | |
		link_temp = qed_rd(p_hwfn, p_ptt,
				   port_cfg_addr +
				   offsetof(struct nvm_cfg1_port,
					    extended_speed));
4236 | |
4237 | fld = GET_MFW_FIELD(link_temp, NVM_CFG1_PORT_EXTENDED_SPEED); |
4238 | if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_AN) |
4239 | ext_speed->autoneg = true; |
4240 | |
4241 | ext_speed->forced_speed = 0; |
4242 | if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_1G) |
4243 | ext_speed->forced_speed |= QED_EXT_SPEED_1G; |
4244 | if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_10G) |
4245 | ext_speed->forced_speed |= QED_EXT_SPEED_10G; |
4246 | if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_20G) |
4247 | ext_speed->forced_speed |= QED_EXT_SPEED_20G; |
4248 | if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_25G) |
4249 | ext_speed->forced_speed |= QED_EXT_SPEED_25G; |
4250 | if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_40G) |
4251 | ext_speed->forced_speed |= QED_EXT_SPEED_40G; |
4252 | if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_50G_R) |
4253 | ext_speed->forced_speed |= QED_EXT_SPEED_50G_R; |
4254 | if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_50G_R2) |
4255 | ext_speed->forced_speed |= QED_EXT_SPEED_50G_R2; |
4256 | if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_100G_R2) |
4257 | ext_speed->forced_speed |= QED_EXT_SPEED_100G_R2; |
4258 | if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_100G_R4) |
4259 | ext_speed->forced_speed |= QED_EXT_SPEED_100G_R4; |
4260 | if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_100G_P4) |
4261 | ext_speed->forced_speed |= QED_EXT_SPEED_100G_P4; |
4262 | |
4263 | fld = GET_MFW_FIELD(link_temp, |
4264 | NVM_CFG1_PORT_EXTENDED_SPEED_CAP); |
4265 | |
4266 | ext_speed->advertised_speeds = 0; |
4267 | if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_RESERVED) |
4268 | ext_speed->advertised_speeds |= QED_EXT_SPEED_MASK_RES; |
4269 | if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_1G) |
4270 | ext_speed->advertised_speeds |= QED_EXT_SPEED_MASK_1G; |
4271 | if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_10G) |
4272 | ext_speed->advertised_speeds |= QED_EXT_SPEED_MASK_10G; |
4273 | if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_20G) |
4274 | ext_speed->advertised_speeds |= QED_EXT_SPEED_MASK_20G; |
4275 | if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_25G) |
4276 | ext_speed->advertised_speeds |= QED_EXT_SPEED_MASK_25G; |
4277 | if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_40G) |
4278 | ext_speed->advertised_speeds |= QED_EXT_SPEED_MASK_40G; |
4279 | if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_50G_R) |
4280 | ext_speed->advertised_speeds |= |
4281 | QED_EXT_SPEED_MASK_50G_R; |
4282 | if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_50G_R2) |
4283 | ext_speed->advertised_speeds |= |
4284 | QED_EXT_SPEED_MASK_50G_R2; |
4285 | if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_100G_R2) |
4286 | ext_speed->advertised_speeds |= |
4287 | QED_EXT_SPEED_MASK_100G_R2; |
4288 | if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_100G_R4) |
4289 | ext_speed->advertised_speeds |= |
4290 | QED_EXT_SPEED_MASK_100G_R4; |
4291 | if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_100G_P4) |
4292 | ext_speed->advertised_speeds |= |
4293 | QED_EXT_SPEED_MASK_100G_P4; |
4294 | |
		link_temp = qed_rd(p_hwfn, p_ptt,
				   port_cfg_addr +
				   offsetof(struct nvm_cfg1_port,
					    extended_fec_mode));
4299 | link->ext_fec_mode = link_temp; |
4300 | |
4301 | p_caps->default_ext_speed_caps = ext_speed->advertised_speeds; |
4302 | p_caps->default_ext_speed = ext_speed->forced_speed; |
4303 | p_caps->default_ext_autoneg = ext_speed->autoneg; |
4304 | p_caps->default_ext_fec = link->ext_fec_mode; |
4305 | |
4306 | DP_VERBOSE(p_hwfn, NETIF_MSG_LINK, |
			   "Read default extended link config: Speed 0x%08x, Adv. Speed 0x%08x, AN: 0x%02x, FEC: 0x%02x\n",
4308 | ext_speed->forced_speed, |
4309 | ext_speed->advertised_speeds, ext_speed->autoneg, |
4310 | p_caps->default_ext_fec); |
4311 | } |
4312 | |
4313 | DP_VERBOSE(p_hwfn, NETIF_MSG_LINK, |
		   "Read default link: Speed 0x%08x, Adv. Speed 0x%08x, AN: 0x%02x, PAUSE AN: 0x%02x, EEE: 0x%02x [0x%08x usec], FEC: 0x%02x\n",
4315 | link->speed.forced_speed, link->speed.advertised_speeds, |
4316 | link->speed.autoneg, link->pause.autoneg, |
4317 | p_caps->default_eee, p_caps->eee_lpi_timer, |
4318 | p_caps->fec_default); |
4319 | |
4320 | if (IS_LEAD_HWFN(p_hwfn)) { |
4321 | struct qed_dev *cdev = p_hwfn->cdev; |
4322 | |
4323 | /* Read Multi-function information from shmem */ |
4324 | addr = MCP_REG_SCRATCH + nvm_cfg1_offset + |
4325 | offsetof(struct nvm_cfg1, glob) + |
4326 | offsetof(struct nvm_cfg1_glob, generic_cont0); |
4327 | |
		generic_cont0 = qed_rd(p_hwfn, p_ptt, addr);
4329 | |
4330 | mf_mode = (generic_cont0 & NVM_CFG1_GLOB_MF_MODE_MASK) >> |
4331 | NVM_CFG1_GLOB_MF_MODE_OFFSET; |
4332 | |
4333 | switch (mf_mode) { |
4334 | case NVM_CFG1_GLOB_MF_MODE_MF_ALLOWED: |
4335 | cdev->mf_bits = BIT(QED_MF_OVLAN_CLSS); |
4336 | break; |
4337 | case NVM_CFG1_GLOB_MF_MODE_UFP: |
4338 | cdev->mf_bits = BIT(QED_MF_OVLAN_CLSS) | |
4339 | BIT(QED_MF_LLH_PROTO_CLSS) | |
4340 | BIT(QED_MF_UFP_SPECIFIC) | |
4341 | BIT(QED_MF_8021Q_TAGGING) | |
4342 | BIT(QED_MF_DONT_ADD_VLAN0_TAG); |
4343 | break; |
4344 | case NVM_CFG1_GLOB_MF_MODE_BD: |
4345 | cdev->mf_bits = BIT(QED_MF_OVLAN_CLSS) | |
4346 | BIT(QED_MF_LLH_PROTO_CLSS) | |
4347 | BIT(QED_MF_8021AD_TAGGING) | |
4348 | BIT(QED_MF_DONT_ADD_VLAN0_TAG); |
4349 | break; |
4350 | case NVM_CFG1_GLOB_MF_MODE_NPAR1_0: |
4351 | cdev->mf_bits = BIT(QED_MF_LLH_MAC_CLSS) | |
4352 | BIT(QED_MF_LLH_PROTO_CLSS) | |
4353 | BIT(QED_MF_LL2_NON_UNICAST) | |
4354 | BIT(QED_MF_INTER_PF_SWITCH) | |
4355 | BIT(QED_MF_DISABLE_ARFS); |
4356 | break; |
4357 | case NVM_CFG1_GLOB_MF_MODE_DEFAULT: |
4358 | cdev->mf_bits = BIT(QED_MF_LLH_MAC_CLSS) | |
4359 | BIT(QED_MF_LLH_PROTO_CLSS) | |
4360 | BIT(QED_MF_LL2_NON_UNICAST); |
4361 | if (QED_IS_BB(p_hwfn->cdev)) |
4362 | cdev->mf_bits |= BIT(QED_MF_NEED_DEF_PF); |
4363 | break; |
4364 | } |
4365 | |
		DP_INFO(p_hwfn, "Multi function mode is 0x%lx\n",
4367 | cdev->mf_bits); |
4368 | |
		/* In CMT the PF is unknown when the GFS block processes the
		 * packet. Therefore the searcher, which has a per-PF
		 * database, cannot be used, and ARFS must be disabled.
		 */
4374 | if (QED_IS_CMT(cdev)) |
4375 | cdev->mf_bits |= BIT(QED_MF_DISABLE_ARFS); |
4376 | } |
4377 | |
	DP_INFO(p_hwfn, "Multi function mode is 0x%lx\n",
4379 | p_hwfn->cdev->mf_bits); |
4380 | |
4381 | /* Read device capabilities information from shmem */ |
4382 | addr = MCP_REG_SCRATCH + nvm_cfg1_offset + |
4383 | offsetof(struct nvm_cfg1, glob) + |
4384 | offsetof(struct nvm_cfg1_glob, device_capabilities); |
4385 | |
	device_capabilities = qed_rd(p_hwfn, p_ptt, addr);
4387 | if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ETHERNET) |
4388 | __set_bit(QED_DEV_CAP_ETH, |
4389 | &p_hwfn->hw_info.device_capabilities); |
4390 | if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_FCOE) |
4391 | __set_bit(QED_DEV_CAP_FCOE, |
4392 | &p_hwfn->hw_info.device_capabilities); |
4393 | if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ISCSI) |
4394 | __set_bit(QED_DEV_CAP_ISCSI, |
4395 | &p_hwfn->hw_info.device_capabilities); |
4396 | if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ROCE) |
4397 | __set_bit(QED_DEV_CAP_ROCE, |
4398 | &p_hwfn->hw_info.device_capabilities); |
4399 | |
4400 | /* Read device serial number information from shmem */ |
4401 | addr = MCP_REG_SCRATCH + nvm_cfg1_offset + |
4402 | offsetof(struct nvm_cfg1, glob) + |
4403 | offsetof(struct nvm_cfg1_glob, serial_number); |
4404 | |
4405 | for (i = 0; i < 4; i++) |
		p_hwfn->hw_info.part_num[i] = qed_rd(p_hwfn, p_ptt, addr + i * 4);
4407 | |
4408 | return qed_mcp_fill_shmem_func_info(p_hwfn, p_ptt); |
4409 | } |
4410 | |
4411 | static void qed_get_num_funcs(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) |
4412 | { |
4413 | u8 num_funcs, enabled_func_idx = p_hwfn->rel_pf_id; |
4414 | u32 reg_function_hide, tmp, eng_mask, low_pfs_mask; |
4415 | struct qed_dev *cdev = p_hwfn->cdev; |
4416 | |
4417 | num_funcs = QED_IS_AH(cdev) ? MAX_NUM_PFS_K2 : MAX_NUM_PFS_BB; |
4418 | |
4419 | /* Bit 0 of MISCS_REG_FUNCTION_HIDE indicates whether the bypass values |
4420 | * in the other bits are selected. |
4421 | * Bits 1-15 are for functions 1-15, respectively, and their value is |
	 * '0' only for enabled functions (function 0 always exists and is
	 * enabled).
4424 | * In case of CMT, only the "even" functions are enabled, and thus the |
4425 | * number of functions for both hwfns is learnt from the same bits. |
4426 | */ |
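	/* e.g., on a non-CMT BB engine 0 (eng_mask 0x5554), a value of
	 * 0xfff1 leaves only function 2 set within the mask, so together
	 * with function 0 num_funcs ends up as 2.
	 */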
4427 | reg_function_hide = qed_rd(p_hwfn, p_ptt, MISCS_REG_FUNCTION_HIDE); |
4428 | |
4429 | if (reg_function_hide & 0x1) { |
4430 | if (QED_IS_BB(cdev)) { |
4431 | if (QED_PATH_ID(p_hwfn) && cdev->num_hwfns == 1) { |
4432 | num_funcs = 0; |
4433 | eng_mask = 0xaaaa; |
4434 | } else { |
4435 | num_funcs = 1; |
4436 | eng_mask = 0x5554; |
4437 | } |
4438 | } else { |
4439 | num_funcs = 1; |
4440 | eng_mask = 0xfffe; |
4441 | } |
4442 | |
4443 | /* Get the number of the enabled functions on the engine */ |
4444 | tmp = (reg_function_hide ^ 0xffffffff) & eng_mask; |
4445 | while (tmp) { |
4446 | if (tmp & 0x1) |
4447 | num_funcs++; |
4448 | tmp >>= 0x1; |
4449 | } |
4450 | |
4451 | /* Get the PF index within the enabled functions */ |
4452 | low_pfs_mask = (0x1 << p_hwfn->abs_pf_id) - 1; |
4453 | tmp = reg_function_hide & eng_mask & low_pfs_mask; |
4454 | while (tmp) { |
4455 | if (tmp & 0x1) |
4456 | enabled_func_idx--; |
4457 | tmp >>= 0x1; |
4458 | } |
4459 | } |
4460 | |
4461 | p_hwfn->num_funcs_on_engine = num_funcs; |
4462 | p_hwfn->enabled_func_idx = enabled_func_idx; |
4463 | |
4464 | DP_VERBOSE(p_hwfn, |
4465 | NETIF_MSG_PROBE, |
		   "PF [rel_id %d, abs_id %d] occupies index %d within the %d enabled functions on the engine\n",
4467 | p_hwfn->rel_pf_id, |
4468 | p_hwfn->abs_pf_id, |
4469 | p_hwfn->enabled_func_idx, p_hwfn->num_funcs_on_engine); |
4470 | } |
4471 | |
4472 | static void qed_hw_info_port_num(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) |
4473 | { |
4474 | u32 addr, global_offsize, global_addr, port_mode; |
4475 | struct qed_dev *cdev = p_hwfn->cdev; |
4476 | |
4477 | /* In CMT there is always only one port */ |
4478 | if (cdev->num_hwfns > 1) { |
4479 | cdev->num_ports_in_engine = 1; |
4480 | cdev->num_ports = 1; |
4481 | return; |
4482 | } |
4483 | |
4484 | /* Determine the number of ports per engine */ |
4485 | port_mode = qed_rd(p_hwfn, p_ptt, MISC_REG_PORT_MODE); |
4486 | switch (port_mode) { |
4487 | case 0x0: |
4488 | cdev->num_ports_in_engine = 1; |
4489 | break; |
4490 | case 0x1: |
4491 | cdev->num_ports_in_engine = 2; |
4492 | break; |
4493 | case 0x2: |
4494 | cdev->num_ports_in_engine = 4; |
4495 | break; |
4496 | default: |
		DP_NOTICE(p_hwfn, "Unknown port mode 0x%08x\n", port_mode);
4498 | cdev->num_ports_in_engine = 1; /* Default to something */ |
4499 | break; |
4500 | } |
4501 | |
4502 | /* Get the total number of ports of the device */ |
4503 | addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base, |
4504 | PUBLIC_GLOBAL); |
	global_offsize = qed_rd(p_hwfn, p_ptt, addr);
4506 | global_addr = SECTION_ADDR(global_offsize, 0); |
4507 | addr = global_addr + offsetof(struct public_global, max_ports); |
	cdev->num_ports = (u8)qed_rd(p_hwfn, p_ptt, addr);
4509 | } |
4510 | |
4511 | static void qed_get_eee_caps(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) |
4512 | { |
4513 | struct qed_mcp_link_capabilities *p_caps; |
4514 | u32 eee_status; |
4515 | |
4516 | p_caps = &p_hwfn->mcp_info->link_capabilities; |
4517 | if (p_caps->default_eee == QED_MCP_EEE_UNSUPPORTED) |
4518 | return; |
4519 | |
4520 | p_caps->eee_speed_caps = 0; |
	eee_status = qed_rd(p_hwfn, p_ptt, p_hwfn->mcp_info->port_addr +
			    offsetof(struct public_port, eee_status));
4523 | eee_status = (eee_status & EEE_SUPPORTED_SPEED_MASK) >> |
4524 | EEE_SUPPORTED_SPEED_OFFSET; |
4525 | |
4526 | if (eee_status & EEE_1G_SUPPORTED) |
4527 | p_caps->eee_speed_caps |= QED_EEE_1G_ADV; |
	if (eee_status & EEE_10G_SUPPORTED)
4529 | p_caps->eee_speed_caps |= QED_EEE_10G_ADV; |
4530 | } |
4531 | |
4532 | static int |
4533 | qed_get_hw_info(struct qed_hwfn *p_hwfn, |
4534 | struct qed_ptt *p_ptt, |
4535 | enum qed_pci_personality personality) |
4536 | { |
4537 | int rc; |
4538 | |
	/* Since all information is common, only the first hwfn should do this */
4540 | if (IS_LEAD_HWFN(p_hwfn)) { |
4541 | rc = qed_iov_hw_info(p_hwfn); |
4542 | if (rc) |
4543 | return rc; |
4544 | } |
4545 | |
4546 | if (IS_LEAD_HWFN(p_hwfn)) |
4547 | qed_hw_info_port_num(p_hwfn, p_ptt); |
4548 | |
4549 | qed_mcp_get_capabilities(p_hwfn, p_ptt); |
4550 | |
4551 | qed_hw_get_nvm_info(p_hwfn, p_ptt); |
4552 | |
4553 | rc = qed_int_igu_read_cam(p_hwfn, p_ptt); |
4554 | if (rc) |
4555 | return rc; |
4556 | |
4557 | if (qed_mcp_is_init(p_hwfn)) |
		ether_addr_copy(p_hwfn->hw_info.hw_mac_addr,
				p_hwfn->mcp_info->func_info.mac);
	else
		eth_random_addr(p_hwfn->hw_info.hw_mac_addr);
4562 | |
4563 | if (qed_mcp_is_init(p_hwfn)) { |
4564 | if (p_hwfn->mcp_info->func_info.ovlan != QED_MCP_VLAN_UNSET) |
4565 | p_hwfn->hw_info.ovlan = |
4566 | p_hwfn->mcp_info->func_info.ovlan; |
4567 | |
4568 | qed_mcp_cmd_port_init(p_hwfn, p_ptt); |
4569 | |
4570 | qed_get_eee_caps(p_hwfn, p_ptt); |
4571 | |
4572 | qed_mcp_read_ufp_config(p_hwfn, p_ptt); |
4573 | } |
4574 | |
4575 | if (qed_mcp_is_init(p_hwfn)) { |
4576 | enum qed_pci_personality protocol; |
4577 | |
4578 | protocol = p_hwfn->mcp_info->func_info.protocol; |
4579 | p_hwfn->hw_info.personality = protocol; |
4580 | } |
4581 | |
4582 | if (QED_IS_ROCE_PERSONALITY(p_hwfn)) |
4583 | p_hwfn->hw_info.multi_tc_roce_en = true; |
4584 | |
4585 | p_hwfn->hw_info.num_hw_tc = NUM_PHYS_TCS_4PORT_K2; |
4586 | p_hwfn->hw_info.num_active_tc = 1; |
4587 | |
4588 | qed_get_num_funcs(p_hwfn, p_ptt); |
4589 | |
4590 | if (qed_mcp_is_init(p_hwfn)) |
4591 | p_hwfn->hw_info.mtu = p_hwfn->mcp_info->func_info.mtu; |
4592 | |
4593 | return qed_hw_get_resc(p_hwfn, p_ptt); |
4594 | } |
4595 | |
4596 | static int qed_get_dev_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) |
4597 | { |
4598 | struct qed_dev *cdev = p_hwfn->cdev; |
4599 | u16 device_id_mask; |
4600 | u32 tmp; |
4601 | |
4602 | /* Read Vendor Id / Device Id */ |
	pci_read_config_word(cdev->pdev, PCI_VENDOR_ID, &cdev->vendor_id);
	pci_read_config_word(cdev->pdev, PCI_DEVICE_ID, &cdev->device_id);
4605 | |
4606 | /* Determine type */ |
4607 | device_id_mask = cdev->device_id & QED_DEV_ID_MASK; |
4608 | switch (device_id_mask) { |
4609 | case QED_DEV_ID_MASK_BB: |
4610 | cdev->type = QED_DEV_TYPE_BB; |
4611 | break; |
4612 | case QED_DEV_ID_MASK_AH: |
4613 | cdev->type = QED_DEV_TYPE_AH; |
4614 | break; |
4615 | default: |
		DP_NOTICE(p_hwfn, "Unknown device id 0x%x\n", cdev->device_id);
4617 | return -EBUSY; |
4618 | } |
4619 | |
4620 | cdev->chip_num = (u16)qed_rd(p_hwfn, p_ptt, MISCS_REG_CHIP_NUM); |
4621 | cdev->chip_rev = (u16)qed_rd(p_hwfn, p_ptt, MISCS_REG_CHIP_REV); |
4622 | |
4623 | MASK_FIELD(CHIP_REV, cdev->chip_rev); |
4624 | |
4625 | /* Learn number of HW-functions */ |
4626 | tmp = qed_rd(p_hwfn, p_ptt, MISCS_REG_CMT_ENABLED_FOR_PAIR); |
4627 | |
4628 | if (tmp & (1 << p_hwfn->rel_pf_id)) { |
		DP_NOTICE(cdev->hwfns, "device in CMT mode\n");
4630 | cdev->num_hwfns = 2; |
4631 | } else { |
4632 | cdev->num_hwfns = 1; |
4633 | } |
4634 | |
4635 | cdev->chip_bond_id = qed_rd(p_hwfn, p_ptt, |
4636 | MISCS_REG_CHIP_TEST_REG) >> 4; |
4637 | MASK_FIELD(CHIP_BOND_ID, cdev->chip_bond_id); |
4638 | cdev->chip_metal = (u16)qed_rd(p_hwfn, p_ptt, MISCS_REG_CHIP_METAL); |
4639 | MASK_FIELD(CHIP_METAL, cdev->chip_metal); |
4640 | |
4641 | DP_INFO(cdev->hwfns, |
		"Chip details - %s %c%d, Num: %04x Rev: %04x Bond id: %04x Metal: %04x\n",
		QED_IS_BB(cdev) ? "BB" : "AH",
4644 | 'A' + cdev->chip_rev, |
4645 | (int)cdev->chip_metal, |
4646 | cdev->chip_num, cdev->chip_rev, |
4647 | cdev->chip_bond_id, cdev->chip_metal); |
4648 | |
4649 | return 0; |
4650 | } |
4651 | |
4652 | static int qed_hw_prepare_single(struct qed_hwfn *p_hwfn, |
4653 | void __iomem *p_regview, |
4654 | void __iomem *p_doorbells, |
4655 | u64 db_phys_addr, |
4656 | enum qed_pci_personality personality) |
4657 | { |
4658 | struct qed_dev *cdev = p_hwfn->cdev; |
4659 | int rc = 0; |
4660 | |
4661 | /* Split PCI bars evenly between hwfns */ |
4662 | p_hwfn->regview = p_regview; |
4663 | p_hwfn->doorbells = p_doorbells; |
4664 | p_hwfn->db_phys_addr = db_phys_addr; |
4665 | |
4666 | if (IS_VF(p_hwfn->cdev)) |
4667 | return qed_vf_hw_prepare(p_hwfn); |
4668 | |
4669 | /* Validate that chip access is feasible */ |
4670 | if (REG_RD(p_hwfn, PXP_PF_ME_OPAQUE_ADDR) == 0xffffffff) { |
4671 | DP_ERR(p_hwfn, |
		       "Reading the ME register returns all Fs; Preventing further chip access\n");
4673 | return -EINVAL; |
4674 | } |
4675 | |
4676 | get_function_id(p_hwfn); |
4677 | |
4678 | /* Allocate PTT pool */ |
4679 | rc = qed_ptt_pool_alloc(p_hwfn); |
4680 | if (rc) |
4681 | goto err0; |
4682 | |
4683 | /* Allocate the main PTT */ |
	p_hwfn->p_main_ptt = qed_get_reserved_ptt(p_hwfn, RESERVED_PTT_MAIN);
4685 | |
4686 | /* First hwfn learns basic information, e.g., number of hwfns */ |
4687 | if (!p_hwfn->my_id) { |
		rc = qed_get_dev_info(p_hwfn, p_hwfn->p_main_ptt);
4689 | if (rc) |
4690 | goto err1; |
4691 | } |
4692 | |
4693 | qed_hw_hwfn_prepare(p_hwfn); |
4694 | |
4695 | /* Initialize MCP structure */ |
	rc = qed_mcp_cmd_init(p_hwfn, p_hwfn->p_main_ptt);
	if (rc) {
		DP_NOTICE(p_hwfn, "Failed initializing mcp command\n");
4699 | goto err1; |
4700 | } |
4701 | |
4702 | /* Read the device configuration information from the HW and SHMEM */ |
	rc = qed_get_hw_info(p_hwfn, p_hwfn->p_main_ptt, personality);
	if (rc) {
		DP_NOTICE(p_hwfn, "Failed to get HW information\n");
4706 | goto err2; |
4707 | } |
4708 | |
4709 | /* Sending a mailbox to the MFW should be done after qed_get_hw_info() |
4710 | * is called as it sets the ports number in an engine. |
4711 | */ |
4712 | if (IS_LEAD_HWFN(p_hwfn) && !cdev->recov_in_prog) { |
		rc = qed_mcp_initiate_pf_flr(p_hwfn, p_hwfn->p_main_ptt);
		if (rc)
			DP_NOTICE(p_hwfn, "Failed to initiate PF FLR\n");
4716 | } |
4717 | |
4718 | /* NVRAM info initialization and population */ |
4719 | if (IS_LEAD_HWFN(p_hwfn)) { |
4720 | rc = qed_mcp_nvm_info_populate(p_hwfn); |
4721 | if (rc) { |
4722 | DP_NOTICE(p_hwfn, |
				  "Failed to populate nvm info shadow\n");
4724 | goto err2; |
4725 | } |
4726 | } |
4727 | |
4728 | /* Allocate the init RT array and initialize the init-ops engine */ |
4729 | rc = qed_init_alloc(p_hwfn); |
4730 | if (rc) |
4731 | goto err3; |
4732 | |
4733 | return rc; |
4734 | err3: |
4735 | if (IS_LEAD_HWFN(p_hwfn)) |
4736 | qed_mcp_nvm_info_free(p_hwfn); |
4737 | err2: |
4738 | if (IS_LEAD_HWFN(p_hwfn)) |
		qed_iov_free_hw_info(p_hwfn->cdev);
4740 | qed_mcp_free(p_hwfn); |
4741 | err1: |
4742 | qed_hw_hwfn_free(p_hwfn); |
4743 | err0: |
4744 | return rc; |
4745 | } |
4746 | |
4747 | int qed_hw_prepare(struct qed_dev *cdev, |
4748 | int personality) |
4749 | { |
4750 | struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev); |
4751 | int rc; |
4752 | |
4753 | /* Store the precompiled init data ptrs */ |
4754 | if (IS_PF(cdev)) |
4755 | qed_init_iro_array(cdev); |
4756 | |
4757 | /* Initialize the first hwfn - will learn number of hwfns */ |
	rc = qed_hw_prepare_single(p_hwfn,
				   cdev->regview,
				   cdev->doorbells,
				   cdev->db_phys_addr,
				   personality);
4763 | if (rc) |
4764 | return rc; |
4765 | |
4766 | personality = p_hwfn->hw_info.personality; |
4767 | |
4768 | /* Initialize the rest of the hwfns */ |
4769 | if (cdev->num_hwfns > 1) { |
4770 | void __iomem *p_regview, *p_doorbell; |
4771 | u64 db_phys_addr; |
4772 | u32 offset; |
4773 | |
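		/* Each hwfn gets half of each BAR: e.g., a 32 MB BAR0 yields
		 * a 16 MB regview window per hwfn, so hwfn 1's window starts
		 * 16 MB into the mapped BAR.
		 */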
4774 | /* adjust bar offset for second engine */ |
		offset = qed_hw_bar_size(p_hwfn, p_hwfn->p_main_ptt,
					 BAR_ID_0) / 2;
		p_regview = cdev->regview + offset;

		offset = qed_hw_bar_size(p_hwfn, p_hwfn->p_main_ptt,
					 BAR_ID_1) / 2;
4781 | |
4782 | p_doorbell = cdev->doorbells + offset; |
4783 | |
4784 | db_phys_addr = cdev->db_phys_addr + offset; |
4785 | |
4786 | /* prepare second hw function */ |
		rc = qed_hw_prepare_single(&cdev->hwfns[1], p_regview,
					   p_doorbell, db_phys_addr,
					   personality);
4790 | |
		/* In case of an error, we need to free the previously
		 * initialized hwfn 0.
		 */
4794 | if (rc) { |
4795 | if (IS_PF(cdev)) { |
4796 | qed_init_free(p_hwfn); |
4797 | qed_mcp_nvm_info_free(p_hwfn); |
4798 | qed_mcp_free(p_hwfn); |
4799 | qed_hw_hwfn_free(p_hwfn); |
4800 | } |
4801 | } |
4802 | } |
4803 | |
4804 | return rc; |
4805 | } |
4806 | |
4807 | void qed_hw_remove(struct qed_dev *cdev) |
4808 | { |
4809 | struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev); |
4810 | int i; |
4811 | |
4812 | if (IS_PF(cdev)) |
		qed_mcp_ov_update_driver_state(p_hwfn, p_hwfn->p_main_ptt,
					       QED_OV_DRIVER_STATE_NOT_LOADED);
4815 | |
4816 | for_each_hwfn(cdev, i) { |
4817 | struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; |
4818 | |
4819 | if (IS_VF(cdev)) { |
4820 | qed_vf_pf_release(p_hwfn); |
4821 | continue; |
4822 | } |
4823 | |
4824 | qed_init_free(p_hwfn); |
4825 | qed_hw_hwfn_free(p_hwfn); |
4826 | qed_mcp_free(p_hwfn); |
4827 | } |
4828 | |
4829 | qed_iov_free_hw_info(cdev); |
4830 | |
4831 | qed_mcp_nvm_info_free(p_hwfn); |
4832 | } |
4833 | |
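/* Translate a PF-relative L2 queue index into its absolute, device-wide
 * index; qed_fw_vport() and qed_fw_rss_eng() below use the same scheme.
 */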
4834 | int qed_fw_l2_queue(struct qed_hwfn *p_hwfn, u16 src_id, u16 *dst_id) |
4835 | { |
4836 | if (src_id >= RESC_NUM(p_hwfn, QED_L2_QUEUE)) { |
4837 | u16 min, max; |
4838 | |
4839 | min = (u16)RESC_START(p_hwfn, QED_L2_QUEUE); |
4840 | max = min + RESC_NUM(p_hwfn, QED_L2_QUEUE); |
4841 | DP_NOTICE(p_hwfn, |
			  "l2_queue id [%d] is not valid, available indices [%d - %d]\n",
4843 | src_id, min, max); |
4844 | |
4845 | return -EINVAL; |
4846 | } |
4847 | |
4848 | *dst_id = RESC_START(p_hwfn, QED_L2_QUEUE) + src_id; |
4849 | |
4850 | return 0; |
4851 | } |
4852 | |
4853 | int qed_fw_vport(struct qed_hwfn *p_hwfn, u8 src_id, u8 *dst_id) |
4854 | { |
4855 | if (src_id >= RESC_NUM(p_hwfn, QED_VPORT)) { |
4856 | u8 min, max; |
4857 | |
4858 | min = (u8)RESC_START(p_hwfn, QED_VPORT); |
4859 | max = min + RESC_NUM(p_hwfn, QED_VPORT); |
4860 | DP_NOTICE(p_hwfn, |
			  "vport id [%d] is not valid, available indices [%d - %d]\n",
4862 | src_id, min, max); |
4863 | |
4864 | return -EINVAL; |
4865 | } |
4866 | |
4867 | *dst_id = RESC_START(p_hwfn, QED_VPORT) + src_id; |
4868 | |
4869 | return 0; |
4870 | } |
4871 | |
int qed_fw_rss_eng(struct qed_hwfn *p_hwfn, u8 src_id, u8 *dst_id)
4873 | { |
4874 | if (src_id >= RESC_NUM(p_hwfn, QED_RSS_ENG)) { |
4875 | u8 min, max; |
4876 | |
4877 | min = (u8)RESC_START(p_hwfn, QED_RSS_ENG); |
4878 | max = min + RESC_NUM(p_hwfn, QED_RSS_ENG); |
4879 | DP_NOTICE(p_hwfn, |
			  "rss_eng id [%d] is not valid, available indices [%d - %d]\n",
4881 | src_id, min, max); |
4882 | |
4883 | return -EINVAL; |
4884 | } |
4885 | |
4886 | *dst_id = RESC_START(p_hwfn, QED_RSS_ENG) + src_id; |
4887 | |
4888 | return 0; |
4889 | } |
4890 | |
4891 | static int qed_set_coalesce(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, |
4892 | u32 hw_addr, void *p_eth_qzone, |
4893 | size_t eth_qzone_size, u8 timeset) |
4894 | { |
4895 | struct coalescing_timeset *p_coal_timeset; |
4896 | |
4897 | if (p_hwfn->cdev->int_coalescing_mode != QED_COAL_MODE_ENABLE) { |
		DP_NOTICE(p_hwfn, "Coalescing configuration not enabled\n");
4899 | return -EINVAL; |
4900 | } |
4901 | |
4902 | p_coal_timeset = p_eth_qzone; |
4903 | memset(p_eth_qzone, 0, eth_qzone_size); |
4904 | SET_FIELD(p_coal_timeset->value, COALESCING_TIMESET_TIMESET, timeset); |
4905 | SET_FIELD(p_coal_timeset->value, COALESCING_TIMESET_VALID, 1); |
	qed_memcpy_to(p_hwfn, p_ptt, hw_addr, p_eth_qzone, eth_qzone_size);
4907 | |
4908 | return 0; |
4909 | } |
4910 | |
4911 | int qed_set_queue_coalesce(u16 rx_coal, u16 tx_coal, void *p_handle) |
4912 | { |
4913 | struct qed_queue_cid *p_cid = p_handle; |
4914 | struct qed_hwfn *p_hwfn; |
4915 | struct qed_ptt *p_ptt; |
4916 | int rc = 0; |
4917 | |
4918 | p_hwfn = p_cid->p_owner; |
4919 | |
4920 | if (IS_VF(p_hwfn->cdev)) |
4921 | return qed_vf_pf_set_coalesce(p_hwfn, rx_coal, tx_coal, p_cid); |
4922 | |
4923 | p_ptt = qed_ptt_acquire(p_hwfn); |
4924 | if (!p_ptt) |
4925 | return -EAGAIN; |
4926 | |
4927 | if (rx_coal) { |
		rc = qed_set_rxq_coalesce(p_hwfn, p_ptt, rx_coal, p_cid);
4929 | if (rc) |
4930 | goto out; |
4931 | p_hwfn->cdev->rx_coalesce_usecs = rx_coal; |
4932 | } |
4933 | |
4934 | if (tx_coal) { |
		rc = qed_set_txq_coalesce(p_hwfn, p_ptt, tx_coal, p_cid);
4936 | if (rc) |
4937 | goto out; |
4938 | p_hwfn->cdev->tx_coalesce_usecs = tx_coal; |
4939 | } |
4940 | out: |
4941 | qed_ptt_release(p_hwfn, p_ptt); |
4942 | return rc; |
4943 | } |

int qed_set_rxq_coalesce(struct qed_hwfn *p_hwfn,
			 struct qed_ptt *p_ptt,
			 u16 coalesce, struct qed_queue_cid *p_cid)
{
	struct ustorm_eth_queue_zone eth_qzone;
	u8 timeset, timer_res;
	u32 address;
	int rc;

	/* Coalesce = (timeset << timer-resolution), timeset is 7-bit wide */
	if (coalesce <= 0x7F) {
		timer_res = 0;
	} else if (coalesce <= 0xFF) {
		timer_res = 1;
	} else if (coalesce <= 0x1FF) {
		timer_res = 2;
	} else {
		DP_ERR(p_hwfn, "Invalid coalesce value - %d\n", coalesce);
		return -EINVAL;
	}
	timeset = (u8)(coalesce >> timer_res);
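	/* Worked example (hypothetical value): coalesce = 496 (0x1F0)
	 * selects timer_res = 2, so timeset = 496 >> 2 = 124, which fits
	 * in the 7-bit timeset field.
	 */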

	rc = qed_int_set_timer_res(p_hwfn, p_ptt, timer_res,
				   p_cid->sb_igu_id, false);
	if (rc)
		goto out;

	address = BAR0_MAP_REG_USDM_RAM +
		  USTORM_ETH_QUEUE_ZONE_GTT_OFFSET(p_cid->abs.queue_id);

	rc = qed_set_coalesce(p_hwfn, p_ptt, address, &eth_qzone,
			      sizeof(struct ustorm_eth_queue_zone), timeset);
out:
	return rc;
}

int qed_set_txq_coalesce(struct qed_hwfn *p_hwfn,
			 struct qed_ptt *p_ptt,
			 u16 coalesce, struct qed_queue_cid *p_cid)
{
	struct xstorm_eth_queue_zone eth_qzone;
	u8 timeset, timer_res;
	u32 address;
	int rc;

	/* Coalesce = (timeset << timer-resolution), timeset is 7-bit wide */
	if (coalesce <= 0x7F) {
		timer_res = 0;
	} else if (coalesce <= 0xFF) {
		timer_res = 1;
	} else if (coalesce <= 0x1FF) {
		timer_res = 2;
	} else {
		DP_ERR(p_hwfn, "Invalid coalesce value - %d\n", coalesce);
		return -EINVAL;
	}
	timeset = (u8)(coalesce >> timer_res);

	rc = qed_int_set_timer_res(p_hwfn, p_ptt, timer_res,
				   p_cid->sb_igu_id, true);
	if (rc)
		goto out;

	address = BAR0_MAP_REG_XSDM_RAM +
		  XSTORM_ETH_QUEUE_ZONE_GTT_OFFSET(p_cid->abs.queue_id);

	rc = qed_set_coalesce(p_hwfn, p_ptt, address, &eth_qzone,
			      sizeof(struct xstorm_eth_queue_zone), timeset);
out:
	return rc;
}

/* Calculate final WFQ values for all vports and configure them.
 * After this configuration each vport will have an approximate min rate of
 * min_pf_rate * (vport_wfq / QED_WFQ_UNIT).
 */
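/* Worked example (hypothetical numbers, taking QED_WFQ_UNIT as 100, i.e.
 * one-percent granularity as the checks below imply): with
 * min_pf_rate = 10000 Mbps and a vport min_speed of 2500 Mbps,
 * wfq = (2500 * 100) / 10000 = 25, giving an approximate min rate of
 * 10000 * (25 / 100) = 2500 Mbps.
 */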
static void qed_configure_wfq_for_all_vports(struct qed_hwfn *p_hwfn,
					     struct qed_ptt *p_ptt,
					     u32 min_pf_rate)
{
	struct init_qm_vport_params *vport_params;
	int i;

	vport_params = p_hwfn->qm_info.qm_vport_params;

	for (i = 0; i < p_hwfn->qm_info.num_vports; i++) {
		u32 wfq_speed = p_hwfn->qm_info.wfq_data[i].min_speed;

		vport_params[i].wfq = (wfq_speed * QED_WFQ_UNIT) /
				      min_pf_rate;
		qed_init_vport_wfq(p_hwfn, p_ptt,
				   vport_params[i].first_tx_pq_id,
				   vport_params[i].wfq);
	}
}

static void qed_init_wfq_default_param(struct qed_hwfn *p_hwfn,
				       u32 min_pf_rate)
{
	int i;

	for (i = 0; i < p_hwfn->qm_info.num_vports; i++)
		p_hwfn->qm_info.qm_vport_params[i].wfq = 1;
}

static void qed_disable_wfq_for_all_vports(struct qed_hwfn *p_hwfn,
					   struct qed_ptt *p_ptt,
					   u32 min_pf_rate)
{
	struct init_qm_vport_params *vport_params;
	int i;

	vport_params = p_hwfn->qm_info.qm_vport_params;

	for (i = 0; i < p_hwfn->qm_info.num_vports; i++) {
		qed_init_wfq_default_param(p_hwfn, min_pf_rate);
		qed_init_vport_wfq(p_hwfn, p_ptt,
				   vport_params[i].first_tx_pq_id,
				   vport_params[i].wfq);
	}
}

/* This function performs several validations of the WFQ configuration and
 * the requested min rate for a given vport:
 * 1. req_rate must be greater than one percent of min_pf_rate.
 * 2. req_rate should not cause the rates of other vports [not explicitly
 *    configured for WFQ] to drop below one percent of min_pf_rate.
 * 3. total_req_min_rate [the sum of all vport min rates] must not exceed
 *    min_pf_rate.
 */
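/* Worked example (hypothetical numbers): num_vports = 4,
 * min_pf_rate = 10000 Mbps, and one vport requesting 6000 Mbps leaves
 * 4000 Mbps for the 3 remaining vports, i.e. 1333 Mbps each - above the
 * one-percent floor of 100 Mbps, so the request is accepted.
 */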
static int qed_init_wfq_param(struct qed_hwfn *p_hwfn,
			      u16 vport_id, u32 req_rate, u32 min_pf_rate)
{
	u32 total_req_min_rate = 0, total_left_rate = 0, left_rate_per_vp = 0;
	int non_requested_count = 0, req_count = 0, i, num_vports;

	num_vports = p_hwfn->qm_info.num_vports;

	if (num_vports < 2) {
		DP_NOTICE(p_hwfn, "Unexpected num_vports: %d\n", num_vports);
		return -EINVAL;
	}

	/* Accounting for the vports which are configured for WFQ explicitly */
	for (i = 0; i < num_vports; i++) {
		u32 tmp_speed;

		if ((i != vport_id) &&
		    p_hwfn->qm_info.wfq_data[i].configured) {
			req_count++;
			tmp_speed = p_hwfn->qm_info.wfq_data[i].min_speed;
			total_req_min_rate += tmp_speed;
		}
	}

	/* Include current vport data as well */
	req_count++;
	total_req_min_rate += req_rate;
	non_requested_count = num_vports - req_count;

	if (req_rate < min_pf_rate / QED_WFQ_UNIT) {
		DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
			   "Vport [%d] - Requested rate[%d Mbps] is less than one percent of configured PF min rate[%d Mbps]\n",
			   vport_id, req_rate, min_pf_rate);
		return -EINVAL;
	}

	if (num_vports > QED_WFQ_UNIT) {
		DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
			   "Number of vports is greater than %d\n",
			   QED_WFQ_UNIT);
		return -EINVAL;
	}

	if (total_req_min_rate > min_pf_rate) {
		DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
			   "Total requested min rate for all vports[%d Mbps] is greater than configured PF min rate[%d Mbps]\n",
			   total_req_min_rate, min_pf_rate);
		return -EINVAL;
	}

	total_left_rate = min_pf_rate - total_req_min_rate;

	left_rate_per_vp = total_left_rate / non_requested_count;
	if (left_rate_per_vp < min_pf_rate / QED_WFQ_UNIT) {
		DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
			   "Non WFQ configured vports rate [%d Mbps] is less than one percent of configured PF min rate[%d Mbps]\n",
			   left_rate_per_vp, min_pf_rate);
		return -EINVAL;
	}

	p_hwfn->qm_info.wfq_data[vport_id].min_speed = req_rate;
	p_hwfn->qm_info.wfq_data[vport_id].configured = true;

	for (i = 0; i < num_vports; i++) {
		if (p_hwfn->qm_info.wfq_data[i].configured)
			continue;

		p_hwfn->qm_info.wfq_data[i].min_speed = left_rate_per_vp;
	}

	return 0;
}

static int __qed_configure_vport_wfq(struct qed_hwfn *p_hwfn,
				     struct qed_ptt *p_ptt, u16 vp_id, u32 rate)
{
	struct qed_mcp_link_state *p_link;
	int rc = 0;

	p_link = &p_hwfn->cdev->hwfns[0].mcp_info->link_output;

	if (!p_link->min_pf_rate) {
		p_hwfn->qm_info.wfq_data[vp_id].min_speed = rate;
		p_hwfn->qm_info.wfq_data[vp_id].configured = true;
		return rc;
	}

	rc = qed_init_wfq_param(p_hwfn, vp_id, rate, p_link->min_pf_rate);

	if (!rc)
		qed_configure_wfq_for_all_vports(p_hwfn, p_ptt,
						 p_link->min_pf_rate);
	else
		DP_NOTICE(p_hwfn,
			  "Validation failed while configuring min rate\n");

	return rc;
}

static int __qed_configure_vp_wfq_on_link_change(struct qed_hwfn *p_hwfn,
						 struct qed_ptt *p_ptt,
						 u32 min_pf_rate)
{
	bool use_wfq = false;
	int rc = 0;
	u16 i;

	/* Validate all pre-configured vports for wfq */
	for (i = 0; i < p_hwfn->qm_info.num_vports; i++) {
		u32 rate;

		if (!p_hwfn->qm_info.wfq_data[i].configured)
			continue;

		rate = p_hwfn->qm_info.wfq_data[i].min_speed;
		use_wfq = true;

		rc = qed_init_wfq_param(p_hwfn, i, rate, min_pf_rate);
		if (rc) {
			DP_NOTICE(p_hwfn,
				  "WFQ validation failed while configuring min rate\n");
			break;
		}
	}

	if (!rc && use_wfq)
		qed_configure_wfq_for_all_vports(p_hwfn, p_ptt, min_pf_rate);
	else
		qed_disable_wfq_for_all_vports(p_hwfn, p_ptt, min_pf_rate);

	return rc;
}

/* Main API for qed clients to configure vport min rate.
 * vp_id - vport id in PF, in the range [0 .. total_num_vports_per_pf - 1]
 * rate - speed in Mbps to be assigned to the given vport.
 */
int qed_configure_vport_wfq(struct qed_dev *cdev, u16 vp_id, u32 rate)
{
	int i, rc = -EINVAL;

	/* Currently not supported; might change in the future */
	if (cdev->num_hwfns > 1) {
		DP_NOTICE(cdev,
			  "WFQ configuration is not supported for this device\n");
		return rc;
	}

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
		struct qed_ptt *p_ptt;

		p_ptt = qed_ptt_acquire(p_hwfn);
		if (!p_ptt)
			return -EBUSY;

		rc = __qed_configure_vport_wfq(p_hwfn, p_ptt, vp_id, rate);

		if (rc) {
			qed_ptt_release(p_hwfn, p_ptt);
			return rc;
		}

		qed_ptt_release(p_hwfn, p_ptt);
	}

	return rc;
}

/* API to configure WFQ from mcp link change */
void qed_configure_vp_wfq_on_link_change(struct qed_dev *cdev,
					 struct qed_ptt *p_ptt, u32 min_pf_rate)
{
	int i;

	if (cdev->num_hwfns > 1) {
		DP_VERBOSE(cdev,
			   NETIF_MSG_LINK,
			   "WFQ configuration is not supported for this device\n");
		return;
	}

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		__qed_configure_vp_wfq_on_link_change(p_hwfn, p_ptt,
						      min_pf_rate);
	}
}

int __qed_configure_pf_max_bandwidth(struct qed_hwfn *p_hwfn,
				     struct qed_ptt *p_ptt,
				     struct qed_mcp_link_state *p_link,
				     u8 max_bw)
{
	int rc = 0;

	p_hwfn->mcp_info->func_info.bandwidth_max = max_bw;

	if (!p_link->line_speed && (max_bw != 100))
		return rc;

	p_link->speed = (p_link->line_speed * max_bw) / 100;
	p_hwfn->qm_info.pf_rl = p_link->speed;
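	/* e.g. (hypothetical numbers): line_speed = 25000 Mbps with
	 * max_bw = 40 yields p_link->speed = pf_rl = 10000 Mbps.
	 */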

	/* Since the limiter also affects Tx-switched traffic, we don't want it
	 * to limit such traffic in case there's no actual limit.
	 * In that case, set the limit to an imaginary high boundary.
	 */
	if (max_bw == 100)
		p_hwfn->qm_info.pf_rl = 100000;

	rc = qed_init_pf_rl(p_hwfn, p_ptt, p_hwfn->rel_pf_id,
			    p_hwfn->qm_info.pf_rl);

	DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
		   "Configured MAX bandwidth to be %08x Mb/sec\n",
		   p_link->speed);

	return rc;
}

/* Main API to configure PF max bandwidth where bw range is [1 - 100] */
int qed_configure_pf_max_bandwidth(struct qed_dev *cdev, u8 max_bw)
{
	int i, rc = -EINVAL;

	if (max_bw < 1 || max_bw > 100) {
		DP_NOTICE(cdev, "PF max bw valid range is [1-100]\n");
		return rc;
	}

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
		struct qed_hwfn *p_lead = QED_LEADING_HWFN(cdev);
		struct qed_mcp_link_state *p_link;
		struct qed_ptt *p_ptt;

		p_link = &p_lead->mcp_info->link_output;

		p_ptt = qed_ptt_acquire(p_hwfn);
		if (!p_ptt)
			return -EBUSY;

		rc = __qed_configure_pf_max_bandwidth(p_hwfn, p_ptt,
						      p_link, max_bw);

		qed_ptt_release(p_hwfn, p_ptt);

		if (rc)
			break;
	}

	return rc;
}

int __qed_configure_pf_min_bandwidth(struct qed_hwfn *p_hwfn,
				     struct qed_ptt *p_ptt,
				     struct qed_mcp_link_state *p_link,
				     u8 min_bw)
{
	int rc = 0;

	p_hwfn->mcp_info->func_info.bandwidth_min = min_bw;
	p_hwfn->qm_info.pf_wfq = min_bw;

	if (!p_link->line_speed)
		return rc;

	p_link->min_pf_rate = (p_link->line_speed * min_bw) / 100;

	rc = qed_init_pf_wfq(p_hwfn, p_ptt, p_hwfn->rel_pf_id, min_bw);

	DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
		   "Configured MIN bandwidth to be %d Mb/sec\n",
		   p_link->min_pf_rate);

	return rc;
}

/* Main API to configure PF min bandwidth where bw range is [1-100] */
int qed_configure_pf_min_bandwidth(struct qed_dev *cdev, u8 min_bw)
{
	int i, rc = -EINVAL;

	if (min_bw < 1 || min_bw > 100) {
		DP_NOTICE(cdev, "PF min bw valid range is [1-100]\n");
		return rc;
	}

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
		struct qed_hwfn *p_lead = QED_LEADING_HWFN(cdev);
		struct qed_mcp_link_state *p_link;
		struct qed_ptt *p_ptt;

		p_link = &p_lead->mcp_info->link_output;

		p_ptt = qed_ptt_acquire(p_hwfn);
		if (!p_ptt)
			return -EBUSY;

		rc = __qed_configure_pf_min_bandwidth(p_hwfn, p_ptt,
						      p_link, min_bw);
		if (rc) {
			qed_ptt_release(p_hwfn, p_ptt);
			return rc;
		}

		if (p_link->min_pf_rate) {
			u32 min_rate = p_link->min_pf_rate;

			rc = __qed_configure_vp_wfq_on_link_change(p_hwfn,
								   p_ptt,
								   min_rate);
		}

		qed_ptt_release(p_hwfn, p_ptt);
	}

	return rc;
}

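/* Disable WFQ on all vports (if a PF min rate was configured) and clear the
 * per-vport WFQ shadow data.
 */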
void qed_clean_wfq_db(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	struct qed_mcp_link_state *p_link;

	p_link = &p_hwfn->mcp_info->link_output;

	if (p_link->min_pf_rate)
		qed_disable_wfq_for_all_vports(p_hwfn, p_ptt,
					       p_link->min_pf_rate);

	memset(p_hwfn->qm_info.wfq_data, 0,
	       sizeof(*p_hwfn->qm_info.wfq_data) * p_hwfn->qm_info.num_vports);
}

int qed_device_num_ports(struct qed_dev *cdev)
{
	return cdev->num_ports;
}

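/* Pack a 6-byte MAC address into the three little-endian 16-bit words used
 * by the firmware, swapping each byte pair (mac[0] becomes the high byte of
 * fw_msb, and so on).
 */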
void qed_set_fw_mac_addr(__le16 *fw_msb,
			 __le16 *fw_mid, __le16 *fw_lsb, u8 *mac)
{
	((u8 *)fw_msb)[0] = mac[1];
	((u8 *)fw_msb)[1] = mac[0];
	((u8 *)fw_mid)[0] = mac[3];
	((u8 *)fw_mid)[1] = mac[2];
	((u8 *)fw_lsb)[0] = mac[5];
	((u8 *)fw_lsb)[1] = mac[4];
}

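/* Clear the driver's shadow copy of all LLH filters for the given ppfid */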
static int qed_llh_shadow_remove_all_filters(struct qed_dev *cdev, u8 ppfid)
{
	struct qed_llh_info *p_llh_info = cdev->p_llh_info;
	struct qed_llh_filter_info *p_filters;
	int rc;

	rc = qed_llh_shadow_sanity(cdev, ppfid, 0, "remove_all");
	if (rc)
		return rc;

	p_filters = p_llh_info->pp_filters[ppfid];
	memset(p_filters, 0, NIG_REG_LLH_FUNC_FILTER_EN_SIZE *
	       sizeof(*p_filters));

	return 0;
}

static void qed_llh_clear_ppfid_filters(struct qed_dev *cdev, u8 ppfid)
{
	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn);
	u8 filter_idx, abs_ppfid;
	int rc = 0;

	if (!p_ptt)
		return;

	if (!test_bit(QED_MF_LLH_PROTO_CLSS, &cdev->mf_bits) &&
	    !test_bit(QED_MF_LLH_MAC_CLSS, &cdev->mf_bits))
		goto out;

	rc = qed_llh_abs_ppfid(cdev, ppfid, &abs_ppfid);
	if (rc)
		goto out;

	rc = qed_llh_shadow_remove_all_filters(cdev, ppfid);
	if (rc)
		goto out;

	for (filter_idx = 0; filter_idx < NIG_REG_LLH_FUNC_FILTER_EN_SIZE;
	     filter_idx++) {
		rc = qed_llh_remove_filter(p_hwfn, p_ptt,
					   abs_ppfid, filter_idx);
		if (rc)
			goto out;
	}
out:
	qed_ptt_release(p_hwfn, p_ptt);
}

int qed_llh_add_src_tcp_port_filter(struct qed_dev *cdev, u16 src_port)
{
	return qed_llh_add_protocol_filter(cdev, 0,
					   QED_LLH_FILTER_TCP_SRC_PORT,
					   src_port, QED_LLH_DONT_CARE);
}

void qed_llh_remove_src_tcp_port_filter(struct qed_dev *cdev, u16 src_port)
{
	qed_llh_remove_protocol_filter(cdev, 0,
				       QED_LLH_FILTER_TCP_SRC_PORT,
				       src_port, QED_LLH_DONT_CARE);
}

int qed_llh_add_dst_tcp_port_filter(struct qed_dev *cdev, u16 dest_port)
{
	return qed_llh_add_protocol_filter(cdev, 0,
					   QED_LLH_FILTER_TCP_DEST_PORT,
					   QED_LLH_DONT_CARE, dest_port);
}

void qed_llh_remove_dst_tcp_port_filter(struct qed_dev *cdev, u16 dest_port)
{
	qed_llh_remove_protocol_filter(cdev, 0,
				       QED_LLH_FILTER_TCP_DEST_PORT,
				       QED_LLH_DONT_CARE, dest_port);
}

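/* Remove all LLH filters on every ppfid; a no-op unless protocol- or
 * MAC-based classification is enabled.
 */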
void qed_llh_clear_all_filters(struct qed_dev *cdev)
{
	u8 ppfid;

	if (!test_bit(QED_MF_LLH_PROTO_CLSS, &cdev->mf_bits) &&
	    !test_bit(QED_MF_LLH_MAC_CLSS, &cdev->mf_bits))
		return;

	for (ppfid = 0; ppfid < cdev->p_llh_info->num_ppfid; ppfid++)
		qed_llh_clear_ppfid_filters(cdev, ppfid);
}