// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2016 Cavium, Inc.
 */

#include <linux/device.h>
#include <linux/firmware.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/pci.h>
#include <linux/printk.h>

#include "cptpf.h"

#define DRV_NAME "thunder-cpt"
#define DRV_VERSION "1.0"

static u32 num_vfs = 4; /* Default 4 VF enabled */
module_param(num_vfs, uint, 0444);
MODULE_PARM_DESC(num_vfs, "Number of VFs to enable (4-16)");

/*
 * Disable cores specified by coremask
 */
static void cpt_disable_cores(struct cpt_device *cpt, u64 coremask,
                              u8 type, u8 grp)
{
        u64 pf_exe_ctl;
        u64 busy;
        u32 timeout = 100;
        u64 grpmask = 0;
        struct device *dev = &cpt->pdev->dev;

        if (type == AE_TYPES)
                coremask = (coremask << cpt->max_se_cores);

        /* Disengage the cores from groups */
        grpmask = cpt_read_csr64(cpt->reg_base, CPTX_PF_GX_EN(0, grp));
        cpt_write_csr64(cpt->reg_base, CPTX_PF_GX_EN(0, grp),
                        (grpmask & ~coremask));
        udelay(CSR_DELAY);
        /* Poll until the targeted cores go idle or the timeout expires */
        busy = cpt_read_csr64(cpt->reg_base, CPTX_PF_EXEC_BUSY(0));
        while (busy & coremask) {
                dev_err(dev, "Cores still busy %llx", coremask);
                busy = cpt_read_csr64(cpt->reg_base,
                                      CPTX_PF_EXEC_BUSY(0));
                if (!timeout--)
                        break;

                udelay(CSR_DELAY);
        }

        /* Disable the cores */
        pf_exe_ctl = cpt_read_csr64(cpt->reg_base, CPTX_PF_EXE_CTL(0));
        cpt_write_csr64(cpt->reg_base, CPTX_PF_EXE_CTL(0),
                        (pf_exe_ctl & ~coremask));
        udelay(CSR_DELAY);
}

/*
 * Enable cores specified by coremask
 */
static void cpt_enable_cores(struct cpt_device *cpt, u64 coremask,
                             u8 type)
{
        u64 pf_exe_ctl;

        if (type == AE_TYPES)
                coremask = (coremask << cpt->max_se_cores);

        pf_exe_ctl = cpt_read_csr64(cpt->reg_base, CPTX_PF_EXE_CTL(0));
        cpt_write_csr64(cpt->reg_base, CPTX_PF_EXE_CTL(0),
                        (pf_exe_ctl | coremask));
        udelay(CSR_DELAY);
}

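/*
 * Add the cores in coremask to engine group grp; for AE engines the
 * mask is shifted past the SE cores, as in cpt_enable_cores().
 */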
static void cpt_configure_group(struct cpt_device *cpt, u8 grp,
                                u64 coremask, u8 type)
{
        u64 pf_gx_en = 0;

        if (type == AE_TYPES)
                coremask = (coremask << cpt->max_se_cores);

        pf_gx_en = cpt_read_csr64(cpt->reg_base, CPTX_PF_GX_EN(0, grp));
        cpt_write_csr64(cpt->reg_base, CPTX_PF_GX_EN(0, grp),
                        (pf_gx_en | coremask));
        udelay(CSR_DELAY);
}

static void cpt_disable_mbox_interrupts(struct cpt_device *cpt)
{
        /* Clear mbox(0) interrupts for all VFs */
        cpt_write_csr64(cpt->reg_base, CPTX_PF_MBOX_ENA_W1CX(0, 0), ~0ull);
}

static void cpt_disable_ecc_interrupts(struct cpt_device *cpt)
{
        /* Clear ecc(0) interrupts for all VFs */
        cpt_write_csr64(cpt->reg_base, CPTX_PF_ECC0_ENA_W1C(0), ~0ull);
}

static void cpt_disable_exec_interrupts(struct cpt_device *cpt)
{
        /* Clear exec interrupts for all VFs */
        cpt_write_csr64(cpt->reg_base, CPTX_PF_EXEC_ENA_W1C(0), ~0ull);
}

static void cpt_disable_all_interrupts(struct cpt_device *cpt)
{
        cpt_disable_mbox_interrupts(cpt);
        cpt_disable_ecc_interrupts(cpt);
        cpt_disable_exec_interrupts(cpt);
}

static void cpt_enable_mbox_interrupts(struct cpt_device *cpt)
{
        /* Set mbox(0) interrupts for all VFs */
        cpt_write_csr64(cpt->reg_base, CPTX_PF_MBOX_ENA_W1SX(0, 0), ~0ull);
}

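/*
 * Point every core selected in mcode->core_mask at the microcode image
 * by programming its CPTX_PF_ENGX_UCODE_BASE register with phys_base.
 */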
static int cpt_load_microcode(struct cpt_device *cpt, struct microcode *mcode)
{
        int ret = 0, core = 0, shift = 0;
        u32 total_cores = 0;
        struct device *dev = &cpt->pdev->dev;

        if (!mcode || !mcode->code) {
                dev_err(dev, "mcode or mcode->code is NULL\n");
                return -EINVAL;
        }

        if (mcode->code_size == 0) {
                dev_err(dev, "Microcode size is 0\n");
                return -EINVAL;
        }

        /* Assumes 0-9 are SE cores for UCODE_BASE registers and
         * AE core bases follow
         */
        if (mcode->is_ae) {
                core = CPT_MAX_SE_CORES; /* start counting from 10 */
                total_cores = CPT_MAX_TOTAL_CORES; /* up to 15 */
        } else {
                core = 0; /* start counting from 0 */
                total_cores = CPT_MAX_SE_CORES; /* up to 9 */
        }

        /* Point to microcode for each core of the group */
        for (; core < total_cores; core++, shift++) {
                if (mcode->core_mask & (1 << shift)) {
                        cpt_write_csr64(cpt->reg_base,
                                        CPTX_PF_ENGX_UCODE_BASE(0, core),
                                        (u64)mcode->phys_base);
                }
        }
        return ret;
}

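/*
 * Bind a freshly loaded microcode image to the next free engine group:
 * validate the requested core count, quiesce the cores, program their
 * ucode base registers, then re-enable the cores and mailbox interrupts.
 */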
static int do_cpt_init(struct cpt_device *cpt, struct microcode *mcode)
{
        int ret = 0;
        struct device *dev = &cpt->pdev->dev;

        /* Make device not ready */
        cpt->flags &= ~CPT_FLAG_DEVICE_READY;
        /* Disable all PF interrupts */
        cpt_disable_all_interrupts(cpt);
        /* Calculate mcode group and coremasks */
        if (mcode->is_ae) {
                if (mcode->num_cores > cpt->max_ae_cores) {
                        dev_err(dev, "Requested more cores than available AE cores\n");
                        ret = -EINVAL;
                        goto cpt_init_fail;
                }

                if (cpt->next_group >= CPT_MAX_CORE_GROUPS) {
                        dev_err(dev, "Can't load, all eight microcode groups in use");
                        ret = -ENFILE;
                        goto cpt_init_fail;
                }

                mcode->group = cpt->next_group;
                /* Convert requested core count to a contiguous core mask */
                mcode->core_mask = GENMASK(mcode->num_cores - 1, 0);
                cpt_disable_cores(cpt, mcode->core_mask, AE_TYPES,
                                  mcode->group);
                /* Load microcode for AE engines */
                ret = cpt_load_microcode(cpt, mcode);
                if (ret) {
                        dev_err(dev, "Microcode load failed for %s\n",
                                mcode->version);
                        goto cpt_init_fail;
                }
                cpt->next_group++;
                /* Configure group mask for the mcode */
                cpt_configure_group(cpt, mcode->group, mcode->core_mask,
                                    AE_TYPES);
                /* Enable AE cores for the group mask */
                cpt_enable_cores(cpt, mcode->core_mask, AE_TYPES);
        } else {
                if (mcode->num_cores > cpt->max_se_cores) {
                        dev_err(dev, "Requested more cores than available SE cores\n");
                        ret = -EINVAL;
                        goto cpt_init_fail;
                }
                if (cpt->next_group >= CPT_MAX_CORE_GROUPS) {
                        dev_err(dev, "Can't load, all eight microcode groups in use");
                        ret = -ENFILE;
                        goto cpt_init_fail;
                }

                mcode->group = cpt->next_group;
                /* Convert requested core count to a contiguous core mask */
                mcode->core_mask = GENMASK(mcode->num_cores - 1, 0);
                cpt_disable_cores(cpt, mcode->core_mask, SE_TYPES,
                                  mcode->group);
                /* Load microcode for SE engines */
                ret = cpt_load_microcode(cpt, mcode);
                if (ret) {
                        dev_err(dev, "Microcode load failed for %s\n",
                                mcode->version);
                        goto cpt_init_fail;
                }
                cpt->next_group++;
                /* Configure group mask for the mcode */
                cpt_configure_group(cpt, mcode->group, mcode->core_mask,
                                    SE_TYPES);
                /* Enable SE cores for the group mask */
                cpt_enable_cores(cpt, mcode->core_mask, SE_TYPES);
        }

        /* Enable PF mailbox interrupts */
        cpt_enable_mbox_interrupts(cpt);
        cpt->flags |= CPT_FLAG_DEVICE_READY;

        return ret;

cpt_init_fail:
        /* Re-enable PF mailbox interrupts */
        cpt_enable_mbox_interrupts(cpt);

        return ret;
}

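/*
 * Header at the front of a CPT microcode image. Only the version string
 * and the big-endian code_length (apparently in 16-bit words, given the
 * code_size = code_length * 2 computation below) are consumed in this
 * file; the trailing fields are part of the image layout but unused here.
 */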
struct ucode_header {
        u8 version[CPT_UCODE_VERSION_SZ];
        __be32 code_length;
        u32 data_length;
        u64 sram_address;
};

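/*
 * Fetch one microcode image with request_firmware(), copy it into
 * DMA-coherent memory, byte swap it for the engines, and hand it to
 * do_cpt_init() to claim cores and an engine group.
 */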
static int cpt_ucode_load_fw(struct cpt_device *cpt, const char *fw, bool is_ae)
{
        const struct firmware *fw_entry;
        struct device *dev = &cpt->pdev->dev;
        struct ucode_header *ucode;
        unsigned int code_length;
        struct microcode *mcode;
        int j, ret = 0;

        ret = request_firmware(&fw_entry, fw, dev);
        if (ret)
                return ret;

        ucode = (struct ucode_header *)fw_entry->data;
        mcode = &cpt->mcode[cpt->next_mc_idx];
        memcpy(mcode->version, (u8 *)fw_entry->data, CPT_UCODE_VERSION_SZ);
        code_length = ntohl(ucode->code_length);
        if (code_length == 0 || code_length >= INT_MAX / 2) {
                ret = -EINVAL;
                goto fw_release;
        }
        mcode->code_size = code_length * 2;

        mcode->is_ae = is_ae;
        mcode->core_mask = 0ULL;
        mcode->num_cores = is_ae ? 6 : 10;

        /* Allocate DMAable space */
        mcode->code = dma_alloc_coherent(&cpt->pdev->dev, mcode->code_size,
                                         &mcode->phys_base, GFP_KERNEL);
        if (!mcode->code) {
                dev_err(dev, "Unable to allocate space for microcode");
                ret = -ENOMEM;
                goto fw_release;
        }

        memcpy((void *)mcode->code, (void *)(fw_entry->data + sizeof(*ucode)),
               mcode->code_size);

        /*
         * Byte swap: on a little-endian host the 64-bit swap followed by
         * the 16-bit swap reverses the order of the 16-bit halfwords within
         * each 64-bit word while preserving byte order inside each halfword
         * (both passes are no-ops on a big-endian host).
         */
        for (j = 0; j < (mcode->code_size / 8); j++)
                ((__be64 *)mcode->code)[j] = cpu_to_be64(((u64 *)mcode->code)[j]);
        /* MC needs 16-bit swap */
        for (j = 0; j < (mcode->code_size / 2); j++)
                ((__be16 *)mcode->code)[j] = cpu_to_be16(((u16 *)mcode->code)[j]);

        dev_dbg(dev, "mcode->code_size = %u\n", mcode->code_size);
        dev_dbg(dev, "mcode->is_ae = %u\n", mcode->is_ae);
        dev_dbg(dev, "mcode->num_cores = %u\n", mcode->num_cores);
        dev_dbg(dev, "mcode->code = %llx\n", (u64)mcode->code);
        dev_dbg(dev, "mcode->phys_base = %llx\n", mcode->phys_base);

        ret = do_cpt_init(cpt, mcode);
        if (ret) {
                dev_err(dev, "do_cpt_init failed with ret: %d\n", ret);
                goto fw_release;
        }

        dev_info(dev, "Microcode Loaded %s\n", mcode->version);
        mcode->is_mc_valid = 1;
        cpt->next_mc_idx++;

fw_release:
        release_firmware(fw_entry);

        return ret;
}

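/*
 * Load the AE microcode image first, then the SE image; each call
 * consumes the next free mcode slot and engine group.
 */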
static int cpt_ucode_load(struct cpt_device *cpt)
{
        int ret = 0;
        struct device *dev = &cpt->pdev->dev;

        ret = cpt_ucode_load_fw(cpt, "cpt8x-mc-ae.out", true);
        if (ret) {
                dev_err(dev, "ae:cpt_ucode_load failed with ret: %d\n", ret);
                return ret;
        }
        ret = cpt_ucode_load_fw(cpt, "cpt8x-mc-se.out", false);
        if (ret) {
                dev_err(dev, "se:cpt_ucode_load failed with ret: %d\n", ret);
                return ret;
        }

        return ret;
}

static irqreturn_t cpt_mbx0_intr_handler(int irq, void *cpt_irq)
{
        struct cpt_device *cpt = (struct cpt_device *)cpt_irq;

        cpt_mbox_intr_handler(cpt, 0);

        return IRQ_HANDLED;
}

static void cpt_reset(struct cpt_device *cpt)
{
        cpt_write_csr64(cpt->reg_base, CPTX_PF_RESET(0), 1);
}

static void cpt_find_max_enabled_cores(struct cpt_device *cpt)
{
        union cptx_pf_constants pf_cnsts = {0};

        pf_cnsts.u = cpt_read_csr64(cpt->reg_base, CPTX_PF_CONSTANTS(0));
        cpt->max_se_cores = pf_cnsts.s.se;
        cpt->max_ae_cores = pf_cnsts.s.ae;
}

static u32 cpt_check_bist_status(struct cpt_device *cpt)
{
        union cptx_pf_bist_status bist_sts = {0};

        bist_sts.u = cpt_read_csr64(cpt->reg_base,
                                    CPTX_PF_BIST_STATUS(0));

        return bist_sts.u;
}

static u64 cpt_check_exe_bist_status(struct cpt_device *cpt)
{
        union cptx_pf_exe_bist_status bist_sts = {0};

        bist_sts.u = cpt_read_csr64(cpt->reg_base,
                                    CPTX_PF_EXE_BIST_STATUS(0));

        return bist_sts.u;
}

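/*
 * Detach all cores from every engine group, poll (with a bounded
 * timeout) for the cores to go idle, then clear the execution enables.
 */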
static void cpt_disable_all_cores(struct cpt_device *cpt)
{
        u32 grp, timeout = 100;
        struct device *dev = &cpt->pdev->dev;

        /* Disengage the cores from groups */
        for (grp = 0; grp < CPT_MAX_CORE_GROUPS; grp++) {
                cpt_write_csr64(cpt->reg_base, CPTX_PF_GX_EN(0, grp), 0);
                udelay(CSR_DELAY);
        }

        grp = cpt_read_csr64(cpt->reg_base, CPTX_PF_EXEC_BUSY(0));
        while (grp) {
                dev_err(dev, "Cores still busy");
                grp = cpt_read_csr64(cpt->reg_base,
                                     CPTX_PF_EXEC_BUSY(0));
                if (!timeout--)
                        break;

                udelay(CSR_DELAY);
        }
        /* Disable the cores */
        cpt_write_csr64(cpt->reg_base, CPTX_PF_EXE_CTL(0), 0);
}

/*
 * Ensure all cores are disengaged from all groups by
 * calling cpt_disable_all_cores() before calling this
 * function.
 */
static void cpt_unload_microcode(struct cpt_device *cpt)
{
        u32 grp = 0, core;

        /* Free microcode bases and reset group masks */
        for (grp = 0; grp < CPT_MAX_CORE_GROUPS; grp++) {
                struct microcode *mcode = &cpt->mcode[grp];

                if (cpt->mcode[grp].code)
                        dma_free_coherent(&cpt->pdev->dev, mcode->code_size,
                                          mcode->code, mcode->phys_base);
                mcode->code = NULL;
        }
        /* Clear UCODE_BASE registers for all engines */
        for (core = 0; core < CPT_MAX_TOTAL_CORES; core++)
                cpt_write_csr64(cpt->reg_base,
                                CPTX_PF_ENGX_UCODE_BASE(0, core), 0ull);
}

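/*
 * One-time PF hardware bring-up: reset the block, check both BIST
 * results, read the SE/AE core counts, and leave all cores disabled.
 */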
static int cpt_device_init(struct cpt_device *cpt)
{
        u64 bist;
        struct device *dev = &cpt->pdev->dev;

        /* Reset the PF when probed first */
        cpt_reset(cpt);
        msleep(100);

        /* Check BIST status */
        bist = (u64)cpt_check_bist_status(cpt);
        if (bist) {
                dev_err(dev, "RAM BIST failed with code 0x%llx", bist);
                return -ENODEV;
        }

        bist = cpt_check_exe_bist_status(cpt);
        if (bist) {
                dev_err(dev, "Engine BIST failed with code 0x%llx", bist);
                return -ENODEV;
        }

        /* Get CLK frequency */
        /* Get max enabled cores */
        cpt_find_max_enabled_cores(cpt);
        /* Disable all cores */
        cpt_disable_all_cores(cpt);
        /* Reset device parameters */
        cpt->next_mc_idx = 0;
        cpt->next_group = 0;
        /* PF is ready */
        cpt->flags |= CPT_FLAG_DEVICE_READY;

        return 0;
}

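/*
 * Allocate the PF MSI-X vectors and install the mailbox 0 handler;
 * vectors are released again if the handler cannot be registered.
 */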
static int cpt_register_interrupts(struct cpt_device *cpt)
{
        int ret;
        struct device *dev = &cpt->pdev->dev;

        /* Enable MSI-X */
        ret = pci_alloc_irq_vectors(cpt->pdev, CPT_PF_MSIX_VECTORS,
                                    CPT_PF_MSIX_VECTORS, PCI_IRQ_MSIX);
        if (ret < 0) {
                dev_err(dev, "Request for #%d msix vectors failed\n",
                        CPT_PF_MSIX_VECTORS);
                return ret;
        }

        /* Register mailbox interrupt handlers */
        ret = request_irq(pci_irq_vector(cpt->pdev, CPT_PF_INT_VEC_E_MBOXX(0)),
                          cpt_mbx0_intr_handler, 0, "CPT Mbox0", cpt);
        if (ret)
                goto fail;

        /* Enable mailbox interrupt */
        cpt_enable_mbox_interrupts(cpt);
        return 0;

fail:
        dev_err(dev, "Request irq failed\n");
        pci_free_irq_vectors(cpt->pdev);
        return ret;
}

static void cpt_unregister_interrupts(struct cpt_device *cpt)
{
        free_irq(pci_irq_vector(cpt->pdev, CPT_PF_INT_VEC_E_MBOXX(0)), cpt);
        pci_free_irq_vectors(cpt->pdev);
}

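/*
 * Enable SR-IOV: clamp the requested VF count to the TotalVFs value
 * advertised in the SR-IOV capability, then enable that many VFs.
 */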
static int cpt_sriov_init(struct cpt_device *cpt, int num_vfs)
{
        int pos = 0;
        int err;
        u16 total_vf_cnt;
        struct pci_dev *pdev = cpt->pdev;

        pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
        if (!pos) {
                dev_err(&pdev->dev, "SRIOV capability is not found in PCIe config space\n");
                return -ENODEV;
        }

        cpt->num_vf_en = num_vfs; /* User requested VFs */
        pci_read_config_word(pdev, (pos + PCI_SRIOV_TOTAL_VF), &total_vf_cnt);
        if (total_vf_cnt < cpt->num_vf_en)
                cpt->num_vf_en = total_vf_cnt;

        if (!total_vf_cnt)
                return 0;

        /* Enable the available VFs */
        err = pci_enable_sriov(pdev, cpt->num_vf_en);
        if (err) {
                dev_err(&pdev->dev, "SRIOV enable failed, num VF is %d\n",
                        cpt->num_vf_en);
                cpt->num_vf_en = 0;
                return err;
        }

        /* TODO: Optionally enable static VQ priorities feature */

        dev_info(&pdev->dev, "SRIOV enabled, number of VF available %d\n",
                 cpt->num_vf_en);

        cpt->flags |= CPT_FLAG_SRIOV_ENABLED;

        return 0;
}

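/*
 * PF probe: enable and map the device, initialize the hardware, register
 * the mailbox interrupt, load microcode, and finally enable SR-IOV.
 */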
static int cpt_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
        struct device *dev = &pdev->dev;
        struct cpt_device *cpt;
        int err;

        if (num_vfs > 16 || num_vfs < 4) {
                dev_warn(dev, "Invalid VF count %u, resetting it to 4 (default)\n",
                         num_vfs);
                num_vfs = 4;
        }

        cpt = devm_kzalloc(dev, sizeof(*cpt), GFP_KERNEL);
        if (!cpt)
                return -ENOMEM;

        pci_set_drvdata(pdev, cpt);
        cpt->pdev = pdev;
        err = pci_enable_device(pdev);
        if (err) {
                dev_err(dev, "Failed to enable PCI device\n");
                pci_set_drvdata(pdev, NULL);
                return err;
        }

        err = pci_request_regions(pdev, DRV_NAME);
        if (err) {
                dev_err(dev, "PCI request regions failed 0x%x\n", err);
                goto cpt_err_disable_device;
        }

        err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(48));
        if (err) {
                dev_err(dev, "Unable to get usable 48-bit DMA configuration\n");
                goto cpt_err_release_regions;
        }

        /* Map PF's configuration registers */
        cpt->reg_base = pcim_iomap(pdev, 0, 0);
        if (!cpt->reg_base) {
                dev_err(dev, "Cannot map config register space, aborting\n");
                err = -ENOMEM;
                goto cpt_err_release_regions;
        }

        /* CPT device HW initialization */
        err = cpt_device_init(cpt);
        if (err)
                goto cpt_err_release_regions;

        /* Register interrupts */
        err = cpt_register_interrupts(cpt);
        if (err)
                goto cpt_err_release_regions;

        err = cpt_ucode_load(cpt);
        if (err)
                goto cpt_err_unregister_interrupts;

        /* Configure SRIOV */
        err = cpt_sriov_init(cpt, num_vfs);
        if (err)
                goto cpt_err_unregister_interrupts;

        return 0;

cpt_err_unregister_interrupts:
        cpt_unregister_interrupts(cpt);
cpt_err_release_regions:
        pci_release_regions(pdev);
cpt_err_disable_device:
        pci_disable_device(pdev);
        pci_set_drvdata(pdev, NULL);
        return err;
}

static void cpt_remove(struct pci_dev *pdev)
{
        struct cpt_device *cpt = pci_get_drvdata(pdev);

        /* Disengage SE and AE cores from all groups */
        cpt_disable_all_cores(cpt);
        /* Unload microcode */
        cpt_unload_microcode(cpt);
        cpt_unregister_interrupts(cpt);
        pci_disable_sriov(pdev);
        pci_release_regions(pdev);
        pci_disable_device(pdev);
        pci_set_drvdata(pdev, NULL);
}

static void cpt_shutdown(struct pci_dev *pdev)
{
        struct cpt_device *cpt = pci_get_drvdata(pdev);

        if (!cpt)
                return;

        dev_info(&pdev->dev, "Shutdown device %x:%x.\n",
                 (u32)pdev->vendor, (u32)pdev->device);

        cpt_unregister_interrupts(cpt);
        pci_release_regions(pdev);
        pci_disable_device(pdev);
        pci_set_drvdata(pdev, NULL);
}

/* Supported devices */
static const struct pci_device_id cpt_id_table[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, CPT_81XX_PCI_PF_DEVICE_ID) },
        { 0, } /* end of table */
};

static struct pci_driver cpt_pci_driver = {
        .name = DRV_NAME,
        .id_table = cpt_id_table,
        .probe = cpt_probe,
        .remove = cpt_remove,
        .shutdown = cpt_shutdown,
};

module_pci_driver(cpt_pci_driver);

MODULE_AUTHOR("George Cherian <george.cherian@cavium.com>");
MODULE_DESCRIPTION("Cavium Thunder CPT Physical Function Driver");
MODULE_LICENSE("GPL v2");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, cpt_id_table);