// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2017 Cadence
// Cadence PCIe endpoint controller driver.
// Author: Cyrille Pitchen <cyrille.pitchen@free-electrons.com>

#include <linux/bitfield.h>
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/pci-epc.h>
#include <linux/platform_device.h>
#include <linux/sizes.h>

#include "pcie-cadence.h"

#define CDNS_PCIE_EP_MIN_APERTURE		128	/* 128 bytes */
#define CDNS_PCIE_EP_IRQ_PCI_ADDR_NONE		0x1
#define CDNS_PCIE_EP_IRQ_PCI_ADDR_LEGACY	0x3

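/*
 * Map a (physical function, virtual function) pair to the absolute function
 * number used by the controller registers: vfn == 0 addresses the physical
 * function itself, otherwise the SR-IOV First VF Offset and VF Stride of the
 * parent function give the virtual function's number.
 */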
static u8 cdns_pcie_get_fn_from_vfn(struct cdns_pcie *pcie, u8 fn, u8 vfn)
{
	u32 cap = CDNS_PCIE_EP_FUNC_SRIOV_CAP_OFFSET;
	u32 first_vf_offset, stride;

	if (vfn == 0)
		return fn;

	first_vf_offset = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_SRIOV_VF_OFFSET);
	stride = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_SRIOV_VF_STRIDE);
	fn = fn + first_vf_offset + ((vfn - 1) * stride);

	return fn;
}

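/*
 * Program the standard configuration header of a physical function. For
 * virtual functions only the device ID of VF #1 is writable; requests for
 * vfn > 1 are rejected.
 */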
static int cdns_pcie_ep_write_header(struct pci_epc *epc, u8 fn, u8 vfn,
				     struct pci_epf_header *hdr)
{
	struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
	u32 cap = CDNS_PCIE_EP_FUNC_SRIOV_CAP_OFFSET;
	struct cdns_pcie *pcie = &ep->pcie;
	u32 reg;

	if (vfn > 1) {
		dev_err(&epc->dev, "Only Virtual Function #1 has deviceID\n");
		return -EINVAL;
	} else if (vfn == 1) {
		reg = cap + PCI_SRIOV_VF_DID;
		cdns_pcie_ep_fn_writew(pcie, fn, reg, hdr->deviceid);
		return 0;
	}

	cdns_pcie_ep_fn_writew(pcie, fn, PCI_DEVICE_ID, hdr->deviceid);
	cdns_pcie_ep_fn_writeb(pcie, fn, PCI_REVISION_ID, hdr->revid);
	cdns_pcie_ep_fn_writeb(pcie, fn, PCI_CLASS_PROG, hdr->progif_code);
	cdns_pcie_ep_fn_writew(pcie, fn, PCI_CLASS_DEVICE,
			       hdr->subclass_code | hdr->baseclass_code << 8);
	cdns_pcie_ep_fn_writeb(pcie, fn, PCI_CACHE_LINE_SIZE,
			       hdr->cache_line_size);
	cdns_pcie_ep_fn_writew(pcie, fn, PCI_SUBSYSTEM_ID, hdr->subsys_id);
	cdns_pcie_ep_fn_writeb(pcie, fn, PCI_INTERRUPT_PIN, hdr->interrupt_pin);

	/*
	 * Vendor ID can only be modified from function 0, all other functions
	 * use the same vendor ID as function 0.
	 */
	if (fn == 0) {
		/* Update the vendor IDs. */
		u32 id = CDNS_PCIE_LM_ID_VENDOR(hdr->vendorid) |
			 CDNS_PCIE_LM_ID_SUBSYS(hdr->subsys_vendor_id);

		cdns_pcie_writel(pcie, CDNS_PCIE_LM_ID, id);
	}

	return 0;
}

static int cdns_pcie_ep_set_bar(struct pci_epc *epc, u8 fn, u8 vfn,
				struct pci_epf_bar *epf_bar)
{
	struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
	struct cdns_pcie_epf *epf = &ep->epf[fn];
	struct cdns_pcie *pcie = &ep->pcie;
	dma_addr_t bar_phys = epf_bar->phys_addr;
	enum pci_barno bar = epf_bar->barno;
	int flags = epf_bar->flags;
	u32 addr0, addr1, reg, cfg, b, aperture, ctrl;
	u64 sz;

	/* BAR size is 2^(aperture + 7) */
	sz = max_t(size_t, epf_bar->size, CDNS_PCIE_EP_MIN_APERTURE);
	/*
	 * roundup_pow_of_two() returns an unsigned long, which is not suited
	 * for 64bit values.
	 */
	sz = 1ULL << fls64(sz - 1);
	aperture = ilog2(sz) - 7; /* 128B -> 0, 256B -> 1, 512B -> 2, ... */

	if ((flags & PCI_BASE_ADDRESS_SPACE) == PCI_BASE_ADDRESS_SPACE_IO) {
		ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_IO_32BITS;
	} else {
		bool is_prefetch = !!(flags & PCI_BASE_ADDRESS_MEM_PREFETCH);
		bool is_64bits = sz > SZ_2G;

		if (is_64bits && (bar & 1))
			return -EINVAL;

		if (is_64bits && !(flags & PCI_BASE_ADDRESS_MEM_TYPE_64))
			epf_bar->flags |= PCI_BASE_ADDRESS_MEM_TYPE_64;

		if (is_64bits && is_prefetch)
			ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_PREFETCH_MEM_64BITS;
		else if (is_prefetch)
			ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_PREFETCH_MEM_32BITS;
		else if (is_64bits)
			ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_MEM_64BITS;
		else
			ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_MEM_32BITS;
	}

	addr0 = lower_32_bits(bar_phys);
	addr1 = upper_32_bits(bar_phys);

	if (vfn == 1)
		reg = CDNS_PCIE_LM_EP_VFUNC_BAR_CFG(bar, fn);
	else
		reg = CDNS_PCIE_LM_EP_FUNC_BAR_CFG(bar, fn);
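	/*
	 * The BAR config registers pack the fields for several BARs (0-3 in
	 * the first register, 4-5 in the second); b is the field index within
	 * the register selected above.
	 */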
	b = (bar < BAR_4) ? bar : bar - BAR_4;

	if (vfn == 0 || vfn == 1) {
		cfg = cdns_pcie_readl(pcie, reg);
		cfg &= ~(CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_APERTURE_MASK(b) |
			 CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL_MASK(b));
		cfg |= (CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_APERTURE(b, aperture) |
			CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL(b, ctrl));
		cdns_pcie_writel(pcie, reg, cfg);
	}

	fn = cdns_pcie_get_fn_from_vfn(pcie, fn, vfn);
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_EP_FUNC_BAR_ADDR0(fn, bar),
			 addr0);
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_EP_FUNC_BAR_ADDR1(fn, bar),
			 addr1);

	if (vfn > 0)
		epf = &epf->epf[vfn - 1];
	epf->epf_bar[bar] = epf_bar;

	return 0;
}

static void cdns_pcie_ep_clear_bar(struct pci_epc *epc, u8 fn, u8 vfn,
				   struct pci_epf_bar *epf_bar)
{
	struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
	struct cdns_pcie_epf *epf = &ep->epf[fn];
	struct cdns_pcie *pcie = &ep->pcie;
	enum pci_barno bar = epf_bar->barno;
	u32 reg, cfg, b, ctrl;

	if (vfn == 1)
		reg = CDNS_PCIE_LM_EP_VFUNC_BAR_CFG(bar, fn);
	else
		reg = CDNS_PCIE_LM_EP_FUNC_BAR_CFG(bar, fn);
	b = (bar < BAR_4) ? bar : bar - BAR_4;

	if (vfn == 0 || vfn == 1) {
		ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_DISABLED;
		cfg = cdns_pcie_readl(pcie, reg);
		cfg &= ~(CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_APERTURE_MASK(b) |
			 CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL_MASK(b));
		cfg |= CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL(b, ctrl);
		cdns_pcie_writel(pcie, reg, cfg);
	}

	fn = cdns_pcie_get_fn_from_vfn(pcie, fn, vfn);
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_EP_FUNC_BAR_ADDR0(fn, bar), 0);
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_EP_FUNC_BAR_ADDR1(fn, bar), 0);

	if (vfn > 0)
		epf = &epf->epf[vfn - 1];
	epf->epf_bar[bar] = NULL;
}

static int cdns_pcie_ep_map_addr(struct pci_epc *epc, u8 fn, u8 vfn,
				 phys_addr_t addr, u64 pci_addr, size_t size)
{
	struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
	struct cdns_pcie *pcie = &ep->pcie;
	u32 r;

	r = find_first_zero_bit(&ep->ob_region_map, BITS_PER_LONG);
	if (r >= ep->max_regions - 1) {
		dev_err(&epc->dev, "no free outbound region\n");
		return -EINVAL;
	}

	fn = cdns_pcie_get_fn_from_vfn(pcie, fn, vfn);
	cdns_pcie_set_outbound_region(pcie, 0, fn, r, false, addr, pci_addr, size);

	set_bit(r, &ep->ob_region_map);
	ep->ob_addr[r] = addr;

	return 0;
}

static void cdns_pcie_ep_unmap_addr(struct pci_epc *epc, u8 fn, u8 vfn,
				    phys_addr_t addr)
{
	struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
	struct cdns_pcie *pcie = &ep->pcie;
	u32 r;

	for (r = 0; r < ep->max_regions - 1; r++)
		if (ep->ob_addr[r] == addr)
			break;

	if (r == ep->max_regions - 1)
		return;

	cdns_pcie_reset_outbound_region(pcie, r);

	ep->ob_addr[r] = 0;
	clear_bit(r, &ep->ob_region_map);
}

static int cdns_pcie_ep_set_msi(struct pci_epc *epc, u8 fn, u8 vfn, u8 mmc)
{
	struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
	struct cdns_pcie *pcie = &ep->pcie;
	u32 cap = CDNS_PCIE_EP_FUNC_MSI_CAP_OFFSET;
	u16 flags;

	fn = cdns_pcie_get_fn_from_vfn(pcie, fn, vfn);

	/*
	 * Set the Multiple Message Capable bitfield into the Message Control
	 * register.
	 */
	flags = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_MSI_FLAGS);
	flags = (flags & ~PCI_MSI_FLAGS_QMASK) | (mmc << 1);
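	/* Advertise 64-bit message addresses, without per-vector masking. */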
	flags |= PCI_MSI_FLAGS_64BIT;
	flags &= ~PCI_MSI_FLAGS_MASKBIT;
	cdns_pcie_ep_fn_writew(pcie, fn, cap + PCI_MSI_FLAGS, flags);

	return 0;
}

static int cdns_pcie_ep_get_msi(struct pci_epc *epc, u8 fn, u8 vfn)
{
	struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
	struct cdns_pcie *pcie = &ep->pcie;
	u32 cap = CDNS_PCIE_EP_FUNC_MSI_CAP_OFFSET;
	u16 flags, mme;

	fn = cdns_pcie_get_fn_from_vfn(pcie, fn, vfn);

	/* Validate that the MSI feature is actually enabled. */
	flags = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_MSI_FLAGS);
	if (!(flags & PCI_MSI_FLAGS_ENABLE))
		return -EINVAL;

	/*
	 * Get the Multiple Message Enable bitfield from the Message Control
	 * register.
	 */
	mme = FIELD_GET(PCI_MSI_FLAGS_QSIZE, flags);

	return mme;
}

static int cdns_pcie_ep_get_msix(struct pci_epc *epc, u8 func_no, u8 vfunc_no)
{
	struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
	struct cdns_pcie *pcie = &ep->pcie;
	u32 cap = CDNS_PCIE_EP_FUNC_MSIX_CAP_OFFSET;
	u32 val, reg;

	func_no = cdns_pcie_get_fn_from_vfn(pcie, func_no, vfunc_no);

	reg = cap + PCI_MSIX_FLAGS;
	val = cdns_pcie_ep_fn_readw(pcie, func_no, reg);
	if (!(val & PCI_MSIX_FLAGS_ENABLE))
		return -EINVAL;

	val &= PCI_MSIX_FLAGS_QSIZE;

	return val;
}

static int cdns_pcie_ep_set_msix(struct pci_epc *epc, u8 fn, u8 vfn,
				 u16 interrupts, enum pci_barno bir,
				 u32 offset)
{
	struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
	struct cdns_pcie *pcie = &ep->pcie;
	u32 cap = CDNS_PCIE_EP_FUNC_MSIX_CAP_OFFSET;
	u32 val, reg;

	fn = cdns_pcie_get_fn_from_vfn(pcie, fn, vfn);

	reg = cap + PCI_MSIX_FLAGS;
	val = cdns_pcie_ep_fn_readw(pcie, fn, reg);
	val &= ~PCI_MSIX_FLAGS_QSIZE;
	val |= interrupts;
	cdns_pcie_ep_fn_writew(pcie, fn, reg, val);

	/* Set MSI-X BAR and offset */
	reg = cap + PCI_MSIX_TABLE;
	val = offset | bir;
	cdns_pcie_ep_fn_writel(pcie, fn, reg, val);

	/* Set PBA BAR and offset. BAR must match MSI-X BAR */
	reg = cap + PCI_MSIX_PBA;
	val = (offset + (interrupts * PCI_MSIX_ENTRY_SIZE)) | bir;
	cdns_pcie_ep_fn_writel(pcie, fn, reg, val);

	return 0;
}

static void cdns_pcie_ep_assert_intx(struct cdns_pcie_ep *ep, u8 fn, u8 intx,
				     bool is_asserted)
{
	struct cdns_pcie *pcie = &ep->pcie;
	unsigned long flags;
	u32 offset;
	u16 status;
	u8 msg_code;

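	/* Only four INTx lines exist (INTA..INTD). */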
	intx &= 3;

	/* Set the outbound region if needed. */
	if (unlikely(ep->irq_pci_addr != CDNS_PCIE_EP_IRQ_PCI_ADDR_LEGACY ||
		     ep->irq_pci_fn != fn)) {
		/* First region was reserved for IRQ writes. */
		cdns_pcie_set_outbound_region_for_normal_msg(pcie, 0, fn, 0,
							     ep->irq_phys_addr);
		ep->irq_pci_addr = CDNS_PCIE_EP_IRQ_PCI_ADDR_LEGACY;
		ep->irq_pci_fn = fn;
	}

	if (is_asserted) {
		ep->irq_pending |= BIT(intx);
		msg_code = MSG_CODE_ASSERT_INTA + intx;
	} else {
		ep->irq_pending &= ~BIT(intx);
		msg_code = MSG_CODE_DEASSERT_INTA + intx;
	}

	spin_lock_irqsave(&ep->lock, flags);
	status = cdns_pcie_ep_fn_readw(pcie, fn, PCI_STATUS);
	if (((status & PCI_STATUS_INTERRUPT) != 0) ^ (ep->irq_pending != 0)) {
		status ^= PCI_STATUS_INTERRUPT;
		cdns_pcie_ep_fn_writew(pcie, fn, PCI_STATUS, status);
	}
	spin_unlock_irqrestore(&ep->lock, flags);

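	/*
	 * The Assert/Deassert_INTx message is encoded in the address bits of
	 * the dedicated outbound message region; the value written below is
	 * ignored (CDNS_PCIE_MSG_NO_DATA).
	 */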
	offset = CDNS_PCIE_NORMAL_MSG_ROUTING(MSG_ROUTING_LOCAL) |
		 CDNS_PCIE_NORMAL_MSG_CODE(msg_code) |
		 CDNS_PCIE_MSG_NO_DATA;
	writel(0, ep->irq_cpu_addr + offset);
}

static int cdns_pcie_ep_send_intx_irq(struct cdns_pcie_ep *ep, u8 fn, u8 vfn,
				      u8 intx)
{
	u16 cmd;

	cmd = cdns_pcie_ep_fn_readw(&ep->pcie, fn, PCI_COMMAND);
	if (cmd & PCI_COMMAND_INTX_DISABLE)
		return -EINVAL;

	cdns_pcie_ep_assert_intx(ep, fn, intx, true);
	/*
	 * The mdelay() value was taken from dra7xx_pcie_raise_intx_irq()
	 */
	mdelay(1);
	cdns_pcie_ep_assert_intx(ep, fn, intx, false);
	return 0;
}

static int cdns_pcie_ep_send_msi_irq(struct cdns_pcie_ep *ep, u8 fn, u8 vfn,
				     u8 interrupt_num)
{
	struct cdns_pcie *pcie = &ep->pcie;
	u32 cap = CDNS_PCIE_EP_FUNC_MSI_CAP_OFFSET;
	u16 flags, mme, data, data_mask;
	u8 msi_count;
	u64 pci_addr, pci_addr_mask = 0xff;

	fn = cdns_pcie_get_fn_from_vfn(pcie, fn, vfn);

	/* Check whether the MSI feature has been enabled by the PCI host. */
	flags = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_MSI_FLAGS);
	if (!(flags & PCI_MSI_FLAGS_ENABLE))
		return -EINVAL;

	/* Get the number of enabled MSIs */
	mme = FIELD_GET(PCI_MSI_FLAGS_QSIZE, flags);
	msi_count = 1 << mme;
	if (!interrupt_num || interrupt_num > msi_count)
		return -EINVAL;

	/* Compute the data value to be written. */
	data_mask = msi_count - 1;
	data = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_MSI_DATA_64);
	data = (data & ~data_mask) | ((interrupt_num - 1) & data_mask);

	/* Get the PCI address where to write the data into. */
	pci_addr = cdns_pcie_ep_fn_readl(pcie, fn, cap + PCI_MSI_ADDRESS_HI);
	pci_addr <<= 32;
	pci_addr |= cdns_pcie_ep_fn_readl(pcie, fn, cap + PCI_MSI_ADDRESS_LO);
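	/* The two low bits of the MSI address are reserved (DWORD aligned). */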
	pci_addr &= GENMASK_ULL(63, 2);

	/* Set the outbound region if needed. */
	if (unlikely(ep->irq_pci_addr != (pci_addr & ~pci_addr_mask) ||
		     ep->irq_pci_fn != fn)) {
		/* First region was reserved for IRQ writes. */
		cdns_pcie_set_outbound_region(pcie, 0, fn, 0,
					      false,
					      ep->irq_phys_addr,
					      pci_addr & ~pci_addr_mask,
					      pci_addr_mask + 1);
		ep->irq_pci_addr = (pci_addr & ~pci_addr_mask);
		ep->irq_pci_fn = fn;
	}
	writel(data, ep->irq_cpu_addr + (pci_addr & pci_addr_mask));

	return 0;
}

static int cdns_pcie_ep_map_msi_irq(struct pci_epc *epc, u8 fn, u8 vfn,
				    phys_addr_t addr, u8 interrupt_num,
				    u32 entry_size, u32 *msi_data,
				    u32 *msi_addr_offset)
{
	struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
	u32 cap = CDNS_PCIE_EP_FUNC_MSI_CAP_OFFSET;
	struct cdns_pcie *pcie = &ep->pcie;
	u64 pci_addr, pci_addr_mask = 0xff;
	u16 flags, mme, data, data_mask;
	u8 msi_count;
	int ret;
	int i;

	fn = cdns_pcie_get_fn_from_vfn(pcie, fn, vfn);

	/* Check whether the MSI feature has been enabled by the PCI host. */
	flags = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_MSI_FLAGS);
	if (!(flags & PCI_MSI_FLAGS_ENABLE))
		return -EINVAL;

	/* Get the number of enabled MSIs */
	mme = FIELD_GET(PCI_MSI_FLAGS_QSIZE, flags);
	msi_count = 1 << mme;
	if (!interrupt_num || interrupt_num > msi_count)
		return -EINVAL;

	/* Compute the data value to be written. */
	data_mask = msi_count - 1;
	data = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_MSI_DATA_64);
	data = data & ~data_mask;

	/* Get the PCI address where to write the data into. */
	pci_addr = cdns_pcie_ep_fn_readl(pcie, fn, cap + PCI_MSI_ADDRESS_HI);
	pci_addr <<= 32;
	pci_addr |= cdns_pcie_ep_fn_readl(pcie, fn, cap + PCI_MSI_ADDRESS_LO);
	pci_addr &= GENMASK_ULL(63, 2);

	for (i = 0; i < interrupt_num; i++) {
		ret = cdns_pcie_ep_map_addr(epc, fn, vfn, addr,
					    pci_addr & ~pci_addr_mask,
					    entry_size);
		if (ret)
			return ret;
		addr = addr + entry_size;
	}

	*msi_data = data;
	*msi_addr_offset = pci_addr & pci_addr_mask;

	return 0;
}

static int cdns_pcie_ep_send_msix_irq(struct cdns_pcie_ep *ep, u8 fn, u8 vfn,
				      u16 interrupt_num)
{
	u32 cap = CDNS_PCIE_EP_FUNC_MSIX_CAP_OFFSET;
	u32 tbl_offset, msg_data, reg;
	struct cdns_pcie *pcie = &ep->pcie;
	struct pci_epf_msix_tbl *msix_tbl;
	struct cdns_pcie_epf *epf;
	u64 pci_addr_mask = 0xff;
	u64 msg_addr;
	u16 flags;
	u8 bir;

	epf = &ep->epf[fn];
	if (vfn > 0)
		epf = &epf->epf[vfn - 1];

	fn = cdns_pcie_get_fn_from_vfn(pcie, fn, vfn);

	/* Check whether the MSI-X feature has been enabled by the PCI host. */
	flags = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_MSIX_FLAGS);
	if (!(flags & PCI_MSIX_FLAGS_ENABLE))
		return -EINVAL;

	reg = cap + PCI_MSIX_TABLE;
	tbl_offset = cdns_pcie_ep_fn_readl(pcie, fn, reg);
	bir = FIELD_GET(PCI_MSIX_TABLE_BIR, tbl_offset);
	tbl_offset &= PCI_MSIX_TABLE_OFFSET;

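	/*
	 * Fetch the message address and data for this vector from the MSI-X
	 * table, which lives in local memory behind one of this function's
	 * BARs.
	 */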
	msix_tbl = epf->epf_bar[bir]->addr + tbl_offset;
	msg_addr = msix_tbl[(interrupt_num - 1)].msg_addr;
	msg_data = msix_tbl[(interrupt_num - 1)].msg_data;

	/* Set the outbound region if needed. */
	if (ep->irq_pci_addr != (msg_addr & ~pci_addr_mask) ||
	    ep->irq_pci_fn != fn) {
		/* First region was reserved for IRQ writes. */
		cdns_pcie_set_outbound_region(pcie, 0, fn, 0,
					      false,
					      ep->irq_phys_addr,
					      msg_addr & ~pci_addr_mask,
					      pci_addr_mask + 1);
		ep->irq_pci_addr = (msg_addr & ~pci_addr_mask);
		ep->irq_pci_fn = fn;
	}
	writel(msg_data, ep->irq_cpu_addr + (msg_addr & pci_addr_mask));

	return 0;
}

static int cdns_pcie_ep_raise_irq(struct pci_epc *epc, u8 fn, u8 vfn,
				  unsigned int type, u16 interrupt_num)
{
	struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
	struct cdns_pcie *pcie = &ep->pcie;
	struct device *dev = pcie->dev;

	switch (type) {
	case PCI_IRQ_INTX:
		if (vfn > 0) {
			dev_err(dev, "Cannot raise INTX interrupts for VF\n");
			return -EINVAL;
		}
		return cdns_pcie_ep_send_intx_irq(ep, fn, vfn, 0);

	case PCI_IRQ_MSI:
		return cdns_pcie_ep_send_msi_irq(ep, fn, vfn, interrupt_num);

	case PCI_IRQ_MSIX:
		return cdns_pcie_ep_send_msix_irq(ep, fn, vfn, interrupt_num);

	default:
		break;
	}

	return -EINVAL;
}

static int cdns_pcie_ep_start(struct pci_epc *epc)
{
	struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
	struct cdns_pcie *pcie = &ep->pcie;
	struct device *dev = pcie->dev;
	int max_epfs = sizeof(epc->function_num_map) * 8;
	int ret, epf, last_fn;
	u32 reg, value;

	/*
	 * BIT(0) is hardwired to 1, hence function 0 is always enabled
	 * and can't be disabled anyway.
	 */
	cdns_pcie_writel(pcie, CDNS_PCIE_LM_EP_FUNC_CFG, epc->function_num_map);

	/*
	 * Next function field in ARI_CAP_AND_CTR register for last function
	 * should be 0.
	 * Clearing Next Function Number field for the last function used.
	 */
	last_fn = find_last_bit(&epc->function_num_map, BITS_PER_LONG);
	reg = CDNS_PCIE_CORE_PF_I_ARI_CAP_AND_CTRL(last_fn);
	value = cdns_pcie_readl(pcie, reg);
	value &= ~CDNS_PCIE_ARI_CAP_NFN_MASK;
	cdns_pcie_writel(pcie, reg, value);

	if (ep->quirk_disable_flr) {
		for (epf = 0; epf < max_epfs; epf++) {
			if (!(epc->function_num_map & BIT(epf)))
				continue;

			value = cdns_pcie_ep_fn_readl(pcie, epf,
					CDNS_PCIE_EP_FUNC_DEV_CAP_OFFSET +
					PCI_EXP_DEVCAP);
			value &= ~PCI_EXP_DEVCAP_FLR;
			cdns_pcie_ep_fn_writel(pcie, epf,
					CDNS_PCIE_EP_FUNC_DEV_CAP_OFFSET +
					PCI_EXP_DEVCAP, value);
		}
	}

	ret = cdns_pcie_start_link(pcie);
	if (ret) {
		dev_err(dev, "Failed to start link\n");
		return ret;
	}

	return 0;
}

static const struct pci_epc_features cdns_pcie_epc_vf_features = {
	.linkup_notifier = false,
	.msi_capable = true,
	.msix_capable = true,
	.align = 65536,
};

static const struct pci_epc_features cdns_pcie_epc_features = {
	.linkup_notifier = false,
	.msi_capable = true,
	.msix_capable = true,
	.align = 256,
};

static const struct pci_epc_features*
cdns_pcie_ep_get_features(struct pci_epc *epc, u8 func_no, u8 vfunc_no)
{
	if (!vfunc_no)
		return &cdns_pcie_epc_features;

	return &cdns_pcie_epc_vf_features;
}

static const struct pci_epc_ops cdns_pcie_epc_ops = {
	.write_header = cdns_pcie_ep_write_header,
	.set_bar = cdns_pcie_ep_set_bar,
	.clear_bar = cdns_pcie_ep_clear_bar,
	.map_addr = cdns_pcie_ep_map_addr,
	.unmap_addr = cdns_pcie_ep_unmap_addr,
	.set_msi = cdns_pcie_ep_set_msi,
	.get_msi = cdns_pcie_ep_get_msi,
	.set_msix = cdns_pcie_ep_set_msix,
	.get_msix = cdns_pcie_ep_get_msix,
	.raise_irq = cdns_pcie_ep_raise_irq,
	.map_msi_irq = cdns_pcie_ep_map_msi_irq,
	.start = cdns_pcie_ep_start,
	.get_features = cdns_pcie_ep_get_features,
};

int cdns_pcie_ep_setup(struct cdns_pcie_ep *ep)
{
	struct device *dev = ep->pcie.dev;
	struct platform_device *pdev = to_platform_device(dev);
	struct device_node *np = dev->of_node;
	struct cdns_pcie *pcie = &ep->pcie;
	struct cdns_pcie_epf *epf;
	struct resource *res;
	struct pci_epc *epc;
	int ret;
	int i;

	pcie->is_rc = false;

	pcie->reg_base = devm_platform_ioremap_resource_byname(pdev, "reg");
	if (IS_ERR(pcie->reg_base)) {
		dev_err(dev, "missing \"reg\"\n");
		return PTR_ERR(pcie->reg_base);
	}

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mem");
	if (!res) {
		dev_err(dev, "missing \"mem\"\n");
		return -EINVAL;
	}
	pcie->mem_res = res;

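	/*
	 * The number of outbound regions defaults to the driver maximum; an
	 * optional "cdns,max-outbound-regions" DT property overrides it.
	 */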
	ep->max_regions = CDNS_PCIE_MAX_OB;
	of_property_read_u32(np, "cdns,max-outbound-regions", &ep->max_regions);

	ep->ob_addr = devm_kcalloc(dev,
				   ep->max_regions, sizeof(*ep->ob_addr),
				   GFP_KERNEL);
	if (!ep->ob_addr)
		return -ENOMEM;

	/* Disable all but function 0 (anyway BIT(0) is hardwired to 1). */
	cdns_pcie_writel(pcie, CDNS_PCIE_LM_EP_FUNC_CFG, BIT(0));

	epc = devm_pci_epc_create(dev, &cdns_pcie_epc_ops);
	if (IS_ERR(epc)) {
		dev_err(dev, "failed to create epc device\n");
		return PTR_ERR(epc);
	}

	epc_set_drvdata(epc, ep);

	if (of_property_read_u8(np, "max-functions", &epc->max_functions) < 0)
		epc->max_functions = 1;

	ep->epf = devm_kcalloc(dev, epc->max_functions, sizeof(*ep->epf),
			       GFP_KERNEL);
	if (!ep->epf)
		return -ENOMEM;

	epc->max_vfs = devm_kcalloc(dev, epc->max_functions,
				    sizeof(*epc->max_vfs), GFP_KERNEL);
	if (!epc->max_vfs)
		return -ENOMEM;

	ret = of_property_read_u8_array(np, "max-virtual-functions",
					epc->max_vfs, epc->max_functions);
	if (ret == 0) {
		for (i = 0; i < epc->max_functions; i++) {
			epf = &ep->epf[i];
			if (epc->max_vfs[i] == 0)
				continue;
			epf->epf = devm_kcalloc(dev, epc->max_vfs[i],
						sizeof(*ep->epf), GFP_KERNEL);
			if (!epf->epf)
				return -ENOMEM;
		}
	}

	ret = pci_epc_mem_init(epc, pcie->mem_res->start,
			       resource_size(pcie->mem_res), PAGE_SIZE);
	if (ret < 0) {
		dev_err(dev, "failed to initialize the memory space\n");
		return ret;
	}

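	/*
	 * Set aside a window in the endpoint's outbound memory space to act
	 * as the CPU-side source of the IRQ writes issued through region 0.
	 */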
	ep->irq_cpu_addr = pci_epc_mem_alloc_addr(epc, &ep->irq_phys_addr,
						  SZ_128K);
	if (!ep->irq_cpu_addr) {
		dev_err(dev, "failed to reserve memory space for MSI\n");
		ret = -ENOMEM;
		goto free_epc_mem;
	}
	ep->irq_pci_addr = CDNS_PCIE_EP_IRQ_PCI_ADDR_NONE;
	/* Reserve region 0 for IRQs */
	set_bit(0, &ep->ob_region_map);

	if (ep->quirk_detect_quiet_flag)
		cdns_pcie_detect_quiet_min_delay_set(&ep->pcie);

	spin_lock_init(&ep->lock);

	return 0;

free_epc_mem:
	pci_epc_mem_exit(epc);

	return ret;
}