1 | // SPDX-License-Identifier: GPL-2.0 |
2 | /* |
3 | * MediaTek PCIe host controller driver. |
4 | * |
5 | * Copyright (c) 2020 MediaTek Inc. |
6 | * Author: Jianjun Wang <jianjun.wang@mediatek.com> |
7 | */ |
8 | |
9 | #include <linux/clk.h> |
10 | #include <linux/delay.h> |
11 | #include <linux/iopoll.h> |
12 | #include <linux/irq.h> |
13 | #include <linux/irqchip/chained_irq.h> |
14 | #include <linux/irqdomain.h> |
15 | #include <linux/kernel.h> |
16 | #include <linux/module.h> |
17 | #include <linux/msi.h> |
18 | #include <linux/pci.h> |
19 | #include <linux/phy/phy.h> |
20 | #include <linux/platform_device.h> |
21 | #include <linux/pm_domain.h> |
22 | #include <linux/pm_runtime.h> |
23 | #include <linux/reset.h> |
24 | |
25 | #include "../pci.h" |
26 | |
/*
 * Per-port MAC register map. All offsets are relative to the "pcie-mac"
 * resource base mapped in mtk_pcie_parse_port().
 */
#define PCIE_SETTING_REG		0x80
#define PCIE_PCI_IDS_1			0x9c
#define PCI_CLASS(class)		((class) << 8)
#define PCIE_RC_MODE			BIT(0)

/* Configuration-access control: selects target bus/devfn and byte enables */
#define PCIE_CFGNUM_REG			0x140
#define PCIE_CFG_DEVFN(devfn)		((devfn) & GENMASK(7, 0))
#define PCIE_CFG_BUS(bus)		(((bus) << 8) & GENMASK(15, 8))
#define PCIE_CFG_BYTE_EN(bytes)		(((bytes) << 16) & GENMASK(19, 16))
#define PCIE_CFG_FORCE_BYTE_EN		BIT(20)
#define PCIE_CFG_OFFSET_ADDR		0x1000
#define PCIE_CFG_HEADER(bus, devfn) \
	(PCIE_CFG_BUS(bus) | PCIE_CFG_DEVFN(devfn))

#define PCIE_RST_CTRL_REG		0x148
#define PCIE_MAC_RSTB			BIT(0)
#define PCIE_PHY_RSTB			BIT(1)
#define PCIE_BRG_RSTB			BIT(2)
#define PCIE_PE_RSTB			BIT(3)

#define PCIE_LTSSM_STATUS_REG		0x150
#define PCIE_LTSSM_STATE_MASK		GENMASK(28, 24)
#define PCIE_LTSSM_STATE(val)		(((val) & PCIE_LTSSM_STATE_MASK) >> 24)
#define PCIE_LTSSM_STATE_L2_IDLE	0x14

#define PCIE_LINK_STATUS_REG		0x154
#define PCIE_PORT_LINKUP		BIT(8)

/* 8 MSI sets x 32 vectors each = 256 MSI vectors total */
#define PCIE_MSI_SET_NUM		8
#define PCIE_MSI_IRQS_PER_SET		32
#define PCIE_MSI_IRQS_NUM \
	(PCIE_MSI_IRQS_PER_SET * PCIE_MSI_SET_NUM)

#define PCIE_INT_ENABLE_REG		0x180
#define PCIE_MSI_ENABLE			GENMASK(PCIE_MSI_SET_NUM + 8 - 1, 8)
#define PCIE_MSI_SHIFT			8
#define PCIE_INTX_SHIFT			24
#define PCIE_INTX_ENABLE \
	GENMASK(PCIE_INTX_SHIFT + PCI_NUM_INTX - 1, PCIE_INTX_SHIFT)

#define PCIE_INT_STATUS_REG		0x184
#define PCIE_MSI_SET_ENABLE_REG		0x190
#define PCIE_MSI_SET_ENABLE		GENMASK(PCIE_MSI_SET_NUM - 1, 0)

/* Per-set MSI registers: capture address, status and enable */
#define PCIE_MSI_SET_BASE_REG		0xc00
#define PCIE_MSI_SET_OFFSET		0x10
#define PCIE_MSI_SET_STATUS_OFFSET	0x04
#define PCIE_MSI_SET_ENABLE_OFFSET	0x08

#define PCIE_MSI_SET_ADDR_HI_BASE	0xc80
#define PCIE_MSI_SET_ADDR_HI_OFFSET	0x04

#define PCIE_ICMD_PM_REG		0x198
#define PCIE_TURN_OFF_LINK		BIT(4)

#define PCIE_MISC_CTRL_REG		0x348
#define PCIE_DISABLE_DVFSRC_VLT_REQ	BIT(1)

/* Address translation (ATR) table: one 0x20-byte entry per window */
#define PCIE_TRANS_TABLE_BASE_REG	0x800
#define PCIE_ATR_SRC_ADDR_MSB_OFFSET	0x4
#define PCIE_ATR_TRSL_ADDR_LSB_OFFSET	0x8
#define PCIE_ATR_TRSL_ADDR_MSB_OFFSET	0xc
#define PCIE_ATR_TRSL_PARAM_OFFSET	0x10
#define PCIE_ATR_TLB_SET_OFFSET		0x20

#define PCIE_MAX_TRANS_TABLES		8
#define PCIE_ATR_EN			BIT(0)
#define PCIE_ATR_SIZE(size) \
	(((((size) - 1) << 1) & GENMASK(6, 1)) | PCIE_ATR_EN)
#define PCIE_ATR_ID(id)			((id) & GENMASK(3, 0))
#define PCIE_ATR_TYPE_MEM		PCIE_ATR_ID(0)
#define PCIE_ATR_TYPE_IO		PCIE_ATR_ID(1)
#define PCIE_ATR_TLP_TYPE(type)		(((type) << 16) & GENMASK(18, 16))
#define PCIE_ATR_TLP_TYPE_MEM		PCIE_ATR_TLP_TYPE(0)
#define PCIE_ATR_TLP_TYPE_IO		PCIE_ATR_TLP_TYPE(2)
102 | |
103 | /** |
104 | * struct mtk_msi_set - MSI information for each set |
105 | * @base: IO mapped register base |
106 | * @msg_addr: MSI message address |
107 | * @saved_irq_state: IRQ enable state saved at suspend time |
108 | */ |
109 | struct mtk_msi_set { |
110 | void __iomem *base; |
111 | phys_addr_t msg_addr; |
112 | u32 saved_irq_state; |
113 | }; |
114 | |
115 | /** |
116 | * struct mtk_gen3_pcie - PCIe port information |
117 | * @dev: pointer to PCIe device |
118 | * @base: IO mapped register base |
119 | * @reg_base: physical register base |
120 | * @mac_reset: MAC reset control |
121 | * @phy_reset: PHY reset control |
122 | * @phy: PHY controller block |
123 | * @clks: PCIe clocks |
124 | * @num_clks: PCIe clocks count for this port |
125 | * @irq: PCIe controller interrupt number |
126 | * @saved_irq_state: IRQ enable state saved at suspend time |
127 | * @irq_lock: lock protecting IRQ register access |
128 | * @intx_domain: legacy INTx IRQ domain |
129 | * @msi_domain: MSI IRQ domain |
130 | * @msi_bottom_domain: MSI IRQ bottom domain |
131 | * @msi_sets: MSI sets information |
132 | * @lock: lock protecting IRQ bit map |
133 | * @msi_irq_in_use: bit map for assigned MSI IRQ |
134 | */ |
135 | struct mtk_gen3_pcie { |
136 | struct device *dev; |
137 | void __iomem *base; |
138 | phys_addr_t reg_base; |
139 | struct reset_control *mac_reset; |
140 | struct reset_control *phy_reset; |
141 | struct phy *phy; |
142 | struct clk_bulk_data *clks; |
143 | int num_clks; |
144 | |
145 | int irq; |
146 | u32 saved_irq_state; |
147 | raw_spinlock_t irq_lock; |
148 | struct irq_domain *intx_domain; |
149 | struct irq_domain *msi_domain; |
150 | struct irq_domain *msi_bottom_domain; |
151 | struct mtk_msi_set msi_sets[PCIE_MSI_SET_NUM]; |
152 | struct mutex lock; |
153 | DECLARE_BITMAP(msi_irq_in_use, PCIE_MSI_IRQS_NUM); |
154 | }; |
155 | |
/*
 * Human-readable LTSSM state names, indexed by the value extracted from
 * PCIE_LTSSM_STATUS_REG bits [28:24] via PCIE_LTSSM_STATE().
 */
static const char *const ltssm_str[] = {
	[0x00] = "detect.quiet",
	[0x01] = "detect.active",
	[0x02] = "polling.active",
	[0x03] = "polling.compliance",
	[0x04] = "polling.configuration",
	[0x05] = "config.linkwidthstart",
	[0x06] = "config.linkwidthaccept",
	[0x07] = "config.lanenumwait",
	[0x08] = "config.lanenumaccept",
	[0x09] = "config.complete",
	[0x0A] = "config.idle",
	[0x0B] = "recovery.receiverlock",
	[0x0C] = "recovery.equalization",
	[0x0D] = "recovery.speed",
	[0x0E] = "recovery.receiverconfig",
	[0x0F] = "recovery.idle",
	[0x10] = "L0",
	[0x11] = "L0s",
	[0x12] = "L1.entry",
	[0x13] = "L1.idle",
	[0x14] = "L2.idle",
	[0x15] = "L2.transmitwake",
	[0x16] = "disable",
	[0x17] = "loopback.entry",
	[0x18] = "loopback.active",
	[0x19] = "loopback.exit",
	[0x1A] = "hotreset",
};
186 | |
187 | /** |
188 | * mtk_pcie_config_tlp_header() - Configure a configuration TLP header |
189 | * @bus: PCI bus to query |
190 | * @devfn: device/function number |
191 | * @where: offset in config space |
192 | * @size: data size in TLP header |
193 | * |
194 | * Set byte enable field and device information in configuration TLP header. |
195 | */ |
196 | static void (struct pci_bus *bus, unsigned int devfn, |
197 | int where, int size) |
198 | { |
199 | struct mtk_gen3_pcie *pcie = bus->sysdata; |
200 | int bytes; |
201 | u32 val; |
202 | |
203 | bytes = (GENMASK(size - 1, 0) & 0xf) << (where & 0x3); |
204 | |
205 | val = PCIE_CFG_FORCE_BYTE_EN | PCIE_CFG_BYTE_EN(bytes) | |
206 | PCIE_CFG_HEADER(bus->number, devfn); |
207 | |
208 | writel_relaxed(val, pcie->base + PCIE_CFGNUM_REG); |
209 | } |
210 | |
211 | static void __iomem *mtk_pcie_map_bus(struct pci_bus *bus, unsigned int devfn, |
212 | int where) |
213 | { |
214 | struct mtk_gen3_pcie *pcie = bus->sysdata; |
215 | |
216 | return pcie->base + PCIE_CFG_OFFSET_ADDR + where; |
217 | } |
218 | |
219 | static int mtk_pcie_config_read(struct pci_bus *bus, unsigned int devfn, |
220 | int where, int size, u32 *val) |
221 | { |
222 | mtk_pcie_config_tlp_header(bus, devfn, where, size); |
223 | |
224 | return pci_generic_config_read32(bus, devfn, where, size, val); |
225 | } |
226 | |
227 | static int mtk_pcie_config_write(struct pci_bus *bus, unsigned int devfn, |
228 | int where, int size, u32 val) |
229 | { |
230 | mtk_pcie_config_tlp_header(bus, devfn, where, size); |
231 | |
232 | if (size <= 2) |
233 | val <<= (where & 0x3) * 8; |
234 | |
235 | return pci_generic_config_write32(bus, devfn, where, size: 4, val); |
236 | } |
237 | |
/* Config space accessors handed to the generic PCI core */
static struct pci_ops mtk_pcie_ops = {
	.map_bus = mtk_pcie_map_bus,
	.read = mtk_pcie_config_read,
	.write = mtk_pcie_config_write,
};
243 | |
/**
 * mtk_pcie_set_trans_table() - Program outbound address translation windows
 * @pcie: PCIe port information
 * @cpu_addr: CPU-side start address of the range
 * @pci_addr: PCI-side start address the range translates to
 * @size: size of the range
 * @type: resource type (IORESOURCE_IO or memory)
 * @num: in/out index of the next free translation table entry
 *
 * Split the range into naturally-aligned power-of-2 windows (the hardware
 * requirement) and program one ATR table entry per window, consuming entries
 * starting at *@num. Returns 0 on success, -EINVAL if a window would be
 * smaller than the 4KiB hardware minimum. If the 8 available entries run
 * out, the remainder is dropped with a warning.
 */
static int mtk_pcie_set_trans_table(struct mtk_gen3_pcie *pcie,
				    resource_size_t cpu_addr,
				    resource_size_t pci_addr,
				    resource_size_t size,
				    unsigned long type, int *num)
{
	resource_size_t remaining = size;
	resource_size_t table_size;
	resource_size_t addr_align;
	const char *range_type;
	void __iomem *table;
	u32 val;

	while (remaining && (*num < PCIE_MAX_TRANS_TABLES)) {
		/* Table size needs to be a power of 2 */
		table_size = BIT(fls(remaining) - 1);

		/*
		 * Each window must also be naturally aligned: cap it at the
		 * largest power of two dividing the current CPU address.
		 * NOTE(review): fls()/ffs() take int — confirm ranges above
		 * 32 bits are not expected here.
		 */
		if (cpu_addr > 0) {
			addr_align = BIT(ffs(cpu_addr) - 1);
			table_size = min(table_size, addr_align);
		}

		/* Minimum size of translate table is 4KiB */
		if (table_size < 0x1000) {
			dev_err(pcie->dev, "illegal table size %#llx\n" ,
				(unsigned long long)table_size);
			return -EINVAL;
		}

		/* Program source (CPU) and translated (PCI) addresses */
		table = pcie->base + PCIE_TRANS_TABLE_BASE_REG + *num * PCIE_ATR_TLB_SET_OFFSET;
		writel_relaxed(lower_32_bits(cpu_addr) | PCIE_ATR_SIZE(fls(table_size) - 1), table);
		writel_relaxed(upper_32_bits(cpu_addr), table + PCIE_ATR_SRC_ADDR_MSB_OFFSET);
		writel_relaxed(lower_32_bits(pci_addr), table + PCIE_ATR_TRSL_ADDR_LSB_OFFSET);
		writel_relaxed(upper_32_bits(pci_addr), table + PCIE_ATR_TRSL_ADDR_MSB_OFFSET);

		if (type == IORESOURCE_IO) {
			val = PCIE_ATR_TYPE_IO | PCIE_ATR_TLP_TYPE_IO;
			range_type = "IO" ;
		} else {
			val = PCIE_ATR_TYPE_MEM | PCIE_ATR_TLP_TYPE_MEM;
			range_type = "MEM" ;
		}

		writel_relaxed(val, table + PCIE_ATR_TRSL_PARAM_OFFSET);

		dev_dbg(pcie->dev, "set %s trans window[%d]: cpu_addr = %#llx, pci_addr = %#llx, size = %#llx\n" ,
			range_type, *num, (unsigned long long)cpu_addr,
			(unsigned long long)pci_addr, (unsigned long long)table_size);

		/* Advance to the portion not yet covered by a window */
		cpu_addr += table_size;
		pci_addr += table_size;
		remaining -= table_size;
		(*num)++;
	}

	if (remaining)
		dev_warn(pcie->dev, "not enough translate table for addr: %#llx, limited to [%d]\n" ,
			 (unsigned long long)cpu_addr, PCIE_MAX_TRANS_TABLES);

	return 0;
}
305 | |
/**
 * mtk_pcie_enable_msi() - Initialize and enable all MSI sets
 * @pcie: PCIe port information
 *
 * Point each set's MSI capture address at its own register window (so an
 * inbound MSI write raises the corresponding status bit), then enable every
 * set and the top-level MSI interrupt.
 */
static void mtk_pcie_enable_msi(struct mtk_gen3_pcie *pcie)
{
	int i;
	u32 val;

	for (i = 0; i < PCIE_MSI_SET_NUM; i++) {
		struct mtk_msi_set *msi_set = &pcie->msi_sets[i];

		/* Virtual and physical address of this set's registers */
		msi_set->base = pcie->base + PCIE_MSI_SET_BASE_REG +
				i * PCIE_MSI_SET_OFFSET;
		msi_set->msg_addr = pcie->reg_base + PCIE_MSI_SET_BASE_REG +
				    i * PCIE_MSI_SET_OFFSET;

		/* Configure the MSI capture address */
		writel_relaxed(lower_32_bits(msi_set->msg_addr), msi_set->base);
		writel_relaxed(upper_32_bits(msi_set->msg_addr),
			       pcie->base + PCIE_MSI_SET_ADDR_HI_BASE +
			       i * PCIE_MSI_SET_ADDR_HI_OFFSET);
	}

	/* Enable all MSI sets */
	val = readl_relaxed(pcie->base + PCIE_MSI_SET_ENABLE_REG);
	val |= PCIE_MSI_SET_ENABLE;
	writel_relaxed(val, pcie->base + PCIE_MSI_SET_ENABLE_REG);

	/* Unmask the top-level MSI interrupt */
	val = readl_relaxed(pcie->base + PCIE_INT_ENABLE_REG);
	val |= PCIE_MSI_ENABLE;
	writel_relaxed(val, pcie->base + PCIE_INT_ENABLE_REG);
}
334 | |
/**
 * mtk_pcie_startup_port() - Bring up the PCIe port and train the link
 * @pcie: PCIe port information
 *
 * Configure RC mode and the bridge class code, release the reset signals
 * (observing the PERST# timing from the CEM spec), wait for link-up, then
 * enable MSI and program the outbound translation windows from the host
 * bridge's resource list.
 *
 * Return: 0 on success, a negative errno if the link never comes up or a
 * translation window cannot be programmed.
 */
static int mtk_pcie_startup_port(struct mtk_gen3_pcie *pcie)
{
	struct resource_entry *entry;
	struct pci_host_bridge *host = pci_host_bridge_from_priv(priv: pcie);
	unsigned int table_index = 0;
	int err;
	u32 val;

	/* Set as RC mode */
	val = readl_relaxed(pcie->base + PCIE_SETTING_REG);
	val |= PCIE_RC_MODE;
	writel_relaxed(val, pcie->base + PCIE_SETTING_REG);

	/* Set class code */
	val = readl_relaxed(pcie->base + PCIE_PCI_IDS_1);
	val &= ~GENMASK(31, 8);
	val |= PCI_CLASS(PCI_CLASS_BRIDGE_PCI_NORMAL);
	writel_relaxed(val, pcie->base + PCIE_PCI_IDS_1);

	/* Mask all INTx interrupts */
	val = readl_relaxed(pcie->base + PCIE_INT_ENABLE_REG);
	val &= ~PCIE_INTX_ENABLE;
	writel_relaxed(val, pcie->base + PCIE_INT_ENABLE_REG);

	/* Disable DVFSRC voltage request */
	val = readl_relaxed(pcie->base + PCIE_MISC_CTRL_REG);
	val |= PCIE_DISABLE_DVFSRC_VLT_REQ;
	writel_relaxed(val, pcie->base + PCIE_MISC_CTRL_REG);

	/* Assert all reset signals */
	val = readl_relaxed(pcie->base + PCIE_RST_CTRL_REG);
	val |= PCIE_MAC_RSTB | PCIE_PHY_RSTB | PCIE_BRG_RSTB | PCIE_PE_RSTB;
	writel_relaxed(val, pcie->base + PCIE_RST_CTRL_REG);

	/*
	 * Described in PCIe CEM specification sections 2.2 (PERST# Signal)
	 * and 2.2.1 (Initial Power-Up (G3 to S0)).
	 * The deassertion of PERST# should be delayed 100ms (TPVPERL)
	 * for the power and clock to become stable.
	 */
	msleep(msecs: 100);

	/* De-assert reset signals */
	val &= ~(PCIE_MAC_RSTB | PCIE_PHY_RSTB | PCIE_BRG_RSTB | PCIE_PE_RSTB);
	writel_relaxed(val, pcie->base + PCIE_RST_CTRL_REG);

	/* Check if the link is up or not */
	err = readl_poll_timeout(pcie->base + PCIE_LINK_STATUS_REG, val,
				 !!(val & PCIE_PORT_LINKUP), 20,
				 PCI_PM_D3COLD_WAIT * USEC_PER_MSEC);
	if (err) {
		const char *ltssm_state;
		int ltssm_index;

		/* Report the LTSSM state the link training stalled in */
		val = readl_relaxed(pcie->base + PCIE_LTSSM_STATUS_REG);
		ltssm_index = PCIE_LTSSM_STATE(val);
		ltssm_state = ltssm_index >= ARRAY_SIZE(ltssm_str) ?
			      "Unknown state" : ltssm_str[ltssm_index];
		dev_err(pcie->dev,
			"PCIe link down, current LTSSM state: %s (%#x)\n" ,
			ltssm_state, val);
		return err;
	}

	mtk_pcie_enable_msi(pcie);

	/* Set PCIe translation windows */
	resource_list_for_each_entry(entry, &host->windows) {
		struct resource *res = entry->res;
		unsigned long type = resource_type(res);
		resource_size_t cpu_addr;
		resource_size_t pci_addr;
		resource_size_t size;

		if (type == IORESOURCE_IO)
			cpu_addr = pci_pio_to_address(pio: res->start);
		else if (type == IORESOURCE_MEM)
			cpu_addr = res->start;
		else
			continue;

		pci_addr = res->start - entry->offset;
		size = resource_size(res);
		err = mtk_pcie_set_trans_table(pcie, cpu_addr, pci_addr, size,
					       type, num: &table_index);
		if (err)
			return err;
	}

	return 0;
}
426 | |
/* Per-vector affinity is not supported by this controller; always refuse */
static int mtk_pcie_set_affinity(struct irq_data *data,
				 const struct cpumask *mask, bool force)
{
	return -EINVAL;
}
432 | |
/* Mask an MSI at the PCI device, then in the parent (bottom) domain */
static void mtk_pcie_msi_irq_mask(struct irq_data *data)
{
	pci_msi_mask_irq(data);
	irq_chip_mask_parent(data);
}
438 | |
/* Unmask an MSI at the PCI device, then in the parent (bottom) domain */
static void mtk_pcie_msi_irq_unmask(struct irq_data *data)
{
	pci_msi_unmask_irq(data);
	irq_chip_unmask_parent(data);
}
444 | |
/* Top-level MSI irq_chip; ack is delegated to the bottom domain */
static struct irq_chip mtk_msi_irq_chip = {
	.irq_ack = irq_chip_ack_parent,
	.irq_mask = mtk_pcie_msi_irq_mask,
	.irq_unmask = mtk_pcie_msi_irq_unmask,
	.name = "MSI" ,
};
451 | |
/* PCI MSI domain: supports MSI-X and multi-vector MSI */
static struct msi_domain_info mtk_msi_domain_info = {
	.flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
		  MSI_FLAG_PCI_MSIX | MSI_FLAG_MULTI_PCI_MSI),
	.chip = &mtk_msi_irq_chip,
};
457 | |
458 | static void mtk_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) |
459 | { |
460 | struct mtk_msi_set *msi_set = irq_data_get_irq_chip_data(d: data); |
461 | struct mtk_gen3_pcie *pcie = data->domain->host_data; |
462 | unsigned long hwirq; |
463 | |
464 | hwirq = data->hwirq % PCIE_MSI_IRQS_PER_SET; |
465 | |
466 | msg->address_hi = upper_32_bits(msi_set->msg_addr); |
467 | msg->address_lo = lower_32_bits(msi_set->msg_addr); |
468 | msg->data = hwirq; |
469 | dev_dbg(pcie->dev, "msi#%#lx address_hi %#x address_lo %#x data %d\n" , |
470 | hwirq, msg->address_hi, msg->address_lo, msg->data); |
471 | } |
472 | |
473 | static void mtk_msi_bottom_irq_ack(struct irq_data *data) |
474 | { |
475 | struct mtk_msi_set *msi_set = irq_data_get_irq_chip_data(d: data); |
476 | unsigned long hwirq; |
477 | |
478 | hwirq = data->hwirq % PCIE_MSI_IRQS_PER_SET; |
479 | |
480 | writel_relaxed(BIT(hwirq), msi_set->base + PCIE_MSI_SET_STATUS_OFFSET); |
481 | } |
482 | |
/* Mask one MSI vector: clear its bit in the set's enable register (locked RMW) */
static void mtk_msi_bottom_irq_mask(struct irq_data *data)
{
	struct mtk_msi_set *msi_set = irq_data_get_irq_chip_data(d: data);
	struct mtk_gen3_pcie *pcie = data->domain->host_data;
	unsigned long hwirq, flags;
	u32 val;

	/* Vector index within this 32-vector set */
	hwirq =	data->hwirq % PCIE_MSI_IRQS_PER_SET;

	raw_spin_lock_irqsave(&pcie->irq_lock, flags);
	val = readl_relaxed(msi_set->base + PCIE_MSI_SET_ENABLE_OFFSET);
	val &= ~BIT(hwirq);
	writel_relaxed(val, msi_set->base + PCIE_MSI_SET_ENABLE_OFFSET);
	raw_spin_unlock_irqrestore(&pcie->irq_lock, flags);
}
498 | |
/* Unmask one MSI vector: set its bit in the set's enable register (locked RMW) */
static void mtk_msi_bottom_irq_unmask(struct irq_data *data)
{
	struct mtk_msi_set *msi_set = irq_data_get_irq_chip_data(d: data);
	struct mtk_gen3_pcie *pcie = data->domain->host_data;
	unsigned long hwirq, flags;
	u32 val;

	/* Vector index within this 32-vector set */
	hwirq =	data->hwirq % PCIE_MSI_IRQS_PER_SET;

	raw_spin_lock_irqsave(&pcie->irq_lock, flags);
	val = readl_relaxed(msi_set->base + PCIE_MSI_SET_ENABLE_OFFSET);
	val |= BIT(hwirq);
	writel_relaxed(val, msi_set->base + PCIE_MSI_SET_ENABLE_OFFSET);
	raw_spin_unlock_irqrestore(&pcie->irq_lock, flags);
}
514 | |
/* Bottom-level MSI irq_chip: drives the per-set hardware registers */
static struct irq_chip mtk_msi_bottom_irq_chip = {
	.irq_ack = mtk_msi_bottom_irq_ack,
	.irq_mask = mtk_msi_bottom_irq_mask,
	.irq_unmask = mtk_msi_bottom_irq_unmask,
	.irq_compose_msi_msg = mtk_compose_msi_msg,
	.irq_set_affinity = mtk_pcie_set_affinity,
	.name = "MSI" ,
};
523 | |
/**
 * mtk_msi_bottom_domain_alloc() - Allocate hardware MSI vectors
 * @domain: bottom MSI IRQ domain
 * @virq: first virtual IRQ number to set up
 * @nr_irqs: number of vectors requested
 * @arg: unused
 *
 * Reserve a contiguous, power-of-2-aligned block of vectors from the bit
 * map (as multi-MSI requires) and bind each virq to the bottom irq_chip
 * with the owning MSI set as chip data.
 *
 * Return: 0 on success, -ENOSPC when no suitable free block exists.
 */
static int mtk_msi_bottom_domain_alloc(struct irq_domain *domain,
				       unsigned int virq, unsigned int nr_irqs,
				       void *arg)
{
	struct mtk_gen3_pcie *pcie = domain->host_data;
	struct mtk_msi_set *msi_set;
	int i, hwirq, set_idx;

	mutex_lock(&pcie->lock);

	hwirq = bitmap_find_free_region(bitmap: pcie->msi_irq_in_use, PCIE_MSI_IRQS_NUM,
					order_base_2(nr_irqs));

	mutex_unlock(lock: &pcie->lock);

	if (hwirq < 0)
		return -ENOSPC;

	/* The aligned block lies entirely within a single 32-vector set */
	set_idx = hwirq / PCIE_MSI_IRQS_PER_SET;
	msi_set = &pcie->msi_sets[set_idx];

	for (i = 0; i < nr_irqs; i++)
		irq_domain_set_info(domain, virq: virq + i, hwirq: hwirq + i,
				    chip: &mtk_msi_bottom_irq_chip, chip_data: msi_set,
				    handler: handle_edge_irq, NULL, NULL);

	return 0;
}
552 | |
/* Release the vector block taken in mtk_msi_bottom_domain_alloc() */
static void mtk_msi_bottom_domain_free(struct irq_domain *domain,
				       unsigned int virq, unsigned int nr_irqs)
{
	struct mtk_gen3_pcie *pcie = domain->host_data;
	struct irq_data *data = irq_domain_get_irq_data(domain, virq);

	mutex_lock(&pcie->lock);

	/* Return the whole power-of-2 region starting at the first hwirq */
	bitmap_release_region(bitmap: pcie->msi_irq_in_use, pos: data->hwirq,
			      order_base_2(nr_irqs));

	mutex_unlock(lock: &pcie->lock);

	irq_domain_free_irqs_common(domain, virq, nr_irqs);
}
568 | |
/* Allocation ops for the bottom (hardware vector) MSI domain */
static const struct irq_domain_ops mtk_msi_bottom_domain_ops = {
	.alloc = mtk_msi_bottom_domain_alloc,
	.free = mtk_msi_bottom_domain_free,
};
573 | |
/* Mask one INTx line: clear its enable bit in PCIE_INT_ENABLE_REG (locked RMW) */
static void mtk_intx_mask(struct irq_data *data)
{
	struct mtk_gen3_pcie *pcie = irq_data_get_irq_chip_data(d: data);
	unsigned long flags;
	u32 val;

	raw_spin_lock_irqsave(&pcie->irq_lock, flags);
	val = readl_relaxed(pcie->base + PCIE_INT_ENABLE_REG);
	val &= ~BIT(data->hwirq + PCIE_INTX_SHIFT);
	writel_relaxed(val, pcie->base + PCIE_INT_ENABLE_REG);
	raw_spin_unlock_irqrestore(&pcie->irq_lock, flags);
}
586 | |
/* Unmask one INTx line: set its enable bit in PCIE_INT_ENABLE_REG (locked RMW) */
static void mtk_intx_unmask(struct irq_data *data)
{
	struct mtk_gen3_pcie *pcie = irq_data_get_irq_chip_data(d: data);
	unsigned long flags;
	u32 val;

	raw_spin_lock_irqsave(&pcie->irq_lock, flags);
	val = readl_relaxed(pcie->base + PCIE_INT_ENABLE_REG);
	val |= BIT(data->hwirq + PCIE_INTX_SHIFT);
	writel_relaxed(val, pcie->base + PCIE_INT_ENABLE_REG);
	raw_spin_unlock_irqrestore(&pcie->irq_lock, flags);
}
599 | |
600 | /** |
601 | * mtk_intx_eoi() - Clear INTx IRQ status at the end of interrupt |
602 | * @data: pointer to chip specific data |
603 | * |
604 | * As an emulated level IRQ, its interrupt status will remain |
605 | * until the corresponding de-assert message is received; hence that |
606 | * the status can only be cleared when the interrupt has been serviced. |
607 | */ |
608 | static void mtk_intx_eoi(struct irq_data *data) |
609 | { |
610 | struct mtk_gen3_pcie *pcie = irq_data_get_irq_chip_data(d: data); |
611 | unsigned long hwirq; |
612 | |
613 | hwirq = data->hwirq + PCIE_INTX_SHIFT; |
614 | writel_relaxed(BIT(hwirq), pcie->base + PCIE_INT_STATUS_REG); |
615 | } |
616 | |
/* irq_chip for the four emulated-level INTx lines */
static struct irq_chip mtk_intx_irq_chip = {
	.irq_mask = mtk_intx_mask,
	.irq_unmask = mtk_intx_unmask,
	.irq_eoi = mtk_intx_eoi,
	.irq_set_affinity = mtk_pcie_set_affinity,
	.name = "INTx" ,
};
624 | |
/* Bind an INTx virq to the INTx chip with fasteoi flow (level-like handling) */
static int mtk_pcie_intx_map(struct irq_domain *domain, unsigned int irq,
			     irq_hw_number_t hwirq)
{
	irq_set_chip_data(irq, data: domain->host_data);
	irq_set_chip_and_handler_name(irq, chip: &mtk_intx_irq_chip,
				      handle: handle_fasteoi_irq, name: "INTx" );
	return 0;
}
633 | |
/* Mapping ops for the legacy INTx domain */
static const struct irq_domain_ops intx_domain_ops = {
	.map = mtk_pcie_intx_map,
};
637 | |
/**
 * mtk_pcie_init_irq_domains() - Create the INTx and MSI IRQ domains
 * @pcie: PCIe port information
 *
 * Build the INTx domain from the "interrupt-controller" child node, then
 * the bottom MSI domain and the PCI MSI domain stacked on top of it.
 * On failure, domains created so far are torn down in reverse order.
 *
 * Return: 0 on success, -ENODEV on any domain creation failure.
 */
static int mtk_pcie_init_irq_domains(struct mtk_gen3_pcie *pcie)
{
	struct device *dev = pcie->dev;
	struct device_node *intc_node, *node = dev->of_node;
	int ret;

	raw_spin_lock_init(&pcie->irq_lock);

	/* Setup INTx */
	intc_node = of_get_child_by_name(node, name: "interrupt-controller" );
	if (!intc_node) {
		dev_err(dev, "missing interrupt-controller node\n" );
		return -ENODEV;
	}

	pcie->intx_domain = irq_domain_add_linear(of_node: intc_node, PCI_NUM_INTX,
						  ops: &intx_domain_ops, host_data: pcie);
	if (!pcie->intx_domain) {
		dev_err(dev, "failed to create INTx IRQ domain\n" );
		ret = -ENODEV;
		goto out_put_node;
	}

	/* Setup MSI */
	mutex_init(&pcie->lock);

	pcie->msi_bottom_domain = irq_domain_add_linear(of_node: node, PCIE_MSI_IRQS_NUM,
							ops: &mtk_msi_bottom_domain_ops, host_data: pcie);
	if (!pcie->msi_bottom_domain) {
		dev_err(dev, "failed to create MSI bottom domain\n" );
		ret = -ENODEV;
		goto err_msi_bottom_domain;
	}

	pcie->msi_domain = pci_msi_create_irq_domain(fwnode: dev->fwnode,
						     info: &mtk_msi_domain_info,
						     parent: pcie->msi_bottom_domain);
	if (!pcie->msi_domain) {
		dev_err(dev, "failed to create MSI domain\n" );
		ret = -ENODEV;
		goto err_msi_domain;
	}

	of_node_put(node: intc_node);
	return 0;

err_msi_domain:
	irq_domain_remove(host: pcie->msi_bottom_domain);
err_msi_bottom_domain:
	irq_domain_remove(host: pcie->intx_domain);
out_put_node:
	of_node_put(node: intc_node);
	return ret;
}
692 | |
/* Detach the chained handler and destroy whatever domains were created */
static void mtk_pcie_irq_teardown(struct mtk_gen3_pcie *pcie)
{
	irq_set_chained_handler_and_data(irq: pcie->irq, NULL, NULL);

	if (pcie->intx_domain)
		irq_domain_remove(host: pcie->intx_domain);

	if (pcie->msi_domain)
		irq_domain_remove(host: pcie->msi_domain);

	if (pcie->msi_bottom_domain)
		irq_domain_remove(host: pcie->msi_bottom_domain);

	irq_dispose_mapping(virq: pcie->irq);
}
708 | |
/**
 * mtk_pcie_msi_handler() - Demultiplex pending MSIs of one set
 * @pcie: PCIe port information
 * @set_idx: index of the MSI set to service
 *
 * Re-read the set's status until no enabled vector remains pending, and
 * dispatch each pending vector into the bottom MSI domain.
 */
static void mtk_pcie_msi_handler(struct mtk_gen3_pcie *pcie, int set_idx)
{
	struct mtk_msi_set *msi_set = &pcie->msi_sets[set_idx];
	unsigned long msi_enable, msi_status;
	irq_hw_number_t bit, hwirq;

	msi_enable = readl_relaxed(msi_set->base + PCIE_MSI_SET_ENABLE_OFFSET);

	do {
		/* Only consider vectors that are both pending and enabled */
		msi_status = readl_relaxed(msi_set->base +
					   PCIE_MSI_SET_STATUS_OFFSET);
		msi_status &= msi_enable;
		if (!msi_status)
			break;

		for_each_set_bit(bit, &msi_status, PCIE_MSI_IRQS_PER_SET) {
			/* Translate set-relative bit to global hwirq number */
			hwirq = bit + set_idx * PCIE_MSI_IRQS_PER_SET;
			generic_handle_domain_irq(domain: pcie->msi_bottom_domain, hwirq);
		}
	} while (true);
}
730 | |
/*
 * Chained handler for the port interrupt: demultiplex the shared status
 * register into INTx events (bits 24+) and per-set MSI events (bits 8+).
 */
static void mtk_pcie_irq_handler(struct irq_desc *desc)
{
	struct mtk_gen3_pcie *pcie = irq_desc_get_handler_data(desc);
	struct irq_chip *irqchip = irq_desc_get_chip(desc);
	unsigned long status;
	irq_hw_number_t irq_bit = PCIE_INTX_SHIFT;

	chained_irq_enter(chip: irqchip, desc);

	status = readl_relaxed(pcie->base + PCIE_INT_STATUS_REG);
	/* INTx: status cleared later by mtk_intx_eoi() after servicing */
	for_each_set_bit_from(irq_bit, &status, PCI_NUM_INTX +
			      PCIE_INTX_SHIFT)
		generic_handle_domain_irq(domain: pcie->intx_domain,
					  hwirq: irq_bit - PCIE_INTX_SHIFT);

	irq_bit = PCIE_MSI_SHIFT;
	for_each_set_bit_from(irq_bit, &status, PCIE_MSI_SET_NUM +
			      PCIE_MSI_SHIFT) {
		mtk_pcie_msi_handler(pcie, set_idx: irq_bit - PCIE_MSI_SHIFT);

		/* Clear the set-level status once its vectors were handled */
		writel_relaxed(BIT(irq_bit), pcie->base + PCIE_INT_STATUS_REG);
	}

	chained_irq_exit(chip: irqchip, desc);
}
756 | |
/* Create the IRQ domains and install the chained port interrupt handler */
static int mtk_pcie_setup_irq(struct mtk_gen3_pcie *pcie)
{
	struct device *dev = pcie->dev;
	struct platform_device *pdev = to_platform_device(dev);
	int err;

	err = mtk_pcie_init_irq_domains(pcie);
	if (err)
		return err;

	pcie->irq = platform_get_irq(pdev, 0);
	if (pcie->irq < 0)
		return pcie->irq;

	irq_set_chained_handler_and_data(irq: pcie->irq, handle: mtk_pcie_irq_handler, data: pcie);

	return 0;
}
775 | |
/**
 * mtk_pcie_parse_port() - Acquire the port's platform resources
 * @pcie: PCIe port information
 *
 * Map the "pcie-mac" register region (keeping its physical address for
 * MSI capture address computation), and fetch the optional PHY/MAC resets,
 * the optional PHY, and all clocks.
 *
 * Return: 0 on success, a negative errno otherwise (-EPROBE_DEFER is
 * propagated silently).
 */
static int mtk_pcie_parse_port(struct mtk_gen3_pcie *pcie)
{
	struct device *dev = pcie->dev;
	struct platform_device *pdev = to_platform_device(dev);
	struct resource *regs;
	int ret;

	regs = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pcie-mac" );
	if (!regs)
		return -EINVAL;
	pcie->base = devm_ioremap_resource(dev, res: regs);
	if (IS_ERR(ptr: pcie->base)) {
		dev_err(dev, "failed to map register base\n" );
		return PTR_ERR(ptr: pcie->base);
	}

	/* Physical base is needed to program MSI capture addresses */
	pcie->reg_base = regs->start;

	pcie->phy_reset = devm_reset_control_get_optional_exclusive(dev, id: "phy" );
	if (IS_ERR(ptr: pcie->phy_reset)) {
		ret = PTR_ERR(ptr: pcie->phy_reset);
		if (ret != -EPROBE_DEFER)
			dev_err(dev, "failed to get PHY reset\n" );

		return ret;
	}

	pcie->mac_reset = devm_reset_control_get_optional_exclusive(dev, id: "mac" );
	if (IS_ERR(ptr: pcie->mac_reset)) {
		ret = PTR_ERR(ptr: pcie->mac_reset);
		if (ret != -EPROBE_DEFER)
			dev_err(dev, "failed to get MAC reset\n" );

		return ret;
	}

	pcie->phy = devm_phy_optional_get(dev, string: "pcie-phy" );
	if (IS_ERR(ptr: pcie->phy)) {
		ret = PTR_ERR(ptr: pcie->phy);
		if (ret != -EPROBE_DEFER)
			dev_err(dev, "failed to get PHY\n" );

		return ret;
	}

	pcie->num_clks = devm_clk_bulk_get_all(dev, clks: &pcie->clks);
	if (pcie->num_clks < 0) {
		dev_err(dev, "failed to get clocks\n" );
		return pcie->num_clks;
	}

	return 0;
}
829 | |
/**
 * mtk_pcie_power_up() - Power up the PHY, MAC and clocks for this port
 * @pcie: PCIe port information
 *
 * Release the PHY reset and bring the PHY up, release the MAC reset,
 * enable runtime PM and the bulk clocks. Each failure unwinds exactly the
 * steps already performed (goto chain, reverse order).
 *
 * Return: 0 on success, a negative errno on failure.
 */
static int mtk_pcie_power_up(struct mtk_gen3_pcie *pcie)
{
	struct device *dev = pcie->dev;
	int err;

	/* PHY power on and enable pipe clock */
	reset_control_deassert(rstc: pcie->phy_reset);

	err = phy_init(phy: pcie->phy);
	if (err) {
		dev_err(dev, "failed to initialize PHY\n" );
		goto err_phy_init;
	}

	err = phy_power_on(phy: pcie->phy);
	if (err) {
		dev_err(dev, "failed to power on PHY\n" );
		goto err_phy_on;
	}

	/* MAC power on and enable transaction layer clocks */
	reset_control_deassert(rstc: pcie->mac_reset);

	pm_runtime_enable(dev);
	pm_runtime_get_sync(dev);

	err = clk_bulk_prepare_enable(num_clks: pcie->num_clks, clks: pcie->clks);
	if (err) {
		dev_err(dev, "failed to enable clocks\n" );
		goto err_clk_init;
	}

	return 0;

err_clk_init:
	pm_runtime_put_sync(dev);
	pm_runtime_disable(dev);
	reset_control_assert(rstc: pcie->mac_reset);
	phy_power_off(phy: pcie->phy);
err_phy_on:
	phy_exit(phy: pcie->phy);
err_phy_init:
	reset_control_assert(rstc: pcie->phy_reset);

	return err;
}
876 | |
/* Undo mtk_pcie_power_up() in reverse order: clocks, PM, MAC, then PHY */
static void mtk_pcie_power_down(struct mtk_gen3_pcie *pcie)
{
	clk_bulk_disable_unprepare(num_clks: pcie->num_clks, clks: pcie->clks);

	pm_runtime_put_sync(dev: pcie->dev);
	pm_runtime_disable(dev: pcie->dev);
	reset_control_assert(rstc: pcie->mac_reset);

	phy_power_off(phy: pcie->phy);
	phy_exit(phy: pcie->phy);
	reset_control_assert(rstc: pcie->phy_reset);
}
889 | |
/**
 * mtk_pcie_setup() - Full port bring-up: resources, power, link, IRQs
 * @pcie: PCIe port information
 *
 * Parse platform resources, force a clean reset state, power the port up,
 * train the link and install interrupt handling. On any failure after
 * power-up, the port is powered back down.
 *
 * Return: 0 on success, a negative errno on failure.
 */
static int mtk_pcie_setup(struct mtk_gen3_pcie *pcie)
{
	int err;

	err = mtk_pcie_parse_port(pcie);
	if (err)
		return err;

	/*
	 * The controller may have been left out of reset by the bootloader
	 * so make sure that we get a clean start by asserting resets here.
	 */
	reset_control_assert(rstc: pcie->phy_reset);
	reset_control_assert(rstc: pcie->mac_reset);
	usleep_range(min: 10, max: 20);

	/* Don't touch the hardware registers before power up */
	err = mtk_pcie_power_up(pcie);
	if (err)
		return err;

	/* Try link up */
	err = mtk_pcie_startup_port(pcie);
	if (err)
		goto err_setup;

	err = mtk_pcie_setup_irq(pcie);
	if (err)
		goto err_setup;

	return 0;

err_setup:
	mtk_pcie_power_down(pcie);

	return err;
}
927 | |
928 | static int mtk_pcie_probe(struct platform_device *pdev) |
929 | { |
930 | struct device *dev = &pdev->dev; |
931 | struct mtk_gen3_pcie *pcie; |
932 | struct pci_host_bridge *host; |
933 | int err; |
934 | |
935 | host = devm_pci_alloc_host_bridge(dev, priv: sizeof(*pcie)); |
936 | if (!host) |
937 | return -ENOMEM; |
938 | |
939 | pcie = pci_host_bridge_priv(bridge: host); |
940 | |
941 | pcie->dev = dev; |
942 | platform_set_drvdata(pdev, data: pcie); |
943 | |
944 | err = mtk_pcie_setup(pcie); |
945 | if (err) |
946 | return err; |
947 | |
948 | host->ops = &mtk_pcie_ops; |
949 | host->sysdata = pcie; |
950 | |
951 | err = pci_host_probe(bridge: host); |
952 | if (err) { |
953 | mtk_pcie_irq_teardown(pcie); |
954 | mtk_pcie_power_down(pcie); |
955 | return err; |
956 | } |
957 | |
958 | return 0; |
959 | } |
960 | |
961 | static void mtk_pcie_remove(struct platform_device *pdev) |
962 | { |
963 | struct mtk_gen3_pcie *pcie = platform_get_drvdata(pdev); |
964 | struct pci_host_bridge *host = pci_host_bridge_from_priv(priv: pcie); |
965 | |
966 | pci_lock_rescan_remove(); |
967 | pci_stop_root_bus(bus: host->bus); |
968 | pci_remove_root_bus(bus: host->bus); |
969 | pci_unlock_rescan_remove(); |
970 | |
971 | mtk_pcie_irq_teardown(pcie); |
972 | mtk_pcie_power_down(pcie); |
973 | } |
974 | |
975 | static void mtk_pcie_irq_save(struct mtk_gen3_pcie *pcie) |
976 | { |
977 | int i; |
978 | |
979 | raw_spin_lock(&pcie->irq_lock); |
980 | |
981 | pcie->saved_irq_state = readl_relaxed(pcie->base + PCIE_INT_ENABLE_REG); |
982 | |
983 | for (i = 0; i < PCIE_MSI_SET_NUM; i++) { |
984 | struct mtk_msi_set *msi_set = &pcie->msi_sets[i]; |
985 | |
986 | msi_set->saved_irq_state = readl_relaxed(msi_set->base + |
987 | PCIE_MSI_SET_ENABLE_OFFSET); |
988 | } |
989 | |
990 | raw_spin_unlock(&pcie->irq_lock); |
991 | } |
992 | |
993 | static void mtk_pcie_irq_restore(struct mtk_gen3_pcie *pcie) |
994 | { |
995 | int i; |
996 | |
997 | raw_spin_lock(&pcie->irq_lock); |
998 | |
999 | writel_relaxed(pcie->saved_irq_state, pcie->base + PCIE_INT_ENABLE_REG); |
1000 | |
1001 | for (i = 0; i < PCIE_MSI_SET_NUM; i++) { |
1002 | struct mtk_msi_set *msi_set = &pcie->msi_sets[i]; |
1003 | |
1004 | writel_relaxed(msi_set->saved_irq_state, |
1005 | msi_set->base + PCIE_MSI_SET_ENABLE_OFFSET); |
1006 | } |
1007 | |
1008 | raw_spin_unlock(&pcie->irq_lock); |
1009 | } |
1010 | |
1011 | static int mtk_pcie_turn_off_link(struct mtk_gen3_pcie *pcie) |
1012 | { |
1013 | u32 val; |
1014 | |
1015 | val = readl_relaxed(pcie->base + PCIE_ICMD_PM_REG); |
1016 | val |= PCIE_TURN_OFF_LINK; |
1017 | writel_relaxed(val, pcie->base + PCIE_ICMD_PM_REG); |
1018 | |
1019 | /* Check the link is L2 */ |
1020 | return readl_poll_timeout(pcie->base + PCIE_LTSSM_STATUS_REG, val, |
1021 | (PCIE_LTSSM_STATE(val) == |
1022 | PCIE_LTSSM_STATE_L2_IDLE), 20, |
1023 | 50 * USEC_PER_MSEC); |
1024 | } |
1025 | |
1026 | static int mtk_pcie_suspend_noirq(struct device *dev) |
1027 | { |
1028 | struct mtk_gen3_pcie *pcie = dev_get_drvdata(dev); |
1029 | int err; |
1030 | u32 val; |
1031 | |
1032 | /* Trigger link to L2 state */ |
1033 | err = mtk_pcie_turn_off_link(pcie); |
1034 | if (err) { |
1035 | dev_err(pcie->dev, "cannot enter L2 state\n" ); |
1036 | return err; |
1037 | } |
1038 | |
1039 | /* Pull down the PERST# pin */ |
1040 | val = readl_relaxed(pcie->base + PCIE_RST_CTRL_REG); |
1041 | val |= PCIE_PE_RSTB; |
1042 | writel_relaxed(val, pcie->base + PCIE_RST_CTRL_REG); |
1043 | |
1044 | dev_dbg(pcie->dev, "entered L2 states successfully" ); |
1045 | |
1046 | mtk_pcie_irq_save(pcie); |
1047 | mtk_pcie_power_down(pcie); |
1048 | |
1049 | return 0; |
1050 | } |
1051 | |
/*
 * System resume (noirq phase): power the port back up, retrain the
 * link and restore the saved interrupt enable state. If link training
 * fails, the port is powered down again.
 */
static int mtk_pcie_resume_noirq(struct device *dev)
{
	struct mtk_gen3_pcie *pcie = dev_get_drvdata(dev);
	int ret;

	ret = mtk_pcie_power_up(pcie);
	if (ret)
		return ret;

	ret = mtk_pcie_startup_port(pcie);
	if (ret) {
		mtk_pcie_power_down(pcie);
		return ret;
	}

	mtk_pcie_irq_restore(pcie);

	return 0;
}
1071 | |
/* Suspend/resume handled only in the noirq phase of system sleep */
static const struct dev_pm_ops mtk_pcie_pm_ops = {
	NOIRQ_SYSTEM_SLEEP_PM_OPS(mtk_pcie_suspend_noirq,
				  mtk_pcie_resume_noirq)
};
1076 | |
/* Devicetree compatibles handled by this driver */
static const struct of_device_id mtk_pcie_of_match[] = {
	{ .compatible = "mediatek,mt8192-pcie" },
	{},
};
MODULE_DEVICE_TABLE(of, mtk_pcie_of_match);
1082 | |
/* Platform driver definition; .remove_new is the void-returning remove callback */
static struct platform_driver mtk_pcie_driver = {
	.probe = mtk_pcie_probe,
	.remove_new = mtk_pcie_remove,
	.driver = {
		.name = "mtk-pcie-gen3" ,
		.of_match_table = mtk_pcie_of_match,
		.pm = &mtk_pcie_pm_ops,
	},
};
1092 | |
/* Standard module registration boilerplate for the platform driver */
module_platform_driver(mtk_pcie_driver);
MODULE_LICENSE("GPL v2" );
1095 | |