1 | // SPDX-License-Identifier: GPL-2.0-only |
2 | /* |
3 | * sata_mv.c - Marvell SATA support |
4 | * |
5 | * Copyright 2008-2009: Marvell Corporation, all rights reserved. |
6 | * Copyright 2005: EMC Corporation, all rights reserved. |
7 | * Copyright 2005 Red Hat, Inc. All rights reserved. |
8 | * |
9 | * Originally written by Brett Russ. |
10 | * Extensive overhaul and enhancement by Mark Lord <mlord@pobox.com>. |
11 | * |
12 | * Please ALWAYS copy linux-ide@vger.kernel.org on emails. |
13 | */ |
14 | |
15 | /* |
16 | * sata_mv TODO list: |
17 | * |
18 | * --> Develop a low-power-consumption strategy, and implement it. |
19 | * |
20 | * --> Add sysfs attributes for per-chip / per-HC IRQ coalescing thresholds. |
21 | * |
22 | * --> [Experiment, Marvell value added] Is it possible to use target |
23 | * mode to cross-connect two Linux boxes with Marvell cards? If so, |
24 | * creating LibATA target mode support would be very interesting. |
25 | * |
26 | * Target mode, for those without docs, is the ability to directly |
27 | * connect two SATA ports. |
28 | */ |
29 | |
30 | /* |
31 | * 80x1-B2 errata PCI#11: |
32 | * |
33 | * Users of the 6041/6081 Rev.B2 chips (current is C0) |
34 | * should be careful to insert those cards only onto PCI-X bus #0, |
35 | * and only in device slots 0..7, not higher. The chips may not |
36 | * work correctly otherwise (note: this is a pretty rare condition). |
37 | */ |
38 | |
39 | #include <linux/kernel.h> |
40 | #include <linux/module.h> |
41 | #include <linux/pci.h> |
42 | #include <linux/init.h> |
43 | #include <linux/blkdev.h> |
44 | #include <linux/delay.h> |
45 | #include <linux/interrupt.h> |
46 | #include <linux/dmapool.h> |
47 | #include <linux/dma-mapping.h> |
48 | #include <linux/device.h> |
49 | #include <linux/clk.h> |
50 | #include <linux/phy/phy.h> |
51 | #include <linux/platform_device.h> |
52 | #include <linux/ata_platform.h> |
53 | #include <linux/mbus.h> |
54 | #include <linux/bitops.h> |
55 | #include <linux/gfp.h> |
56 | #include <linux/of.h> |
57 | #include <linux/of_irq.h> |
58 | #include <scsi/scsi_host.h> |
59 | #include <scsi/scsi_cmnd.h> |
60 | #include <scsi/scsi_device.h> |
61 | #include <linux/libata.h> |
62 | |
63 | #define DRV_NAME "sata_mv" |
64 | #define DRV_VERSION "1.28" |
65 | |
66 | /* |
67 | * module options |
68 | */ |
69 | |
70 | #ifdef CONFIG_PCI |
71 | static int msi; |
72 | module_param(msi, int, S_IRUGO); |
MODULE_PARM_DESC(msi, "Enable use of PCI MSI (0=off, 1=on)");
74 | #endif |
75 | |
76 | static int irq_coalescing_io_count; |
77 | module_param(irq_coalescing_io_count, int, S_IRUGO); |
78 | MODULE_PARM_DESC(irq_coalescing_io_count, |
79 | "IRQ coalescing I/O count threshold (0..255)" ); |
80 | |
81 | static int irq_coalescing_usecs; |
82 | module_param(irq_coalescing_usecs, int, S_IRUGO); |
83 | MODULE_PARM_DESC(irq_coalescing_usecs, |
84 | "IRQ coalescing time threshold in usecs" ); |
85 | |
86 | enum { |
87 | /* BAR's are enumerated in terms of pci_resource_start() terms */ |
88 | MV_PRIMARY_BAR = 0, /* offset 0x10: memory space */ |
89 | MV_IO_BAR = 2, /* offset 0x18: IO space */ |
90 | MV_MISC_BAR = 3, /* offset 0x1c: FLASH, NVRAM, SRAM */ |
91 | |
92 | MV_MAJOR_REG_AREA_SZ = 0x10000, /* 64KB */ |
93 | MV_MINOR_REG_AREA_SZ = 0x2000, /* 8KB */ |
94 | |
	/* For use with both IRQ coalescing methods ("all ports" or "per-HC") */
96 | COAL_CLOCKS_PER_USEC = 150, /* for calculating COAL_TIMEs */ |
97 | MAX_COAL_TIME_THRESHOLD = ((1 << 24) - 1), /* internal clocks count */ |
98 | MAX_COAL_IO_COUNT = 255, /* completed I/O count */ |
99 | |
100 | MV_PCI_REG_BASE = 0, |
101 | |
102 | /* |
103 | * Per-chip ("all ports") interrupt coalescing feature. |
104 | * This is only for GEN_II / GEN_IIE hardware. |
105 | * |
106 | * Coalescing defers the interrupt until either the IO_THRESHOLD |
107 | * (count of completed I/Os) is met, or the TIME_THRESHOLD is met. |
108 | */ |
109 | COAL_REG_BASE = 0x18000, |
110 | IRQ_COAL_CAUSE = (COAL_REG_BASE + 0x08), |
111 | ALL_PORTS_COAL_IRQ = (1 << 4), /* all ports irq event */ |
112 | |
113 | IRQ_COAL_IO_THRESHOLD = (COAL_REG_BASE + 0xcc), |
114 | IRQ_COAL_TIME_THRESHOLD = (COAL_REG_BASE + 0xd0), |
115 | |
116 | /* |
117 | * Registers for the (unused here) transaction coalescing feature: |
118 | */ |
119 | TRAN_COAL_CAUSE_LO = (COAL_REG_BASE + 0x88), |
120 | TRAN_COAL_CAUSE_HI = (COAL_REG_BASE + 0x8c), |
121 | |
122 | SATAHC0_REG_BASE = 0x20000, |
123 | FLASH_CTL = 0x1046c, |
124 | GPIO_PORT_CTL = 0x104f0, |
125 | RESET_CFG = 0x180d8, |
126 | |
127 | MV_PCI_REG_SZ = MV_MAJOR_REG_AREA_SZ, |
128 | MV_SATAHC_REG_SZ = MV_MAJOR_REG_AREA_SZ, |
129 | MV_SATAHC_ARBTR_REG_SZ = MV_MINOR_REG_AREA_SZ, /* arbiter */ |
130 | MV_PORT_REG_SZ = MV_MINOR_REG_AREA_SZ, |
131 | |
132 | MV_MAX_Q_DEPTH = 32, |
133 | MV_MAX_Q_DEPTH_MASK = MV_MAX_Q_DEPTH - 1, |
134 | |
135 | /* CRQB needs alignment on a 1KB boundary. Size == 1KB |
136 | * CRPB needs alignment on a 256B boundary. Size == 256B |
137 | * ePRD (SG) entries need alignment on a 16B boundary. Size == 16B |
138 | */ |
139 | MV_CRQB_Q_SZ = (32 * MV_MAX_Q_DEPTH), |
140 | MV_CRPB_Q_SZ = (8 * MV_MAX_Q_DEPTH), |
141 | MV_MAX_SG_CT = 256, |
142 | MV_SG_TBL_SZ = (16 * MV_MAX_SG_CT), |
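	/*
	 * Sanity check on the sizes above: 32 B/CRQB * 32 = 1 KB and
	 * 8 B/CRPB * 32 = 256 B, so each queue's size equals its required
	 * alignment; 16 B/ePRD * 256 = 4 KB per SG table.
	 */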
143 | |
144 | /* Determine hc from 0-7 port: hc = port >> MV_PORT_HC_SHIFT */ |
145 | MV_PORT_HC_SHIFT = 2, |
146 | MV_PORTS_PER_HC = (1 << MV_PORT_HC_SHIFT), /* 4 */ |
147 | /* Determine hc port from 0-7 port: hardport = port & MV_PORT_MASK */ |
148 | MV_PORT_MASK = (MV_PORTS_PER_HC - 1), /* 3 */ |
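	/* e.g. port 6: hc = 6 >> 2 = 1, hardport = 6 & 3 = 2 */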
149 | |
150 | /* Host Flags */ |
151 | MV_FLAG_DUAL_HC = (1 << 30), /* two SATA Host Controllers */ |
152 | |
153 | MV_COMMON_FLAGS = ATA_FLAG_SATA | ATA_FLAG_PIO_POLLING, |
154 | |
155 | MV_GEN_I_FLAGS = MV_COMMON_FLAGS | ATA_FLAG_NO_ATAPI, |
156 | |
157 | MV_GEN_II_FLAGS = MV_COMMON_FLAGS | ATA_FLAG_NCQ | |
158 | ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA, |
159 | |
160 | MV_GEN_IIE_FLAGS = MV_GEN_II_FLAGS | ATA_FLAG_AN, |
161 | |
162 | CRQB_FLAG_READ = (1 << 0), |
163 | CRQB_TAG_SHIFT = 1, |
164 | CRQB_IOID_SHIFT = 6, /* CRQB Gen-II/IIE IO Id shift */ |
165 | CRQB_PMP_SHIFT = 12, /* CRQB Gen-II/IIE PMP shift */ |
166 | CRQB_HOSTQ_SHIFT = 17, /* CRQB Gen-II/IIE HostQueTag shift */ |
167 | CRQB_CMD_ADDR_SHIFT = 8, |
168 | CRQB_CMD_CS = (0x2 << 11), |
169 | CRQB_CMD_LAST = (1 << 15), |
170 | |
171 | CRPB_FLAG_STATUS_SHIFT = 8, |
172 | CRPB_IOID_SHIFT_6 = 5, /* CRPB Gen-II IO Id shift */ |
173 | CRPB_IOID_SHIFT_7 = 7, /* CRPB Gen-IIE IO Id shift */ |
174 | |
175 | EPRD_FLAG_END_OF_TBL = (1 << 31), |
176 | |
177 | /* PCI interface registers */ |
178 | |
179 | MV_PCI_COMMAND = 0xc00, |
180 | MV_PCI_COMMAND_MWRCOM = (1 << 4), /* PCI Master Write Combining */ |
181 | MV_PCI_COMMAND_MRDTRIG = (1 << 7), /* PCI Master Read Trigger */ |
182 | |
183 | PCI_MAIN_CMD_STS = 0xd30, |
184 | STOP_PCI_MASTER = (1 << 2), |
185 | PCI_MASTER_EMPTY = (1 << 3), |
186 | GLOB_SFT_RST = (1 << 4), |
187 | |
188 | MV_PCI_MODE = 0xd00, |
189 | MV_PCI_MODE_MASK = 0x30, |
190 | |
191 | MV_PCI_EXP_ROM_BAR_CTL = 0xd2c, |
192 | MV_PCI_DISC_TIMER = 0xd04, |
193 | MV_PCI_MSI_TRIGGER = 0xc38, |
194 | MV_PCI_SERR_MASK = 0xc28, |
195 | MV_PCI_XBAR_TMOUT = 0x1d04, |
196 | MV_PCI_ERR_LOW_ADDRESS = 0x1d40, |
197 | MV_PCI_ERR_HIGH_ADDRESS = 0x1d44, |
198 | MV_PCI_ERR_ATTRIBUTE = 0x1d48, |
199 | MV_PCI_ERR_COMMAND = 0x1d50, |
200 | |
201 | PCI_IRQ_CAUSE = 0x1d58, |
202 | PCI_IRQ_MASK = 0x1d5c, |
203 | PCI_UNMASK_ALL_IRQS = 0x7fffff, /* bits 22-0 */ |
204 | |
205 | PCIE_IRQ_CAUSE = 0x1900, |
206 | PCIE_IRQ_MASK = 0x1910, |
207 | PCIE_UNMASK_ALL_IRQS = 0x40a, /* assorted bits */ |
208 | |
209 | /* Host Controller Main Interrupt Cause/Mask registers (1 per-chip) */ |
210 | PCI_HC_MAIN_IRQ_CAUSE = 0x1d60, |
211 | PCI_HC_MAIN_IRQ_MASK = 0x1d64, |
212 | SOC_HC_MAIN_IRQ_CAUSE = 0x20020, |
213 | SOC_HC_MAIN_IRQ_MASK = 0x20024, |
214 | ERR_IRQ = (1 << 0), /* shift by (2 * port #) */ |
215 | DONE_IRQ = (1 << 1), /* shift by (2 * port #) */ |
216 | HC0_IRQ_PEND = 0x1ff, /* bits 0-8 = HC0's ports */ |
217 | HC_SHIFT = 9, /* bits 9-17 = HC1's ports */ |
218 | DONE_IRQ_0_3 = 0x000000aa, /* DONE_IRQ ports 0,1,2,3 */ |
219 | DONE_IRQ_4_7 = (DONE_IRQ_0_3 << HC_SHIFT), /* 4,5,6,7 */ |
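	/*
	 * e.g. DONE_IRQ is bit 1 shifted by (2 * port #), so ports 0..3
	 * contribute bits 1,3,5,7 = 0x000000aa; HC1's ports supply the
	 * same pattern shifted up by HC_SHIFT (9).
	 */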
220 | PCI_ERR = (1 << 18), |
221 | TRAN_COAL_LO_DONE = (1 << 19), /* transaction coalescing */ |
222 | TRAN_COAL_HI_DONE = (1 << 20), /* transaction coalescing */ |
223 | PORTS_0_3_COAL_DONE = (1 << 8), /* HC0 IRQ coalescing */ |
224 | PORTS_4_7_COAL_DONE = (1 << 17), /* HC1 IRQ coalescing */ |
225 | ALL_PORTS_COAL_DONE = (1 << 21), /* GEN_II(E) IRQ coalescing */ |
226 | GPIO_INT = (1 << 22), |
227 | SELF_INT = (1 << 23), |
228 | TWSI_INT = (1 << 24), |
229 | HC_MAIN_RSVD = (0x7f << 25), /* bits 31-25 */ |
230 | HC_MAIN_RSVD_5 = (0x1fff << 19), /* bits 31-19 */ |
231 | HC_MAIN_RSVD_SOC = (0x3fffffb << 6), /* bits 31-9, 7-6 */ |
232 | |
233 | /* SATAHC registers */ |
234 | HC_CFG = 0x00, |
235 | |
236 | HC_IRQ_CAUSE = 0x14, |
237 | DMA_IRQ = (1 << 0), /* shift by port # */ |
238 | HC_COAL_IRQ = (1 << 4), /* IRQ coalescing */ |
239 | DEV_IRQ = (1 << 8), /* shift by port # */ |
240 | |
241 | /* |
242 | * Per-HC (Host-Controller) interrupt coalescing feature. |
243 | * This is present on all chip generations. |
244 | * |
245 | * Coalescing defers the interrupt until either the IO_THRESHOLD |
246 | * (count of completed I/Os) is met, or the TIME_THRESHOLD is met. |
247 | */ |
248 | HC_IRQ_COAL_IO_THRESHOLD = 0x000c, |
249 | HC_IRQ_COAL_TIME_THRESHOLD = 0x0010, |
250 | |
251 | SOC_LED_CTRL = 0x2c, |
252 | SOC_LED_CTRL_BLINK = (1 << 0), /* Active LED blink */ |
253 | SOC_LED_CTRL_ACT_PRESENCE = (1 << 2), /* Multiplex dev presence */ |
254 | /* with dev activity LED */ |
255 | |
256 | /* Shadow block registers */ |
257 | SHD_BLK = 0x100, |
258 | SHD_CTL_AST = 0x20, /* ofs from SHD_BLK */ |
259 | |
260 | /* SATA registers */ |
261 | SATA_STATUS = 0x300, /* ctrl, err regs follow status */ |
262 | SATA_ACTIVE = 0x350, |
263 | FIS_IRQ_CAUSE = 0x364, |
264 | FIS_IRQ_CAUSE_AN = (1 << 9), /* async notification */ |
265 | |
266 | LTMODE = 0x30c, /* requires read-after-write */ |
267 | LTMODE_BIT8 = (1 << 8), /* unknown, but necessary */ |
268 | |
269 | PHY_MODE2 = 0x330, |
270 | PHY_MODE3 = 0x310, |
271 | |
272 | PHY_MODE4 = 0x314, /* requires read-after-write */ |
273 | PHY_MODE4_CFG_MASK = 0x00000003, /* phy internal config field */ |
274 | PHY_MODE4_CFG_VALUE = 0x00000001, /* phy internal config field */ |
275 | PHY_MODE4_RSVD_ZEROS = 0x5de3fffa, /* Gen2e always write zeros */ |
276 | PHY_MODE4_RSVD_ONES = 0x00000005, /* Gen2e always write ones */ |
277 | |
278 | SATA_IFCTL = 0x344, |
279 | SATA_TESTCTL = 0x348, |
280 | SATA_IFSTAT = 0x34c, |
281 | VENDOR_UNIQUE_FIS = 0x35c, |
282 | |
283 | FISCFG = 0x360, |
284 | FISCFG_WAIT_DEV_ERR = (1 << 8), /* wait for host on DevErr */ |
285 | FISCFG_SINGLE_SYNC = (1 << 16), /* SYNC on DMA activation */ |
286 | |
287 | PHY_MODE9_GEN2 = 0x398, |
288 | PHY_MODE9_GEN1 = 0x39c, |
289 | PHYCFG_OFS = 0x3a0, /* only in 65n devices */ |
290 | |
291 | MV5_PHY_MODE = 0x74, |
292 | MV5_LTMODE = 0x30, |
293 | MV5_PHY_CTL = 0x0C, |
294 | SATA_IFCFG = 0x050, |
295 | LP_PHY_CTL = 0x058, |
296 | LP_PHY_CTL_PIN_PU_PLL = (1 << 0), |
297 | LP_PHY_CTL_PIN_PU_RX = (1 << 1), |
298 | LP_PHY_CTL_PIN_PU_TX = (1 << 2), |
299 | LP_PHY_CTL_GEN_TX_3G = (1 << 5), |
300 | LP_PHY_CTL_GEN_RX_3G = (1 << 9), |
301 | |
302 | MV_M2_PREAMP_MASK = 0x7e0, |
303 | |
304 | /* Port registers */ |
305 | EDMA_CFG = 0, |
306 | EDMA_CFG_Q_DEPTH = 0x1f, /* max device queue depth */ |
307 | EDMA_CFG_NCQ = (1 << 5), /* for R/W FPDMA queued */ |
308 | EDMA_CFG_NCQ_GO_ON_ERR = (1 << 14), /* continue on error */ |
309 | EDMA_CFG_RD_BRST_EXT = (1 << 11), /* read burst 512B */ |
310 | EDMA_CFG_WR_BUFF_LEN = (1 << 13), /* write buffer 512B */ |
311 | EDMA_CFG_EDMA_FBS = (1 << 16), /* EDMA FIS-Based Switching */ |
312 | EDMA_CFG_FBS = (1 << 26), /* FIS-Based Switching */ |
313 | |
314 | EDMA_ERR_IRQ_CAUSE = 0x8, |
315 | EDMA_ERR_IRQ_MASK = 0xc, |
316 | EDMA_ERR_D_PAR = (1 << 0), /* UDMA data parity err */ |
317 | EDMA_ERR_PRD_PAR = (1 << 1), /* UDMA PRD parity err */ |
318 | EDMA_ERR_DEV = (1 << 2), /* device error */ |
319 | EDMA_ERR_DEV_DCON = (1 << 3), /* device disconnect */ |
320 | EDMA_ERR_DEV_CON = (1 << 4), /* device connected */ |
321 | EDMA_ERR_SERR = (1 << 5), /* SError bits [WBDST] raised */ |
322 | EDMA_ERR_SELF_DIS = (1 << 7), /* Gen II/IIE self-disable */ |
323 | EDMA_ERR_SELF_DIS_5 = (1 << 8), /* Gen I self-disable */ |
324 | EDMA_ERR_BIST_ASYNC = (1 << 8), /* BIST FIS or Async Notify */ |
325 | EDMA_ERR_TRANS_IRQ_7 = (1 << 8), /* Gen IIE transprt layer irq */ |
326 | EDMA_ERR_CRQB_PAR = (1 << 9), /* CRQB parity error */ |
327 | EDMA_ERR_CRPB_PAR = (1 << 10), /* CRPB parity error */ |
328 | EDMA_ERR_INTRL_PAR = (1 << 11), /* internal parity error */ |
329 | EDMA_ERR_IORDY = (1 << 12), /* IORdy timeout */ |
330 | |
331 | EDMA_ERR_LNK_CTRL_RX = (0xf << 13), /* link ctrl rx error */ |
332 | EDMA_ERR_LNK_CTRL_RX_0 = (1 << 13), /* transient: CRC err */ |
333 | EDMA_ERR_LNK_CTRL_RX_1 = (1 << 14), /* transient: FIFO err */ |
334 | EDMA_ERR_LNK_CTRL_RX_2 = (1 << 15), /* fatal: caught SYNC */ |
335 | EDMA_ERR_LNK_CTRL_RX_3 = (1 << 16), /* transient: FIS rx err */ |
336 | |
337 | EDMA_ERR_LNK_DATA_RX = (0xf << 17), /* link data rx error */ |
338 | |
339 | EDMA_ERR_LNK_CTRL_TX = (0x1f << 21), /* link ctrl tx error */ |
340 | EDMA_ERR_LNK_CTRL_TX_0 = (1 << 21), /* transient: CRC err */ |
341 | EDMA_ERR_LNK_CTRL_TX_1 = (1 << 22), /* transient: FIFO err */ |
342 | EDMA_ERR_LNK_CTRL_TX_2 = (1 << 23), /* transient: caught SYNC */ |
343 | EDMA_ERR_LNK_CTRL_TX_3 = (1 << 24), /* transient: caught DMAT */ |
344 | EDMA_ERR_LNK_CTRL_TX_4 = (1 << 25), /* transient: FIS collision */ |
345 | |
346 | EDMA_ERR_LNK_DATA_TX = (0x1f << 26), /* link data tx error */ |
347 | |
348 | EDMA_ERR_TRANS_PROTO = (1 << 31), /* transport protocol error */ |
349 | EDMA_ERR_OVERRUN_5 = (1 << 5), |
350 | EDMA_ERR_UNDERRUN_5 = (1 << 6), |
351 | |
352 | EDMA_ERR_IRQ_TRANSIENT = EDMA_ERR_LNK_CTRL_RX_0 | |
353 | EDMA_ERR_LNK_CTRL_RX_1 | |
354 | EDMA_ERR_LNK_CTRL_RX_3 | |
355 | EDMA_ERR_LNK_CTRL_TX, |
356 | |
357 | EDMA_EH_FREEZE = EDMA_ERR_D_PAR | |
358 | EDMA_ERR_PRD_PAR | |
359 | EDMA_ERR_DEV_DCON | |
360 | EDMA_ERR_DEV_CON | |
361 | EDMA_ERR_SERR | |
362 | EDMA_ERR_SELF_DIS | |
363 | EDMA_ERR_CRQB_PAR | |
364 | EDMA_ERR_CRPB_PAR | |
365 | EDMA_ERR_INTRL_PAR | |
366 | EDMA_ERR_IORDY | |
367 | EDMA_ERR_LNK_CTRL_RX_2 | |
368 | EDMA_ERR_LNK_DATA_RX | |
369 | EDMA_ERR_LNK_DATA_TX | |
370 | EDMA_ERR_TRANS_PROTO, |
371 | |
372 | EDMA_EH_FREEZE_5 = EDMA_ERR_D_PAR | |
373 | EDMA_ERR_PRD_PAR | |
374 | EDMA_ERR_DEV_DCON | |
375 | EDMA_ERR_DEV_CON | |
376 | EDMA_ERR_OVERRUN_5 | |
377 | EDMA_ERR_UNDERRUN_5 | |
378 | EDMA_ERR_SELF_DIS_5 | |
379 | EDMA_ERR_CRQB_PAR | |
380 | EDMA_ERR_CRPB_PAR | |
381 | EDMA_ERR_INTRL_PAR | |
382 | EDMA_ERR_IORDY, |
383 | |
384 | EDMA_REQ_Q_BASE_HI = 0x10, |
385 | EDMA_REQ_Q_IN_PTR = 0x14, /* also contains BASE_LO */ |
386 | |
387 | EDMA_REQ_Q_OUT_PTR = 0x18, |
388 | EDMA_REQ_Q_PTR_SHIFT = 5, |
389 | |
390 | EDMA_RSP_Q_BASE_HI = 0x1c, |
391 | EDMA_RSP_Q_IN_PTR = 0x20, |
392 | EDMA_RSP_Q_OUT_PTR = 0x24, /* also contains BASE_LO */ |
393 | EDMA_RSP_Q_PTR_SHIFT = 3, |
394 | |
395 | EDMA_CMD = 0x28, /* EDMA command register */ |
396 | EDMA_EN = (1 << 0), /* enable EDMA */ |
397 | EDMA_DS = (1 << 1), /* disable EDMA; self-negated */ |
398 | EDMA_RESET = (1 << 2), /* reset eng/trans/link/phy */ |
399 | |
400 | EDMA_STATUS = 0x30, /* EDMA engine status */ |
401 | EDMA_STATUS_CACHE_EMPTY = (1 << 6), /* GenIIe command cache empty */ |
402 | EDMA_STATUS_IDLE = (1 << 7), /* GenIIe EDMA enabled/idle */ |
403 | |
404 | EDMA_IORDY_TMOUT = 0x34, |
405 | EDMA_ARB_CFG = 0x38, |
406 | |
407 | EDMA_HALTCOND = 0x60, /* GenIIe halt conditions */ |
408 | EDMA_UNKNOWN_RSVD = 0x6C, /* GenIIe unknown/reserved */ |
409 | |
410 | BMDMA_CMD = 0x224, /* bmdma command register */ |
411 | BMDMA_STATUS = 0x228, /* bmdma status register */ |
412 | BMDMA_PRD_LOW = 0x22c, /* bmdma PRD addr 31:0 */ |
413 | BMDMA_PRD_HIGH = 0x230, /* bmdma PRD addr 63:32 */ |
414 | |
415 | /* Host private flags (hp_flags) */ |
416 | MV_HP_FLAG_MSI = (1 << 0), |
417 | MV_HP_ERRATA_50XXB0 = (1 << 1), |
418 | MV_HP_ERRATA_50XXB2 = (1 << 2), |
419 | MV_HP_ERRATA_60X1B2 = (1 << 3), |
420 | MV_HP_ERRATA_60X1C0 = (1 << 4), |
421 | MV_HP_GEN_I = (1 << 6), /* Generation I: 50xx */ |
422 | MV_HP_GEN_II = (1 << 7), /* Generation II: 60xx */ |
423 | MV_HP_GEN_IIE = (1 << 8), /* Generation IIE: 6042/7042 */ |
424 | MV_HP_PCIE = (1 << 9), /* PCIe bus/regs: 7042 */ |
425 | MV_HP_CUT_THROUGH = (1 << 10), /* can use EDMA cut-through */ |
426 | MV_HP_FLAG_SOC = (1 << 11), /* SystemOnChip, no PCI */ |
427 | MV_HP_QUIRK_LED_BLINK_EN = (1 << 12), /* is led blinking enabled? */ |
428 | MV_HP_FIX_LP_PHY_CTL = (1 << 13), /* fix speed in LP_PHY_CTL ? */ |
429 | |
430 | /* Port private flags (pp_flags) */ |
431 | MV_PP_FLAG_EDMA_EN = (1 << 0), /* is EDMA engine enabled? */ |
432 | MV_PP_FLAG_NCQ_EN = (1 << 1), /* is EDMA set up for NCQ? */ |
433 | MV_PP_FLAG_FBS_EN = (1 << 2), /* is EDMA set up for FBS? */ |
434 | MV_PP_FLAG_DELAYED_EH = (1 << 3), /* delayed dev err handling */ |
435 | MV_PP_FLAG_FAKE_ATA_BUSY = (1 << 4), /* ignore initial ATA_DRDY */ |
436 | }; |
437 | |
438 | #define IS_GEN_I(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_I) |
439 | #define IS_GEN_II(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_II) |
440 | #define IS_GEN_IIE(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_IIE) |
441 | #define IS_PCIE(hpriv) ((hpriv)->hp_flags & MV_HP_PCIE) |
442 | #define IS_SOC(hpriv) ((hpriv)->hp_flags & MV_HP_FLAG_SOC) |
443 | |
444 | #define WINDOW_CTRL(i) (0x20030 + ((i) << 4)) |
445 | #define WINDOW_BASE(i) (0x20034 + ((i) << 4)) |
446 | |
447 | enum { |
448 | /* DMA boundary 0xffff is required by the s/g splitting |
	 * we need on /length/ in mv_fill_sg().
450 | */ |
451 | MV_DMA_BOUNDARY = 0xffffU, |
452 | |
453 | /* mask of register bits containing lower 32 bits |
454 | * of EDMA request queue DMA address |
455 | */ |
456 | EDMA_REQ_Q_BASE_LO_MASK = 0xfffffc00U, |
457 | |
458 | /* ditto, for response queue */ |
459 | EDMA_RSP_Q_BASE_LO_MASK = 0xffffff00U, |
460 | }; |
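
/*
 * Note that EDMA_REQ_Q_BASE_LO_MASK (0xfffffc00) discards ten low address
 * bits, matching the 1 KB CRQB queue alignment above; the response queue
 * mask (0xffffff00) likewise matches its 256 B alignment.
 */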
461 | |
462 | enum chip_type { |
463 | chip_504x, |
464 | chip_508x, |
465 | chip_5080, |
466 | chip_604x, |
467 | chip_608x, |
468 | chip_6042, |
469 | chip_7042, |
470 | chip_soc, |
471 | }; |
472 | |
473 | /* Command ReQuest Block: 32B */ |
474 | struct mv_crqb { |
475 | __le32 sg_addr; |
476 | __le32 sg_addr_hi; |
477 | __le16 ctrl_flags; |
478 | __le16 ata_cmd[11]; |
479 | }; |
480 | |
481 | struct mv_crqb_iie { |
482 | __le32 addr; |
483 | __le32 addr_hi; |
484 | __le32 flags; |
485 | __le32 len; |
486 | __le32 ata_cmd[4]; |
487 | }; |
488 | |
489 | /* Command ResPonse Block: 8B */ |
490 | struct mv_crpb { |
491 | __le16 id; |
492 | __le16 flags; |
493 | __le32 tmstmp; |
494 | }; |
495 | |
496 | /* EDMA Physical Region Descriptor (ePRD); A.K.A. SG */ |
497 | struct mv_sg { |
498 | __le32 addr; |
499 | __le32 flags_size; |
500 | __le32 addr_hi; |
501 | __le32 reserved; |
502 | }; |
503 | |
504 | /* |
505 | * We keep a local cache of a few frequently accessed port |
506 | * registers here, to avoid having to read them (very slow) |
507 | * when switching between EDMA and non-EDMA modes. |
508 | */ |
509 | struct mv_cached_regs { |
510 | u32 fiscfg; |
511 | u32 ltmode; |
512 | u32 haltcond; |
513 | u32 unknown_rsvd; |
514 | }; |
515 | |
516 | struct mv_port_priv { |
517 | struct mv_crqb *crqb; |
518 | dma_addr_t crqb_dma; |
519 | struct mv_crpb *crpb; |
520 | dma_addr_t crpb_dma; |
521 | struct mv_sg *sg_tbl[MV_MAX_Q_DEPTH]; |
522 | dma_addr_t sg_tbl_dma[MV_MAX_Q_DEPTH]; |
523 | |
524 | unsigned int req_idx; |
525 | unsigned int resp_idx; |
526 | |
527 | u32 pp_flags; |
528 | struct mv_cached_regs cached; |
529 | unsigned int delayed_eh_pmp_map; |
530 | }; |
531 | |
532 | struct mv_port_signal { |
533 | u32 amps; |
534 | u32 pre; |
535 | }; |
536 | |
537 | struct mv_host_priv { |
538 | u32 hp_flags; |
539 | unsigned int board_idx; |
540 | u32 main_irq_mask; |
541 | struct mv_port_signal signal[8]; |
542 | const struct mv_hw_ops *ops; |
543 | int n_ports; |
544 | void __iomem *base; |
545 | void __iomem *main_irq_cause_addr; |
546 | void __iomem *main_irq_mask_addr; |
547 | u32 irq_cause_offset; |
548 | u32 irq_mask_offset; |
549 | u32 unmask_all_irqs; |
550 | |
551 | /* |
552 | * Needed on some devices that require their clocks to be enabled. |
553 | * These are optional: if the platform device does not have any |
554 | * clocks, they won't be used. Also, if the underlying hardware |
555 | * does not support the common clock framework (CONFIG_HAVE_CLK=n), |
556 | * all the clock operations become no-ops (see clk.h). |
557 | */ |
558 | struct clk *clk; |
559 | struct clk **port_clks; |
560 | /* |
561 | * Some devices have a SATA PHY which can be enabled/disabled |
562 | * in order to save power. These are optional: if the platform |
	 * device does not have a PHY, they won't be used.
564 | */ |
565 | struct phy **port_phys; |
566 | /* |
567 | * These consistent DMA memory pools give us guaranteed |
568 | * alignment for hardware-accessed data structures, |
569 | * and less memory waste in accomplishing the alignment. |
570 | */ |
571 | struct dma_pool *crqb_pool; |
572 | struct dma_pool *crpb_pool; |
573 | struct dma_pool *sg_tbl_pool; |
574 | }; |
575 | |
576 | struct mv_hw_ops { |
577 | void (*phy_errata)(struct mv_host_priv *hpriv, void __iomem *mmio, |
578 | unsigned int port); |
579 | void (*enable_leds)(struct mv_host_priv *hpriv, void __iomem *mmio); |
580 | void (*read_preamp)(struct mv_host_priv *hpriv, int idx, |
581 | void __iomem *mmio); |
582 | int (*reset_hc)(struct ata_host *host, void __iomem *mmio, |
583 | unsigned int n_hc); |
584 | void (*reset_flash)(struct mv_host_priv *hpriv, void __iomem *mmio); |
585 | void (*reset_bus)(struct ata_host *host, void __iomem *mmio); |
586 | }; |
587 | |
588 | static int mv_scr_read(struct ata_link *link, unsigned int sc_reg_in, u32 *val); |
589 | static int mv_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val); |
590 | static int mv5_scr_read(struct ata_link *link, unsigned int sc_reg_in, u32 *val); |
591 | static int mv5_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val); |
592 | static int mv_port_start(struct ata_port *ap); |
593 | static void mv_port_stop(struct ata_port *ap); |
594 | static int mv_qc_defer(struct ata_queued_cmd *qc); |
595 | static enum ata_completion_errors mv_qc_prep(struct ata_queued_cmd *qc); |
596 | static enum ata_completion_errors mv_qc_prep_iie(struct ata_queued_cmd *qc); |
597 | static unsigned int mv_qc_issue(struct ata_queued_cmd *qc); |
598 | static int mv_hardreset(struct ata_link *link, unsigned int *class, |
599 | unsigned long deadline); |
600 | static void mv_eh_freeze(struct ata_port *ap); |
601 | static void mv_eh_thaw(struct ata_port *ap); |
602 | static void mv6_dev_config(struct ata_device *dev); |
603 | |
604 | static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio, |
605 | unsigned int port); |
606 | static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio); |
607 | static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx, |
608 | void __iomem *mmio); |
609 | static int mv5_reset_hc(struct ata_host *host, void __iomem *mmio, |
610 | unsigned int n_hc); |
611 | static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio); |
612 | static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio); |
613 | |
614 | static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio, |
615 | unsigned int port); |
616 | static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio); |
617 | static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx, |
618 | void __iomem *mmio); |
619 | static int mv6_reset_hc(struct ata_host *host, void __iomem *mmio, |
620 | unsigned int n_hc); |
621 | static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio); |
622 | static void mv_soc_enable_leds(struct mv_host_priv *hpriv, |
623 | void __iomem *mmio); |
624 | static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx, |
625 | void __iomem *mmio); |
626 | static int mv_soc_reset_hc(struct ata_host *host, |
627 | void __iomem *mmio, unsigned int n_hc); |
628 | static void mv_soc_reset_flash(struct mv_host_priv *hpriv, |
629 | void __iomem *mmio); |
630 | static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio); |
631 | static void mv_soc_65n_phy_errata(struct mv_host_priv *hpriv, |
632 | void __iomem *mmio, unsigned int port); |
633 | static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio); |
634 | static void mv_reset_channel(struct mv_host_priv *hpriv, void __iomem *mmio, |
635 | unsigned int port_no); |
636 | static int mv_stop_edma(struct ata_port *ap); |
637 | static int mv_stop_edma_engine(void __iomem *port_mmio); |
638 | static void mv_edma_cfg(struct ata_port *ap, int want_ncq, int want_edma); |
639 | |
640 | static void mv_pmp_select(struct ata_port *ap, int pmp); |
641 | static int mv_pmp_hardreset(struct ata_link *link, unsigned int *class, |
642 | unsigned long deadline); |
643 | static int mv_softreset(struct ata_link *link, unsigned int *class, |
644 | unsigned long deadline); |
645 | static void mv_pmp_error_handler(struct ata_port *ap); |
646 | static void mv_process_crpb_entries(struct ata_port *ap, |
647 | struct mv_port_priv *pp); |
648 | |
649 | static void mv_sff_irq_clear(struct ata_port *ap); |
650 | static int mv_check_atapi_dma(struct ata_queued_cmd *qc); |
651 | static void mv_bmdma_setup(struct ata_queued_cmd *qc); |
652 | static void mv_bmdma_start(struct ata_queued_cmd *qc); |
653 | static void mv_bmdma_stop(struct ata_queued_cmd *qc); |
654 | static u8 mv_bmdma_status(struct ata_port *ap); |
655 | static u8 mv_sff_check_status(struct ata_port *ap); |
656 | |
657 | /* .sg_tablesize is (MV_MAX_SG_CT / 2) in the structures below |
658 | * because we have to allow room for worst case splitting of |
659 | * PRDs for 64K boundaries in mv_fill_sg(). |
660 | */ |
661 | #ifdef CONFIG_PCI |
662 | static const struct scsi_host_template mv5_sht = { |
663 | ATA_BASE_SHT(DRV_NAME), |
664 | .sg_tablesize = MV_MAX_SG_CT / 2, |
665 | .dma_boundary = MV_DMA_BOUNDARY, |
666 | }; |
667 | #endif |
668 | static const struct scsi_host_template mv6_sht = { |
669 | __ATA_BASE_SHT(DRV_NAME), |
670 | .can_queue = MV_MAX_Q_DEPTH - 1, |
671 | .sg_tablesize = MV_MAX_SG_CT / 2, |
672 | .dma_boundary = MV_DMA_BOUNDARY, |
673 | .sdev_groups = ata_ncq_sdev_groups, |
674 | .change_queue_depth = ata_scsi_change_queue_depth, |
675 | .tag_alloc_policy = BLK_TAG_ALLOC_RR, |
676 | .slave_configure = ata_scsi_slave_config |
677 | }; |
678 | |
679 | static struct ata_port_operations mv5_ops = { |
680 | .inherits = &ata_sff_port_ops, |
681 | |
682 | .lost_interrupt = ATA_OP_NULL, |
683 | |
684 | .qc_defer = mv_qc_defer, |
685 | .qc_prep = mv_qc_prep, |
686 | .qc_issue = mv_qc_issue, |
687 | |
688 | .freeze = mv_eh_freeze, |
689 | .thaw = mv_eh_thaw, |
690 | .hardreset = mv_hardreset, |
691 | |
692 | .scr_read = mv5_scr_read, |
693 | .scr_write = mv5_scr_write, |
694 | |
695 | .port_start = mv_port_start, |
696 | .port_stop = mv_port_stop, |
697 | }; |
698 | |
699 | static struct ata_port_operations mv6_ops = { |
700 | .inherits = &ata_bmdma_port_ops, |
701 | |
702 | .lost_interrupt = ATA_OP_NULL, |
703 | |
704 | .qc_defer = mv_qc_defer, |
705 | .qc_prep = mv_qc_prep, |
706 | .qc_issue = mv_qc_issue, |
707 | |
708 | .dev_config = mv6_dev_config, |
709 | |
710 | .freeze = mv_eh_freeze, |
711 | .thaw = mv_eh_thaw, |
712 | .hardreset = mv_hardreset, |
713 | .softreset = mv_softreset, |
714 | .pmp_hardreset = mv_pmp_hardreset, |
715 | .pmp_softreset = mv_softreset, |
716 | .error_handler = mv_pmp_error_handler, |
717 | |
718 | .scr_read = mv_scr_read, |
719 | .scr_write = mv_scr_write, |
720 | |
721 | .sff_check_status = mv_sff_check_status, |
722 | .sff_irq_clear = mv_sff_irq_clear, |
723 | .check_atapi_dma = mv_check_atapi_dma, |
724 | .bmdma_setup = mv_bmdma_setup, |
725 | .bmdma_start = mv_bmdma_start, |
726 | .bmdma_stop = mv_bmdma_stop, |
727 | .bmdma_status = mv_bmdma_status, |
728 | |
729 | .port_start = mv_port_start, |
730 | .port_stop = mv_port_stop, |
731 | }; |
732 | |
733 | static struct ata_port_operations mv_iie_ops = { |
734 | .inherits = &mv6_ops, |
735 | .dev_config = ATA_OP_NULL, |
736 | .qc_prep = mv_qc_prep_iie, |
737 | }; |
738 | |
739 | static const struct ata_port_info mv_port_info[] = { |
740 | { /* chip_504x */ |
741 | .flags = MV_GEN_I_FLAGS, |
742 | .pio_mask = ATA_PIO4, |
743 | .udma_mask = ATA_UDMA6, |
744 | .port_ops = &mv5_ops, |
745 | }, |
746 | { /* chip_508x */ |
747 | .flags = MV_GEN_I_FLAGS | MV_FLAG_DUAL_HC, |
748 | .pio_mask = ATA_PIO4, |
749 | .udma_mask = ATA_UDMA6, |
750 | .port_ops = &mv5_ops, |
751 | }, |
752 | { /* chip_5080 */ |
753 | .flags = MV_GEN_I_FLAGS | MV_FLAG_DUAL_HC, |
754 | .pio_mask = ATA_PIO4, |
755 | .udma_mask = ATA_UDMA6, |
756 | .port_ops = &mv5_ops, |
757 | }, |
758 | { /* chip_604x */ |
759 | .flags = MV_GEN_II_FLAGS, |
760 | .pio_mask = ATA_PIO4, |
761 | .udma_mask = ATA_UDMA6, |
762 | .port_ops = &mv6_ops, |
763 | }, |
764 | { /* chip_608x */ |
765 | .flags = MV_GEN_II_FLAGS | MV_FLAG_DUAL_HC, |
766 | .pio_mask = ATA_PIO4, |
767 | .udma_mask = ATA_UDMA6, |
768 | .port_ops = &mv6_ops, |
769 | }, |
770 | { /* chip_6042 */ |
771 | .flags = MV_GEN_IIE_FLAGS, |
772 | .pio_mask = ATA_PIO4, |
773 | .udma_mask = ATA_UDMA6, |
774 | .port_ops = &mv_iie_ops, |
775 | }, |
776 | { /* chip_7042 */ |
777 | .flags = MV_GEN_IIE_FLAGS, |
778 | .pio_mask = ATA_PIO4, |
779 | .udma_mask = ATA_UDMA6, |
780 | .port_ops = &mv_iie_ops, |
781 | }, |
782 | { /* chip_soc */ |
783 | .flags = MV_GEN_IIE_FLAGS, |
784 | .pio_mask = ATA_PIO4, |
785 | .udma_mask = ATA_UDMA6, |
786 | .port_ops = &mv_iie_ops, |
787 | }, |
788 | }; |
789 | |
790 | static const struct pci_device_id mv_pci_tbl[] = { |
791 | { PCI_VDEVICE(MARVELL, 0x5040), chip_504x }, |
792 | { PCI_VDEVICE(MARVELL, 0x5041), chip_504x }, |
793 | { PCI_VDEVICE(MARVELL, 0x5080), chip_5080 }, |
794 | { PCI_VDEVICE(MARVELL, 0x5081), chip_508x }, |
795 | /* RocketRAID 1720/174x have different identifiers */ |
796 | { PCI_VDEVICE(TTI, 0x1720), chip_6042 }, |
797 | { PCI_VDEVICE(TTI, 0x1740), chip_6042 }, |
798 | { PCI_VDEVICE(TTI, 0x1742), chip_6042 }, |
799 | |
800 | { PCI_VDEVICE(MARVELL, 0x6040), chip_604x }, |
801 | { PCI_VDEVICE(MARVELL, 0x6041), chip_604x }, |
802 | { PCI_VDEVICE(MARVELL, 0x6042), chip_6042 }, |
803 | { PCI_VDEVICE(MARVELL, 0x6080), chip_608x }, |
804 | { PCI_VDEVICE(MARVELL, 0x6081), chip_608x }, |
805 | |
806 | { PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x }, |
807 | |
808 | /* Adaptec 1430SA */ |
809 | { PCI_VDEVICE(ADAPTEC2, 0x0243), chip_7042 }, |
810 | |
811 | /* Marvell 7042 support */ |
812 | { PCI_VDEVICE(MARVELL, 0x7042), chip_7042 }, |
813 | |
814 | /* Highpoint RocketRAID PCIe series */ |
815 | { PCI_VDEVICE(TTI, 0x2300), chip_7042 }, |
816 | { PCI_VDEVICE(TTI, 0x2310), chip_7042 }, |
817 | |
818 | { } /* terminate list */ |
819 | }; |
820 | |
821 | static const struct mv_hw_ops mv5xxx_ops = { |
822 | .phy_errata = mv5_phy_errata, |
823 | .enable_leds = mv5_enable_leds, |
824 | .read_preamp = mv5_read_preamp, |
825 | .reset_hc = mv5_reset_hc, |
826 | .reset_flash = mv5_reset_flash, |
827 | .reset_bus = mv5_reset_bus, |
828 | }; |
829 | |
830 | static const struct mv_hw_ops mv6xxx_ops = { |
831 | .phy_errata = mv6_phy_errata, |
832 | .enable_leds = mv6_enable_leds, |
833 | .read_preamp = mv6_read_preamp, |
834 | .reset_hc = mv6_reset_hc, |
835 | .reset_flash = mv6_reset_flash, |
836 | .reset_bus = mv_reset_pci_bus, |
837 | }; |
838 | |
839 | static const struct mv_hw_ops mv_soc_ops = { |
840 | .phy_errata = mv6_phy_errata, |
841 | .enable_leds = mv_soc_enable_leds, |
842 | .read_preamp = mv_soc_read_preamp, |
843 | .reset_hc = mv_soc_reset_hc, |
844 | .reset_flash = mv_soc_reset_flash, |
845 | .reset_bus = mv_soc_reset_bus, |
846 | }; |
847 | |
848 | static const struct mv_hw_ops mv_soc_65n_ops = { |
849 | .phy_errata = mv_soc_65n_phy_errata, |
850 | .enable_leds = mv_soc_enable_leds, |
851 | .reset_hc = mv_soc_reset_hc, |
852 | .reset_flash = mv_soc_reset_flash, |
853 | .reset_bus = mv_soc_reset_bus, |
854 | }; |
855 | |
856 | /* |
857 | * Functions |
858 | */ |
859 | |
860 | static inline void writelfl(unsigned long data, void __iomem *addr) |
861 | { |
	writel(data, addr);
863 | (void) readl(addr); /* flush to avoid PCI posted write */ |
864 | } |
865 | |
866 | static inline unsigned int mv_hc_from_port(unsigned int port) |
867 | { |
868 | return port >> MV_PORT_HC_SHIFT; |
869 | } |
870 | |
871 | static inline unsigned int mv_hardport_from_port(unsigned int port) |
872 | { |
873 | return port & MV_PORT_MASK; |
874 | } |
875 | |
876 | /* |
877 | * Consolidate some rather tricky bit shift calculations. |
878 | * This is hot-path stuff, so not a function. |
879 | * Simple code, with two return values, so macro rather than inline. |
880 | * |
881 | * port is the sole input, in range 0..7. |
882 | * shift is one output, for use with main_irq_cause / main_irq_mask registers. |
883 | * hardport is the other output, in range 0..3. |
884 | * |
885 | * Note that port and hardport may be the same variable in some cases. |
886 | */ |
887 | #define MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport) \ |
888 | { \ |
889 | shift = mv_hc_from_port(port) * HC_SHIFT; \ |
890 | hardport = mv_hardport_from_port(port); \ |
891 | shift += hardport * 2; \ |
892 | } |
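
/*
 * Example: for port 5, shift = 1 * HC_SHIFT + 1 * 2 = 11 and
 * hardport = 1, so that port's ERR_IRQ/DONE_IRQ appear at main-cause
 * bits 11/12.
 */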
893 | |
894 | static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc) |
895 | { |
896 | return (base + SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ)); |
897 | } |
898 | |
899 | static inline void __iomem *mv_hc_base_from_port(void __iomem *base, |
900 | unsigned int port) |
901 | { |
	return mv_hc_base(base, mv_hc_from_port(port));
903 | } |
904 | |
905 | static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port) |
906 | { |
907 | return mv_hc_base_from_port(base, port) + |
908 | MV_SATAHC_ARBTR_REG_SZ + |
909 | (mv_hardport_from_port(port) * MV_PORT_REG_SZ); |
910 | } |
911 | |
912 | static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port) |
913 | { |
	void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port);
915 | unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL; |
916 | |
917 | return hc_mmio + ofs; |
918 | } |
919 | |
920 | static inline void __iomem *mv_host_base(struct ata_host *host) |
921 | { |
922 | struct mv_host_priv *hpriv = host->private_data; |
923 | return hpriv->base; |
924 | } |
925 | |
926 | static inline void __iomem *mv_ap_base(struct ata_port *ap) |
927 | { |
	return mv_port_base(mv_host_base(ap->host), ap->port_no);
929 | } |
930 | |
931 | static inline int mv_get_hc_count(unsigned long port_flags) |
932 | { |
933 | return ((port_flags & MV_FLAG_DUAL_HC) ? 2 : 1); |
934 | } |
935 | |
936 | /** |
937 | * mv_save_cached_regs - (re-)initialize cached port registers |
938 | * @ap: the port whose registers we are caching |
939 | * |
940 | * Initialize the local cache of port registers, |
941 | * so that reading them over and over again can |
942 | * be avoided on the hotter paths of this driver. |
943 | * This saves a few microseconds each time we switch |
944 | * to/from EDMA mode to perform (eg.) a drive cache flush. |
945 | */ |
946 | static void mv_save_cached_regs(struct ata_port *ap) |
947 | { |
948 | void __iomem *port_mmio = mv_ap_base(ap); |
949 | struct mv_port_priv *pp = ap->private_data; |
950 | |
	pp->cached.fiscfg = readl(port_mmio + FISCFG);
	pp->cached.ltmode = readl(port_mmio + LTMODE);
	pp->cached.haltcond = readl(port_mmio + EDMA_HALTCOND);
	pp->cached.unknown_rsvd = readl(port_mmio + EDMA_UNKNOWN_RSVD);
955 | } |
956 | |
957 | /** |
958 | * mv_write_cached_reg - write to a cached port register |
959 | * @addr: hardware address of the register |
960 | * @old: pointer to cached value of the register |
961 | * @new: new value for the register |
962 | * |
963 | * Write a new value to a cached register, |
964 | * but only if the value is different from before. |
965 | */ |
966 | static inline void mv_write_cached_reg(void __iomem *addr, u32 *old, u32 new) |
967 | { |
968 | if (new != *old) { |
969 | unsigned long laddr; |
970 | *old = new; |
971 | /* |
972 | * Workaround for 88SX60x1-B2 FEr SATA#13: |
973 | * Read-after-write is needed to prevent generating 64-bit |
974 | * write cycles on the PCI bus for SATA interface registers |
975 | * at offsets ending in 0x4 or 0xc. |
976 | * |
977 | * Looks like a lot of fuss, but it avoids an unnecessary |
978 | * +1 usec read-after-write delay for unaffected registers. |
979 | */ |
980 | laddr = (unsigned long)addr & 0xffff; |
981 | if (laddr >= 0x300 && laddr <= 0x33c) { |
982 | laddr &= 0x000f; |
983 | if (laddr == 0x4 || laddr == 0xc) { |
				writelfl(new, addr); /* read after write */
985 | return; |
986 | } |
987 | } |
		writel(new, addr); /* unaffected by the errata */
989 | } |
990 | } |
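
/*
 * For illustration: LTMODE (0x30c) and PHY_MODE4 (0x314) fall within
 * 0x300..0x33c and end in 0xc/0x4, so writes to them take the flushed
 * path above -- consistent with their "requires read-after-write"
 * annotations in the register definitions.
 */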
991 | |
992 | static void mv_set_edma_ptrs(void __iomem *port_mmio, |
993 | struct mv_host_priv *hpriv, |
994 | struct mv_port_priv *pp) |
995 | { |
996 | u32 index; |
997 | |
998 | /* |
999 | * initialize request queue |
1000 | */ |
1001 | pp->req_idx &= MV_MAX_Q_DEPTH_MASK; /* paranoia */ |
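	/* each CRQB is 32 bytes, so req_idx << 5 is the entry's byte offset */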
1002 | index = pp->req_idx << EDMA_REQ_Q_PTR_SHIFT; |
1003 | |
1004 | WARN_ON(pp->crqb_dma & 0x3ff); |
	writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI);
	writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | index,
		 port_mmio + EDMA_REQ_Q_IN_PTR);
	writelfl(index, port_mmio + EDMA_REQ_Q_OUT_PTR);
1009 | |
1010 | /* |
1011 | * initialize response queue |
1012 | */ |
1013 | pp->resp_idx &= MV_MAX_Q_DEPTH_MASK; /* paranoia */ |
1014 | index = pp->resp_idx << EDMA_RSP_Q_PTR_SHIFT; |
1015 | |
1016 | WARN_ON(pp->crpb_dma & 0xff); |
	writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI);
	writelfl(index, port_mmio + EDMA_RSP_Q_IN_PTR);
	writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) | index,
		 port_mmio + EDMA_RSP_Q_OUT_PTR);
1021 | } |
1022 | |
1023 | static void mv_write_main_irq_mask(u32 mask, struct mv_host_priv *hpriv) |
1024 | { |
1025 | /* |
1026 | * When writing to the main_irq_mask in hardware, |
1027 | * we must ensure exclusivity between the interrupt coalescing bits |
1028 | * and the corresponding individual port DONE_IRQ bits. |
1029 | * |
1030 | * Note that this register is really an "IRQ enable" register, |
1031 | * not an "IRQ mask" register as Marvell's naming might suggest. |
1032 | */ |
1033 | if (mask & (ALL_PORTS_COAL_DONE | PORTS_0_3_COAL_DONE)) |
1034 | mask &= ~DONE_IRQ_0_3; |
1035 | if (mask & (ALL_PORTS_COAL_DONE | PORTS_4_7_COAL_DONE)) |
1036 | mask &= ~DONE_IRQ_4_7; |
	writelfl(mask, hpriv->main_irq_mask_addr);
1038 | } |
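
/*
 * For example, a cached mask of ALL_PORTS_COAL_DONE | DONE_IRQ_0_3
 * reaches the hardware as just ALL_PORTS_COAL_DONE; the per-port DONE
 * bits take effect again once coalescing is later disabled.
 */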
1039 | |
1040 | static void mv_set_main_irq_mask(struct ata_host *host, |
1041 | u32 disable_bits, u32 enable_bits) |
1042 | { |
1043 | struct mv_host_priv *hpriv = host->private_data; |
1044 | u32 old_mask, new_mask; |
1045 | |
1046 | old_mask = hpriv->main_irq_mask; |
1047 | new_mask = (old_mask & ~disable_bits) | enable_bits; |
1048 | if (new_mask != old_mask) { |
1049 | hpriv->main_irq_mask = new_mask; |
		mv_write_main_irq_mask(new_mask, hpriv);
1051 | } |
1052 | } |
1053 | |
1054 | static void mv_enable_port_irqs(struct ata_port *ap, |
1055 | unsigned int port_bits) |
1056 | { |
1057 | unsigned int shift, hardport, port = ap->port_no; |
1058 | u32 disable_bits, enable_bits; |
1059 | |
1060 | MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport); |
1061 | |
1062 | disable_bits = (DONE_IRQ | ERR_IRQ) << shift; |
1063 | enable_bits = port_bits << shift; |
	mv_set_main_irq_mask(ap->host, disable_bits, enable_bits);
1065 | } |
1066 | |
1067 | static void mv_clear_and_enable_port_irqs(struct ata_port *ap, |
1068 | void __iomem *port_mmio, |
1069 | unsigned int port_irqs) |
1070 | { |
1071 | struct mv_host_priv *hpriv = ap->host->private_data; |
	int hardport = mv_hardport_from_port(ap->port_no);
	void __iomem *hc_mmio = mv_hc_base_from_port(
				mv_host_base(ap->host), ap->port_no);
1075 | u32 hc_irq_cause; |
1076 | |
1077 | /* clear EDMA event indicators, if any */ |
	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE);
1079 | |
1080 | /* clear pending irq events */ |
1081 | hc_irq_cause = ~((DEV_IRQ | DMA_IRQ) << hardport); |
	writelfl(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE);
1083 | |
1084 | /* clear FIS IRQ Cause */ |
1085 | if (IS_GEN_IIE(hpriv)) |
		writelfl(0, port_mmio + FIS_IRQ_CAUSE);
1087 | |
	mv_enable_port_irqs(ap, port_irqs);
1089 | } |
1090 | |
1091 | static void mv_set_irq_coalescing(struct ata_host *host, |
1092 | unsigned int count, unsigned int usecs) |
1093 | { |
1094 | struct mv_host_priv *hpriv = host->private_data; |
1095 | void __iomem *mmio = hpriv->base, *hc_mmio; |
1096 | u32 coal_enable = 0; |
1097 | unsigned long flags; |
1098 | unsigned int clks, is_dual_hc = hpriv->n_ports > MV_PORTS_PER_HC; |
1099 | const u32 coal_disable = PORTS_0_3_COAL_DONE | PORTS_4_7_COAL_DONE | |
1100 | ALL_PORTS_COAL_DONE; |
1101 | |
1102 | /* Disable IRQ coalescing if either threshold is zero */ |
1103 | if (!usecs || !count) { |
1104 | clks = count = 0; |
1105 | } else { |
1106 | /* Respect maximum limits of the hardware */ |
1107 | clks = usecs * COAL_CLOCKS_PER_USEC; |
1108 | if (clks > MAX_COAL_TIME_THRESHOLD) |
1109 | clks = MAX_COAL_TIME_THRESHOLD; |
1110 | if (count > MAX_COAL_IO_COUNT) |
1111 | count = MAX_COAL_IO_COUNT; |
1112 | } |
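	/*
	 * Worked example: usecs=100 gives clks = 100 * 150 = 15000 internal
	 * clocks; anything beyond ~111848 usecs clamps at the 24-bit
	 * MAX_COAL_TIME_THRESHOLD, and count clamps at 255.
	 */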
1113 | |
1114 | spin_lock_irqsave(&host->lock, flags); |
	mv_set_main_irq_mask(host, coal_disable, 0);
1116 | |
1117 | if (is_dual_hc && !IS_GEN_I(hpriv)) { |
1118 | /* |
1119 | * GEN_II/GEN_IIE with dual host controllers: |
1120 | * one set of global thresholds for the entire chip. |
1121 | */ |
		writel(clks,  mmio + IRQ_COAL_TIME_THRESHOLD);
		writel(count, mmio + IRQ_COAL_IO_THRESHOLD);
		/* clear leftover coal IRQ bit */
		writel(~ALL_PORTS_COAL_IRQ, mmio + IRQ_COAL_CAUSE);
1126 | if (count) |
1127 | coal_enable = ALL_PORTS_COAL_DONE; |
1128 | clks = count = 0; /* force clearing of regular regs below */ |
1129 | } |
1130 | |
1131 | /* |
1132 | * All chips: independent thresholds for each HC on the chip. |
1133 | */ |
	hc_mmio = mv_hc_base_from_port(mmio, 0);
	writel(clks,  hc_mmio + HC_IRQ_COAL_TIME_THRESHOLD);
	writel(count, hc_mmio + HC_IRQ_COAL_IO_THRESHOLD);
	writel(~HC_COAL_IRQ, hc_mmio + HC_IRQ_CAUSE);
1138 | if (count) |
1139 | coal_enable |= PORTS_0_3_COAL_DONE; |
1140 | if (is_dual_hc) { |
		hc_mmio = mv_hc_base_from_port(mmio, MV_PORTS_PER_HC);
		writel(clks,  hc_mmio + HC_IRQ_COAL_TIME_THRESHOLD);
		writel(count, hc_mmio + HC_IRQ_COAL_IO_THRESHOLD);
		writel(~HC_COAL_IRQ, hc_mmio + HC_IRQ_CAUSE);
1145 | if (count) |
1146 | coal_enable |= PORTS_4_7_COAL_DONE; |
1147 | } |
1148 | |
	mv_set_main_irq_mask(host, 0, coal_enable);
	spin_unlock_irqrestore(&host->lock, flags);
1151 | } |
1152 | |
1153 | /* |
1154 | * mv_start_edma - Enable eDMA engine |
1155 | * @pp: port private data |
1156 | * |
1157 | * Verify the local cache of the eDMA state is accurate with a |
1158 | * WARN_ON. |
1159 | * |
1160 | * LOCKING: |
1161 | * Inherited from caller. |
1162 | */ |
1163 | static void mv_start_edma(struct ata_port *ap, void __iomem *port_mmio, |
1164 | struct mv_port_priv *pp, u8 protocol) |
1165 | { |
1166 | int want_ncq = (protocol == ATA_PROT_NCQ); |
1167 | |
1168 | if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) { |
1169 | int using_ncq = ((pp->pp_flags & MV_PP_FLAG_NCQ_EN) != 0); |
1170 | if (want_ncq != using_ncq) |
1171 | mv_stop_edma(ap); |
1172 | } |
1173 | if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) { |
1174 | struct mv_host_priv *hpriv = ap->host->private_data; |
1175 | |
		mv_edma_cfg(ap, want_ncq, 1);
1177 | |
1178 | mv_set_edma_ptrs(port_mmio, hpriv, pp); |
		mv_clear_and_enable_port_irqs(ap, port_mmio, DONE_IRQ|ERR_IRQ);
1180 | |
		writelfl(EDMA_EN, port_mmio + EDMA_CMD);
1182 | pp->pp_flags |= MV_PP_FLAG_EDMA_EN; |
1183 | } |
1184 | } |
1185 | |
1186 | static void mv_wait_for_edma_empty_idle(struct ata_port *ap) |
1187 | { |
1188 | void __iomem *port_mmio = mv_ap_base(ap); |
1189 | const u32 empty_idle = (EDMA_STATUS_CACHE_EMPTY | EDMA_STATUS_IDLE); |
1190 | const int per_loop = 5, timeout = (15 * 1000 / per_loop); |
1191 | int i; |
1192 | |
1193 | /* |
1194 | * Wait for the EDMA engine to finish transactions in progress. |
1195 | * No idea what a good "timeout" value might be, but measurements |
1196 | * indicate that it often requires hundreds of microseconds |
1197 | * with two drives in-use. So we use the 15msec value above |
1198 | * as a rough guess at what even more drives might require. |
1199 | */ |
1200 | for (i = 0; i < timeout; ++i) { |
		u32 edma_stat = readl(port_mmio + EDMA_STATUS);
1202 | if ((edma_stat & empty_idle) == empty_idle) |
1203 | break; |
1204 | udelay(per_loop); |
1205 | } |
1206 | /* ata_port_info(ap, "%s: %u+ usecs\n", __func__, i); */ |
1207 | } |
1208 | |
1209 | /** |
1210 | * mv_stop_edma_engine - Disable eDMA engine |
1211 | * @port_mmio: io base address |
1212 | * |
1213 | * LOCKING: |
1214 | * Inherited from caller. |
1215 | */ |
1216 | static int mv_stop_edma_engine(void __iomem *port_mmio) |
1217 | { |
1218 | int i; |
1219 | |
1220 | /* Disable eDMA. The disable bit auto clears. */ |
	writelfl(EDMA_DS, port_mmio + EDMA_CMD);
1222 | |
1223 | /* Wait for the chip to confirm eDMA is off. */ |
1224 | for (i = 10000; i > 0; i--) { |
		u32 reg = readl(port_mmio + EDMA_CMD);
1226 | if (!(reg & EDMA_EN)) |
1227 | return 0; |
1228 | udelay(10); |
1229 | } |
1230 | return -EIO; |
1231 | } |
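
/*
 * Worst case, the loop above polls 10000 times at 10 usec intervals,
 * giving the chip roughly 100 msec to clear EDMA_EN before we give up
 * with -EIO.
 */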
1232 | |
1233 | static int mv_stop_edma(struct ata_port *ap) |
1234 | { |
1235 | void __iomem *port_mmio = mv_ap_base(ap); |
1236 | struct mv_port_priv *pp = ap->private_data; |
1237 | int err = 0; |
1238 | |
1239 | if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) |
1240 | return 0; |
1241 | pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN; |
1242 | mv_wait_for_edma_empty_idle(ap); |
1243 | if (mv_stop_edma_engine(port_mmio)) { |
1244 | ata_port_err(ap, "Unable to stop eDMA\n" ); |
1245 | err = -EIO; |
1246 | } |
	mv_edma_cfg(ap, 0, 0);
1248 | return err; |
1249 | } |
1250 | |
1251 | static void mv_dump_mem(struct device *dev, void __iomem *start, unsigned bytes) |
1252 | { |
1253 | int b, w, o; |
1254 | unsigned char linebuf[38]; |
1255 | |
1256 | for (b = 0; b < bytes; ) { |
1257 | for (w = 0, o = 0; b < bytes && w < 4; w++) { |
			o += scnprintf(linebuf + o, sizeof(linebuf) - o,
				       "%08x ", readl(start + b));
1260 | b += sizeof(u32); |
1261 | } |
1262 | dev_dbg(dev, "%s: %p: %s\n" , |
1263 | __func__, start + b, linebuf); |
1264 | } |
1265 | } |
1266 | |
1267 | static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes) |
1268 | { |
1269 | int b, w, o; |
1270 | u32 dw = 0; |
1271 | unsigned char linebuf[38]; |
1272 | |
1273 | for (b = 0; b < bytes; ) { |
1274 | for (w = 0, o = 0; b < bytes && w < 4; w++) { |
			(void) pci_read_config_dword(pdev, b, &dw);
			o += snprintf(linebuf + o, sizeof(linebuf) - o,
				      "%08x ", dw);
1278 | b += sizeof(u32); |
1279 | } |
1280 | dev_dbg(&pdev->dev, "%s: %02x: %s\n" , |
1281 | __func__, b, linebuf); |
1282 | } |
1283 | } |
1284 | |
1285 | static void mv_dump_all_regs(void __iomem *mmio_base, |
1286 | struct pci_dev *pdev) |
1287 | { |
1288 | void __iomem *hc_base; |
1289 | void __iomem *port_base; |
1290 | int start_port, num_ports, p, start_hc, num_hcs, hc; |
1291 | |
1292 | start_hc = start_port = 0; |
1293 | num_ports = 8; /* should be benign for 4 port devs */ |
1294 | num_hcs = 2; |
1295 | dev_dbg(&pdev->dev, |
1296 | "%s: All registers for port(s) %u-%u:\n" , __func__, |
1297 | start_port, num_ports > 1 ? num_ports - 1 : start_port); |
1298 | |
1299 | dev_dbg(&pdev->dev, "%s: PCI config space regs:\n" , __func__); |
1300 | mv_dump_pci_cfg(pdev, bytes: 0x68); |
1301 | |
1302 | dev_dbg(&pdev->dev, "%s: PCI regs:\n" , __func__); |
1303 | mv_dump_mem(dev: &pdev->dev, start: mmio_base+0xc00, bytes: 0x3c); |
1304 | mv_dump_mem(dev: &pdev->dev, start: mmio_base+0xd00, bytes: 0x34); |
1305 | mv_dump_mem(dev: &pdev->dev, start: mmio_base+0xf00, bytes: 0x4); |
1306 | mv_dump_mem(dev: &pdev->dev, start: mmio_base+0x1d00, bytes: 0x6c); |
1307 | for (hc = start_hc; hc < start_hc + num_hcs; hc++) { |
		hc_base = mv_hc_base(mmio_base, hc);
		dev_dbg(&pdev->dev, "%s: HC regs (HC %i):\n", __func__, hc);
		mv_dump_mem(&pdev->dev, hc_base, 0x1c);
1311 | } |
1312 | for (p = start_port; p < start_port + num_ports; p++) { |
		port_base = mv_port_base(mmio_base, p);
		dev_dbg(&pdev->dev, "%s: EDMA regs (port %i):\n", __func__, p);
		mv_dump_mem(&pdev->dev, port_base, 0x54);
		dev_dbg(&pdev->dev, "%s: SATA regs (port %i):\n", __func__, p);
		mv_dump_mem(&pdev->dev, port_base+0x300, 0x60);
1318 | } |
1319 | } |
1320 | |
1321 | static unsigned int mv_scr_offset(unsigned int sc_reg_in) |
1322 | { |
1323 | unsigned int ofs; |
1324 | |
1325 | switch (sc_reg_in) { |
1326 | case SCR_STATUS: |
1327 | case SCR_CONTROL: |
1328 | case SCR_ERROR: |
1329 | ofs = SATA_STATUS + (sc_reg_in * sizeof(u32)); |
1330 | break; |
1331 | case SCR_ACTIVE: |
1332 | ofs = SATA_ACTIVE; /* active is not with the others */ |
1333 | break; |
1334 | default: |
1335 | ofs = 0xffffffffU; |
1336 | break; |
1337 | } |
1338 | return ofs; |
1339 | } |
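
/*
 * The arithmetic above relies on the libata SCR numbering: SCR_STATUS (0),
 * SCR_ERROR (1) and SCR_CONTROL (2) map to 0x300/0x304/0x308, while
 * SCR_ACTIVE (3) sits apart at 0x350.
 */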
1340 | |
1341 | static int mv_scr_read(struct ata_link *link, unsigned int sc_reg_in, u32 *val) |
1342 | { |
1343 | unsigned int ofs = mv_scr_offset(sc_reg_in); |
1344 | |
1345 | if (ofs != 0xffffffffU) { |
		*val = readl(mv_ap_base(link->ap) + ofs);
1347 | return 0; |
1348 | } else |
1349 | return -EINVAL; |
1350 | } |
1351 | |
1352 | static int mv_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val) |
1353 | { |
1354 | unsigned int ofs = mv_scr_offset(sc_reg_in); |
1355 | |
1356 | if (ofs != 0xffffffffU) { |
		void __iomem *addr = mv_ap_base(link->ap) + ofs;
1358 | struct mv_host_priv *hpriv = link->ap->host->private_data; |
1359 | if (sc_reg_in == SCR_CONTROL) { |
1360 | /* |
1361 | * Workaround for 88SX60x1 FEr SATA#26: |
1362 | * |
1363 | * COMRESETs have to take care not to accidentally |
1364 | * put the drive to sleep when writing SCR_CONTROL. |
1365 | * Setting bits 12..15 prevents this problem. |
1366 | * |
			 * So if we see an outbound COMRESET, set those bits.
1368 | * Ditto for the followup write that clears the reset. |
1369 | * |
1370 | * The proprietary driver does this for |
1371 | * all chip versions, and so do we. |
1372 | */ |
1373 | if ((val & 0xf) == 1 || (readl(addr) & 0xf) == 1) |
1374 | val |= 0xf000; |
1375 | |
1376 | if (hpriv->hp_flags & MV_HP_FIX_LP_PHY_CTL) { |
1377 | void __iomem *lp_phy_addr = |
				mv_ap_base(link->ap) + LP_PHY_CTL;
1379 | /* |
1380 | * Set PHY speed according to SControl speed. |
1381 | */ |
1382 | u32 lp_phy_val = |
1383 | LP_PHY_CTL_PIN_PU_PLL | |
1384 | LP_PHY_CTL_PIN_PU_RX | |
1385 | LP_PHY_CTL_PIN_PU_TX; |
1386 | |
1387 | if ((val & 0xf0) != 0x10) |
1388 | lp_phy_val |= |
1389 | LP_PHY_CTL_GEN_TX_3G | |
1390 | LP_PHY_CTL_GEN_RX_3G; |
1391 | |
			writelfl(lp_phy_val, lp_phy_addr);
1393 | } |
1394 | } |
		writelfl(val, addr);
1396 | return 0; |
1397 | } else |
1398 | return -EINVAL; |
1399 | } |
1400 | |
1401 | static void mv6_dev_config(struct ata_device *adev) |
1402 | { |
1403 | /* |
1404 | * Deal with Gen-II ("mv6") hardware quirks/restrictions: |
1405 | * |
1406 | * Gen-II does not support NCQ over a port multiplier |
1407 | * (no FIS-based switching). |
1408 | */ |
1409 | if (adev->flags & ATA_DFLAG_NCQ) { |
		if (sata_pmp_attached(adev->link->ap)) {
1411 | adev->flags &= ~ATA_DFLAG_NCQ; |
1412 | ata_dev_info(adev, |
1413 | "NCQ disabled for command-based switching\n" ); |
1414 | } |
1415 | } |
1416 | } |
1417 | |
1418 | static int mv_qc_defer(struct ata_queued_cmd *qc) |
1419 | { |
1420 | struct ata_link *link = qc->dev->link; |
1421 | struct ata_port *ap = link->ap; |
1422 | struct mv_port_priv *pp = ap->private_data; |
1423 | |
1424 | /* |
1425 | * Don't allow new commands if we're in a delayed EH state |
1426 | * for NCQ and/or FIS-based switching. |
1427 | */ |
1428 | if (pp->pp_flags & MV_PP_FLAG_DELAYED_EH) |
1429 | return ATA_DEFER_PORT; |
1430 | |
1431 | /* PIO commands need exclusive link: no other commands [DMA or PIO] |
1432 | * can run concurrently. |
1433 | * set excl_link when we want to send a PIO command in DMA mode |
1434 | * or a non-NCQ command in NCQ mode. |
1435 | * When we receive a command from that link, and there are no |
1436 | * outstanding commands, mark a flag to clear excl_link and let |
1437 | * the command go through. |
1438 | */ |
1439 | if (unlikely(ap->excl_link)) { |
1440 | if (link == ap->excl_link) { |
1441 | if (ap->nr_active_links) |
1442 | return ATA_DEFER_PORT; |
1443 | qc->flags |= ATA_QCFLAG_CLEAR_EXCL; |
1444 | return 0; |
1445 | } else |
1446 | return ATA_DEFER_PORT; |
1447 | } |
1448 | |
1449 | /* |
1450 | * If the port is completely idle, then allow the new qc. |
1451 | */ |
1452 | if (ap->nr_active_links == 0) |
1453 | return 0; |
1454 | |
1455 | /* |
1456 | * The port is operating in host queuing mode (EDMA) with NCQ |
1457 | * enabled, allow multiple NCQ commands. EDMA also allows |
1458 | * queueing multiple DMA commands but libata core currently |
1459 | * doesn't allow it. |
1460 | */ |
1461 | if ((pp->pp_flags & MV_PP_FLAG_EDMA_EN) && |
1462 | (pp->pp_flags & MV_PP_FLAG_NCQ_EN)) { |
		if (ata_is_ncq(qc->tf.protocol))
1464 | return 0; |
1465 | else { |
1466 | ap->excl_link = link; |
1467 | return ATA_DEFER_PORT; |
1468 | } |
1469 | } |
1470 | |
1471 | return ATA_DEFER_PORT; |
1472 | } |
1473 | |
1474 | static void mv_config_fbs(struct ata_port *ap, int want_ncq, int want_fbs) |
1475 | { |
1476 | struct mv_port_priv *pp = ap->private_data; |
1477 | void __iomem *port_mmio; |
1478 | |
1479 | u32 fiscfg, *old_fiscfg = &pp->cached.fiscfg; |
1480 | u32 ltmode, *old_ltmode = &pp->cached.ltmode; |
1481 | u32 haltcond, *old_haltcond = &pp->cached.haltcond; |
1482 | |
1483 | ltmode = *old_ltmode & ~LTMODE_BIT8; |
1484 | haltcond = *old_haltcond | EDMA_ERR_DEV; |
1485 | |
1486 | if (want_fbs) { |
1487 | fiscfg = *old_fiscfg | FISCFG_SINGLE_SYNC; |
1488 | ltmode = *old_ltmode | LTMODE_BIT8; |
1489 | if (want_ncq) |
1490 | haltcond &= ~EDMA_ERR_DEV; |
1491 | else |
1492 | fiscfg |= FISCFG_WAIT_DEV_ERR; |
1493 | } else { |
1494 | fiscfg = *old_fiscfg & ~(FISCFG_SINGLE_SYNC | FISCFG_WAIT_DEV_ERR); |
1495 | } |
1496 | |
1497 | port_mmio = mv_ap_base(ap); |
	mv_write_cached_reg(port_mmio + FISCFG, old_fiscfg, fiscfg);
	mv_write_cached_reg(port_mmio + LTMODE, old_ltmode, ltmode);
	mv_write_cached_reg(port_mmio + EDMA_HALTCOND, old_haltcond, haltcond);
1501 | } |
1502 | |
1503 | static void mv_60x1_errata_sata25(struct ata_port *ap, int want_ncq) |
1504 | { |
1505 | struct mv_host_priv *hpriv = ap->host->private_data; |
1506 | u32 old, new; |
1507 | |
1508 | /* workaround for 88SX60x1 FEr SATA#25 (part 1) */ |
	old = readl(hpriv->base + GPIO_PORT_CTL);
1510 | if (want_ncq) |
1511 | new = old | (1 << 22); |
1512 | else |
1513 | new = old & ~(1 << 22); |
1514 | if (new != old) |
		writel(new, hpriv->base + GPIO_PORT_CTL);
1516 | } |
1517 | |
1518 | /* |
 * mv_bmdma_enable_iie - set a magic bit on GEN_IIE to allow bmdma
1520 | * @ap: Port being initialized |
1521 | * |
1522 | * There are two DMA modes on these chips: basic DMA, and EDMA. |
1523 | * |
1524 | * Bit-0 of the "EDMA RESERVED" register enables/disables use |
1525 | * of basic DMA on the GEN_IIE versions of the chips. |
1526 | * |
1527 | * This bit survives EDMA resets, and must be set for basic DMA |
1528 | * to function, and should be cleared when EDMA is active. |
1529 | */ |
1530 | static void mv_bmdma_enable_iie(struct ata_port *ap, int enable_bmdma) |
1531 | { |
1532 | struct mv_port_priv *pp = ap->private_data; |
1533 | u32 new, *old = &pp->cached.unknown_rsvd; |
1534 | |
1535 | if (enable_bmdma) |
1536 | new = *old | 1; |
1537 | else |
1538 | new = *old & ~1; |
	mv_write_cached_reg(mv_ap_base(ap) + EDMA_UNKNOWN_RSVD, old, new);
1540 | } |
1541 | |
1542 | /* |
1543 | * SOC chips have an issue whereby the HDD LEDs don't always blink |
1544 | * during I/O when NCQ is enabled. Enabling a special "LED blink" mode |
1545 | * of the SOC takes care of it, generating a steady blink rate when |
1546 | * any drive on the chip is active. |
1547 | * |
1548 | * Unfortunately, the blink mode is a global hardware setting for the SOC, |
1549 | * so we must use it whenever at least one port on the SOC has NCQ enabled. |
1550 | * |
1551 | * We turn "LED blink" off when NCQ is not in use anywhere, because the normal |
1552 | * LED operation works then, and provides better (more accurate) feedback. |
1553 | * |
1554 | * Note that this code assumes that an SOC never has more than one HC onboard. |
1555 | */ |
1556 | static void mv_soc_led_blink_enable(struct ata_port *ap) |
1557 | { |
1558 | struct ata_host *host = ap->host; |
1559 | struct mv_host_priv *hpriv = host->private_data; |
1560 | void __iomem *hc_mmio; |
1561 | u32 led_ctrl; |
1562 | |
1563 | if (hpriv->hp_flags & MV_HP_QUIRK_LED_BLINK_EN) |
1564 | return; |
1565 | hpriv->hp_flags |= MV_HP_QUIRK_LED_BLINK_EN; |
	hc_mmio = mv_hc_base_from_port(mv_host_base(host), ap->port_no);
	led_ctrl = readl(hc_mmio + SOC_LED_CTRL);
	writel(led_ctrl | SOC_LED_CTRL_BLINK, hc_mmio + SOC_LED_CTRL);
1569 | } |
1570 | |
1571 | static void mv_soc_led_blink_disable(struct ata_port *ap) |
1572 | { |
1573 | struct ata_host *host = ap->host; |
1574 | struct mv_host_priv *hpriv = host->private_data; |
1575 | void __iomem *hc_mmio; |
1576 | u32 led_ctrl; |
1577 | unsigned int port; |
1578 | |
1579 | if (!(hpriv->hp_flags & MV_HP_QUIRK_LED_BLINK_EN)) |
1580 | return; |
1581 | |
1582 | /* disable led-blink only if no ports are using NCQ */ |
1583 | for (port = 0; port < hpriv->n_ports; port++) { |
1584 | struct ata_port *this_ap = host->ports[port]; |
1585 | struct mv_port_priv *pp = this_ap->private_data; |
1586 | |
1587 | if (pp->pp_flags & MV_PP_FLAG_NCQ_EN) |
1588 | return; |
1589 | } |
1590 | |
1591 | hpriv->hp_flags &= ~MV_HP_QUIRK_LED_BLINK_EN; |
	hc_mmio = mv_hc_base_from_port(mv_host_base(host), ap->port_no);
	led_ctrl = readl(hc_mmio + SOC_LED_CTRL);
	writel(led_ctrl & ~SOC_LED_CTRL_BLINK, hc_mmio + SOC_LED_CTRL);
1595 | } |
1596 | |
1597 | static void mv_edma_cfg(struct ata_port *ap, int want_ncq, int want_edma) |
1598 | { |
1599 | u32 cfg; |
1600 | struct mv_port_priv *pp = ap->private_data; |
1601 | struct mv_host_priv *hpriv = ap->host->private_data; |
1602 | void __iomem *port_mmio = mv_ap_base(ap); |
1603 | |
1604 | /* set up non-NCQ EDMA configuration */ |
1605 | cfg = EDMA_CFG_Q_DEPTH; /* always 0x1f for *all* chips */ |
1606 | pp->pp_flags &= |
1607 | ~(MV_PP_FLAG_FBS_EN | MV_PP_FLAG_NCQ_EN | MV_PP_FLAG_FAKE_ATA_BUSY); |
1608 | |
1609 | if (IS_GEN_I(hpriv)) |
1610 | cfg |= (1 << 8); /* enab config burst size mask */ |
1611 | |
1612 | else if (IS_GEN_II(hpriv)) { |
1613 | cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN; |
1614 | mv_60x1_errata_sata25(ap, want_ncq); |
1615 | |
1616 | } else if (IS_GEN_IIE(hpriv)) { |
1617 | int want_fbs = sata_pmp_attached(ap); |
		/*
		 * Possible future enhancement:
		 *
		 * The chip can use FBS with non-NCQ, if we allow it,
		 * but first we need to have the error handling in place
		 * for this mode (datasheet section 7.3.15.4.2.3).
		 * So disallow non-NCQ FBS for now.
		 */
		want_fbs &= want_ncq;
1627 | |
1628 | mv_config_fbs(ap, want_ncq, want_fbs); |
1629 | |
1630 | if (want_fbs) { |
1631 | pp->pp_flags |= MV_PP_FLAG_FBS_EN; |
1632 | cfg |= EDMA_CFG_EDMA_FBS; /* FIS-based switching */ |
1633 | } |
1634 | |
1635 | cfg |= (1 << 23); /* do not mask PM field in rx'd FIS */ |
1636 | if (want_edma) { |
1637 | cfg |= (1 << 22); /* enab 4-entry host queue cache */ |
1638 | if (!IS_SOC(hpriv)) |
1639 | cfg |= (1 << 18); /* enab early completion */ |
1640 | } |
1641 | if (hpriv->hp_flags & MV_HP_CUT_THROUGH) |
1642 | cfg |= (1 << 17); /* enab cut-thru (dis stor&forwrd) */ |
		mv_bmdma_enable_iie(ap, !want_edma);
1644 | |
1645 | if (IS_SOC(hpriv)) { |
1646 | if (want_ncq) |
1647 | mv_soc_led_blink_enable(ap); |
1648 | else |
1649 | mv_soc_led_blink_disable(ap); |
1650 | } |
1651 | } |
1652 | |
1653 | if (want_ncq) { |
1654 | cfg |= EDMA_CFG_NCQ; |
1655 | pp->pp_flags |= MV_PP_FLAG_NCQ_EN; |
1656 | } |
1657 | |
	writelfl(cfg, port_mmio + EDMA_CFG);
1659 | } |
1660 | |
1661 | static void mv_port_free_dma_mem(struct ata_port *ap) |
1662 | { |
1663 | struct mv_host_priv *hpriv = ap->host->private_data; |
1664 | struct mv_port_priv *pp = ap->private_data; |
1665 | int tag; |
1666 | |
	if (pp->crqb) {
		dma_pool_free(hpriv->crqb_pool, pp->crqb, pp->crqb_dma);
		pp->crqb = NULL;
	}
	if (pp->crpb) {
		dma_pool_free(hpriv->crpb_pool, pp->crpb, pp->crpb_dma);
		pp->crpb = NULL;
	}
1675 | /* |
1676 | * For GEN_I, there's no NCQ, so we have only a single sg_tbl. |
1677 | * For later hardware, we have one unique sg_tbl per NCQ tag. |
1678 | */ |
1679 | for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) { |
1680 | if (pp->sg_tbl[tag]) { |
1681 | if (tag == 0 || !IS_GEN_I(hpriv)) |
				dma_pool_free(hpriv->sg_tbl_pool,
					      pp->sg_tbl[tag],
					      pp->sg_tbl_dma[tag]);
1685 | pp->sg_tbl[tag] = NULL; |
1686 | } |
1687 | } |
1688 | } |
1689 | |
1690 | /** |
1691 | * mv_port_start - Port specific init/start routine. |
1692 | * @ap: ATA channel to manipulate |
1693 | * |
1694 | * Allocate and point to DMA memory, init port private memory, |
1695 | * zero indices. |
1696 | * |
1697 | * LOCKING: |
1698 | * Inherited from caller. |
1699 | */ |
1700 | static int mv_port_start(struct ata_port *ap) |
1701 | { |
1702 | struct device *dev = ap->host->dev; |
1703 | struct mv_host_priv *hpriv = ap->host->private_data; |
1704 | struct mv_port_priv *pp; |
1705 | unsigned long flags; |
1706 | int tag; |
1707 | |
	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
1709 | if (!pp) |
1710 | return -ENOMEM; |
1711 | ap->private_data = pp; |
1712 | |
	pp->crqb = dma_pool_zalloc(hpriv->crqb_pool, GFP_KERNEL, &pp->crqb_dma);
1714 | if (!pp->crqb) |
1715 | return -ENOMEM; |
1716 | |
	pp->crpb = dma_pool_zalloc(hpriv->crpb_pool, GFP_KERNEL, &pp->crpb_dma);
1718 | if (!pp->crpb) |
1719 | goto out_port_free_dma_mem; |
1720 | |
1721 | /* 6041/6081 Rev. "C0" (and newer) are okay with async notify */ |
1722 | if (hpriv->hp_flags & MV_HP_ERRATA_60X1C0) |
1723 | ap->flags |= ATA_FLAG_AN; |
1724 | /* |
1725 | * For GEN_I, there's no NCQ, so we only allocate a single sg_tbl. |
1726 | * For later hardware, we need one unique sg_tbl per NCQ tag. |
1727 | */ |
1728 | for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) { |
1729 | if (tag == 0 || !IS_GEN_I(hpriv)) { |
			pp->sg_tbl[tag] = dma_pool_alloc(hpriv->sg_tbl_pool,
					GFP_KERNEL, &pp->sg_tbl_dma[tag]);
1732 | if (!pp->sg_tbl[tag]) |
1733 | goto out_port_free_dma_mem; |
1734 | } else { |
1735 | pp->sg_tbl[tag] = pp->sg_tbl[0]; |
1736 | pp->sg_tbl_dma[tag] = pp->sg_tbl_dma[0]; |
1737 | } |
1738 | } |
1739 | |
1740 | spin_lock_irqsave(ap->lock, flags); |
1741 | mv_save_cached_regs(ap); |
	mv_edma_cfg(ap, 0, 0);
	spin_unlock_irqrestore(ap->lock, flags);
1744 | |
1745 | return 0; |
1746 | |
1747 | out_port_free_dma_mem: |
1748 | mv_port_free_dma_mem(ap); |
1749 | return -ENOMEM; |
1750 | } |
1751 | |
1752 | /** |
1753 | * mv_port_stop - Port specific cleanup/stop routine. |
1754 | * @ap: ATA channel to manipulate |
1755 | * |
1756 | * Stop DMA, cleanup port memory. |
1757 | * |
1758 | * LOCKING: |
1759 | * This routine uses the host lock to protect the DMA stop. |
1760 | */ |
1761 | static void mv_port_stop(struct ata_port *ap) |
1762 | { |
1763 | unsigned long flags; |
1764 | |
1765 | spin_lock_irqsave(ap->lock, flags); |
1766 | mv_stop_edma(ap); |
	mv_enable_port_irqs(ap, 0);
	spin_unlock_irqrestore(ap->lock, flags);
1769 | mv_port_free_dma_mem(ap); |
1770 | } |
1771 | |
1772 | /** |
1773 | * mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries |
1774 | * @qc: queued command whose SG list to source from |
1775 | * |
1776 | * Populate the SG list and mark the last entry. |
1777 | * |
1778 | * LOCKING: |
1779 | * Inherited from caller. |
1780 | */ |
1781 | static void mv_fill_sg(struct ata_queued_cmd *qc) |
1782 | { |
1783 | struct mv_port_priv *pp = qc->ap->private_data; |
1784 | struct scatterlist *sg; |
1785 | struct mv_sg *mv_sg, *last_sg = NULL; |
1786 | unsigned int si; |
1787 | |
1788 | mv_sg = pp->sg_tbl[qc->hw_tag]; |
1789 | for_each_sg(qc->sg, sg, qc->n_elem, si) { |
1790 | dma_addr_t addr = sg_dma_address(sg); |
1791 | u32 sg_len = sg_dma_len(sg); |
1792 | |
1793 | while (sg_len) { |
1794 | u32 offset = addr & 0xffff; |
1795 | u32 len = sg_len; |
1796 | |
1797 | if (offset + len > 0x10000) |
1798 | len = 0x10000 - offset; |
1799 | |
1800 | mv_sg->addr = cpu_to_le32(addr & 0xffffffff); |
1801 | mv_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16); |
1802 | mv_sg->flags_size = cpu_to_le32(len & 0xffff); |
1803 | mv_sg->reserved = 0; |
1804 | |
1805 | sg_len -= len; |
1806 | addr += len; |
1807 | |
1808 | last_sg = mv_sg; |
1809 | mv_sg++; |
1810 | } |
1811 | } |
1812 | |
1813 | if (likely(last_sg)) |
1814 | last_sg->flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL); |
1815 | mb(); /* ensure data structure is visible to the chipset */ |
1816 | } |
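
/*
 * Worked example of the 64KB boundary split in mv_fill_sg() above
 * (hypothetical addresses, for illustration only):
 *
 * An sg entry with addr 0x1234f000 and sg_len 0x3000 would cross a
 * 64KB boundary, so it is emitted as two ePRDs:
 *
 *	offset = 0x1234f000 & 0xffff = 0xf000
 *	first ePRD:  addr 0x1234f000, len = 0x10000 - 0xf000 = 0x1000
 *	second ePRD: addr 0x12350000, len = 0x3000 - 0x1000  = 0x2000
 *
 * EPRD_FLAG_END_OF_TBL is then set only on the final ePRD emitted.
 */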
1817 | |
1818 | static void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last) |
1819 | { |
1820 | u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS | |
1821 | (last ? CRQB_CMD_LAST : 0); |
1822 | *cmdw = cpu_to_le16(tmp); |
1823 | } |
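
/*
 * Illustrative sketch of a packed CRQB command word, assuming only the
 * field layout implied by mv_crqb_pack_cmd() above: register data in
 * the low byte, the shadow-register address above it at
 * CRQB_CMD_ADDR_SHIFT, plus the CRQB_CMD_CS and CRQB_CMD_LAST control
 * bits. For example, the final word of a request packs the command
 * register:
 *
 *	mv_crqb_pack_cmd(cw, tf->command, ATA_REG_CMD, 1);
 *	--> cpu_to_le16(tf->command |
 *			(ATA_REG_CMD << CRQB_CMD_ADDR_SHIFT) |
 *			CRQB_CMD_CS | CRQB_CMD_LAST)
 */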
1824 | |
1825 | /** |
1826 | * mv_sff_irq_clear - Clear hardware interrupt after DMA. |
1827 | * @ap: Port associated with this ATA transaction. |
1828 | * |
1829 | * We need this only for ATAPI bmdma transactions, |
1830 | * as otherwise we experience spurious interrupts |
1831 | * after libata-sff handles the bmdma interrupts. |
1832 | */ |
1833 | static void mv_sff_irq_clear(struct ata_port *ap) |
1834 | { |
	mv_clear_and_enable_port_irqs(ap, mv_ap_base(ap), ERR_IRQ);
1836 | } |
1837 | |
1838 | /** |
1839 | * mv_check_atapi_dma - Filter ATAPI cmds which are unsuitable for DMA. |
1840 | * @qc: queued command to check for chipset/DMA compatibility. |
1841 | * |
1842 | * The bmdma engines cannot handle speculative data sizes |
1843 | * (bytecount under/over flow). So only allow DMA for |
1844 | * data transfer commands with known data sizes. |
1845 | * |
1846 | * LOCKING: |
1847 | * Inherited from caller. |
1848 | */ |
1849 | static int mv_check_atapi_dma(struct ata_queued_cmd *qc) |
1850 | { |
1851 | struct scsi_cmnd *scmd = qc->scsicmd; |
1852 | |
1853 | if (scmd) { |
1854 | switch (scmd->cmnd[0]) { |
1855 | case READ_6: |
1856 | case READ_10: |
1857 | case READ_12: |
1858 | case WRITE_6: |
1859 | case WRITE_10: |
1860 | case WRITE_12: |
1861 | case GPCMD_READ_CD: |
1862 | case GPCMD_SEND_DVD_STRUCTURE: |
1863 | case GPCMD_SEND_CUE_SHEET: |
1864 | return 0; /* DMA is safe */ |
1865 | } |
1866 | } |
1867 | return -EOPNOTSUPP; /* use PIO instead */ |
1868 | } |
1869 | |
1870 | /** |
1871 | * mv_bmdma_setup - Set up BMDMA transaction |
1872 | * @qc: queued command to prepare DMA for. |
1873 | * |
1874 | * LOCKING: |
1875 | * Inherited from caller. |
1876 | */ |
1877 | static void mv_bmdma_setup(struct ata_queued_cmd *qc) |
1878 | { |
1879 | struct ata_port *ap = qc->ap; |
1880 | void __iomem *port_mmio = mv_ap_base(ap); |
1881 | struct mv_port_priv *pp = ap->private_data; |
1882 | |
1883 | mv_fill_sg(qc); |
1884 | |
1885 | /* clear all DMA cmd bits */ |
	writel(0, port_mmio + BMDMA_CMD);

	/* load PRD table addr. */
	writel((pp->sg_tbl_dma[qc->hw_tag] >> 16) >> 16,
	       port_mmio + BMDMA_PRD_HIGH);
	writelfl(pp->sg_tbl_dma[qc->hw_tag],
		 port_mmio + BMDMA_PRD_LOW);
1893 | |
1894 | /* issue r/w command */ |
1895 | ap->ops->sff_exec_command(ap, &qc->tf); |
1896 | } |
1897 | |
1898 | /** |
1899 | * mv_bmdma_start - Start a BMDMA transaction |
1900 | * @qc: queued command to start DMA on. |
1901 | * |
1902 | * LOCKING: |
1903 | * Inherited from caller. |
1904 | */ |
1905 | static void mv_bmdma_start(struct ata_queued_cmd *qc) |
1906 | { |
1907 | struct ata_port *ap = qc->ap; |
1908 | void __iomem *port_mmio = mv_ap_base(ap); |
1909 | unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE); |
1910 | u32 cmd = (rw ? 0 : ATA_DMA_WR) | ATA_DMA_START; |
1911 | |
1912 | /* start host DMA transaction */ |
	writelfl(cmd, port_mmio + BMDMA_CMD);
1914 | } |
1915 | |
1916 | /** |
1917 | * mv_bmdma_stop_ap - Stop BMDMA transfer |
1918 | * @ap: port to stop |
1919 | * |
1920 | * Clears the ATA_DMA_START flag in the bmdma control register |
1921 | * |
1922 | * LOCKING: |
1923 | * Inherited from caller. |
1924 | */ |
1925 | static void mv_bmdma_stop_ap(struct ata_port *ap) |
1926 | { |
1927 | void __iomem *port_mmio = mv_ap_base(ap); |
1928 | u32 cmd; |
1929 | |
1930 | /* clear start/stop bit */ |
	cmd = readl(port_mmio + BMDMA_CMD);
	if (cmd & ATA_DMA_START) {
		cmd &= ~ATA_DMA_START;
		writelfl(cmd, port_mmio + BMDMA_CMD);
1935 | |
1936 | /* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */ |
1937 | ata_sff_dma_pause(ap); |
1938 | } |
1939 | } |
1940 | |
1941 | static void mv_bmdma_stop(struct ata_queued_cmd *qc) |
1942 | { |
	mv_bmdma_stop_ap(qc->ap);
1944 | } |
1945 | |
1946 | /** |
1947 | * mv_bmdma_status - Read BMDMA status |
1948 | * @ap: port for which to retrieve DMA status. |
1949 | * |
1950 | * Read and return equivalent of the sff BMDMA status register. |
1951 | * |
1952 | * LOCKING: |
1953 | * Inherited from caller. |
1954 | */ |
1955 | static u8 mv_bmdma_status(struct ata_port *ap) |
1956 | { |
1957 | void __iomem *port_mmio = mv_ap_base(ap); |
1958 | u32 reg, status; |
1959 | |
1960 | /* |
1961 | * Other bits are valid only if ATA_DMA_ACTIVE==0, |
1962 | * and the ATA_DMA_INTR bit doesn't exist. |
1963 | */ |
	reg = readl(port_mmio + BMDMA_STATUS);
1965 | if (reg & ATA_DMA_ACTIVE) |
1966 | status = ATA_DMA_ACTIVE; |
1967 | else if (reg & ATA_DMA_ERR) |
1968 | status = (reg & ATA_DMA_ERR) | ATA_DMA_INTR; |
1969 | else { |
1970 | /* |
1971 | * Just because DMA_ACTIVE is 0 (DMA completed), |
1972 | * this does _not_ mean the device is "done". |
1973 | * So we should not yet be signalling ATA_DMA_INTR |
1974 | * in some cases. Eg. DSM/TRIM, and perhaps others. |
1975 | */ |
1976 | mv_bmdma_stop_ap(ap); |
1977 | if (ioread8(ap->ioaddr.altstatus_addr) & ATA_BUSY) |
1978 | status = 0; |
1979 | else |
1980 | status = ATA_DMA_INTR; |
1981 | } |
1982 | return status; |
1983 | } |
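
/*
 * The status mapping above, summarized (derived from the code;
 * illustrative only):
 *
 *	BMDMA_STATUS has ACTIVE set	-> ATA_DMA_ACTIVE
 *	else ERR set			-> ATA_DMA_ERR | ATA_DMA_INTR
 *	else device still BUSY		-> 0 (e.g. DSM/TRIM not done yet)
 *	else				-> ATA_DMA_INTR (transfer done)
 */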
1984 | |
1985 | static void mv_rw_multi_errata_sata24(struct ata_queued_cmd *qc) |
1986 | { |
1987 | struct ata_taskfile *tf = &qc->tf; |
1988 | /* |
1989 | * Workaround for 88SX60x1 FEr SATA#24. |
1990 | * |
1991 | * Chip may corrupt WRITEs if multi_count >= 4kB. |
1992 | * Note that READs are unaffected. |
1993 | * |
1994 | * It's not clear if this errata really means "4K bytes", |
1995 | * or if it always happens for multi_count > 7 |
1996 | * regardless of device sector_size. |
1997 | * |
1998 | * So, for safety, any write with multi_count > 7 |
1999 | * gets converted here into a regular PIO write instead: |
2000 | */ |
2001 | if ((tf->flags & ATA_TFLAG_WRITE) && is_multi_taskfile(tf)) { |
2002 | if (qc->dev->multi_count > 7) { |
2003 | switch (tf->command) { |
2004 | case ATA_CMD_WRITE_MULTI: |
2005 | tf->command = ATA_CMD_PIO_WRITE; |
2006 | break; |
2007 | case ATA_CMD_WRITE_MULTI_FUA_EXT: |
2008 | tf->flags &= ~ATA_TFLAG_FUA; /* ugh */ |
2009 | fallthrough; |
2010 | case ATA_CMD_WRITE_MULTI_EXT: |
2011 | tf->command = ATA_CMD_PIO_WRITE_EXT; |
2012 | break; |
2013 | } |
2014 | } |
2015 | } |
2016 | } |
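
/*
 * Worked example for the errata workaround above (hypothetical device,
 * for illustration): with 512-byte logical sectors and multi_count = 16,
 * each multiple-mode DRQ block is 16 * 512 = 8KB, which exceeds the
 * 7-sector safety limit, so an ATA_CMD_WRITE_MULTI_EXT taskfile is
 * rewritten to ATA_CMD_PIO_WRITE_EXT before issue. A multi_count of 4
 * (2KB per DRQ block) passes through unchanged.
 */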
2017 | |
2018 | /** |
2019 | * mv_qc_prep - Host specific command preparation. |
2020 | * @qc: queued command to prepare |
2021 | * |
2022 | * This routine simply redirects to the general purpose routine |
2023 | * if command is not DMA. Else, it handles prep of the CRQB |
2024 | * (command request block), does some sanity checking, and calls |
2025 | * the SG load routine. |
2026 | * |
2027 | * LOCKING: |
2028 | * Inherited from caller. |
2029 | */ |
2030 | static enum ata_completion_errors mv_qc_prep(struct ata_queued_cmd *qc) |
2031 | { |
2032 | struct ata_port *ap = qc->ap; |
2033 | struct mv_port_priv *pp = ap->private_data; |
2034 | __le16 *cw; |
2035 | struct ata_taskfile *tf = &qc->tf; |
2036 | u16 flags = 0; |
2037 | unsigned in_index; |
2038 | |
2039 | switch (tf->protocol) { |
2040 | case ATA_PROT_DMA: |
2041 | if (tf->command == ATA_CMD_DSM) |
2042 | return AC_ERR_OK; |
2043 | fallthrough; |
2044 | case ATA_PROT_NCQ: |
2045 | break; /* continue below */ |
2046 | case ATA_PROT_PIO: |
2047 | mv_rw_multi_errata_sata24(qc); |
2048 | return AC_ERR_OK; |
2049 | default: |
2050 | return AC_ERR_OK; |
2051 | } |
2052 | |
2053 | /* Fill in command request block |
2054 | */ |
2055 | if (!(tf->flags & ATA_TFLAG_WRITE)) |
2056 | flags |= CRQB_FLAG_READ; |
2057 | WARN_ON(MV_MAX_Q_DEPTH <= qc->hw_tag); |
2058 | flags |= qc->hw_tag << CRQB_TAG_SHIFT; |
2059 | flags |= (qc->dev->link->pmp & 0xf) << CRQB_PMP_SHIFT; |
2060 | |
2061 | /* get current queue index from software */ |
2062 | in_index = pp->req_idx; |
2063 | |
2064 | pp->crqb[in_index].sg_addr = |
2065 | cpu_to_le32(pp->sg_tbl_dma[qc->hw_tag] & 0xffffffff); |
2066 | pp->crqb[in_index].sg_addr_hi = |
2067 | cpu_to_le32((pp->sg_tbl_dma[qc->hw_tag] >> 16) >> 16); |
2068 | pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags); |
2069 | |
2070 | cw = &pp->crqb[in_index].ata_cmd[0]; |
2071 | |
2072 | /* Sadly, the CRQB cannot accommodate all registers--there are |
2073 | * only 11 bytes...so we must pick and choose required |
2074 | * registers based on the command. So, we drop feature and |
2075 | * hob_feature for [RW] DMA commands, but they are needed for |
2076 | * NCQ. NCQ will drop hob_nsect, which is not needed there |
2077 | * (nsect is used only for the tag; feat/hob_feat hold true nsect). |
2078 | */ |
2079 | switch (tf->command) { |
2080 | case ATA_CMD_READ: |
2081 | case ATA_CMD_READ_EXT: |
2082 | case ATA_CMD_WRITE: |
2083 | case ATA_CMD_WRITE_EXT: |
2084 | case ATA_CMD_WRITE_FUA_EXT: |
		mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0);
2086 | break; |
2087 | case ATA_CMD_FPDMA_READ: |
2088 | case ATA_CMD_FPDMA_WRITE: |
		mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0);
		mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0);
2091 | break; |
2092 | default: |
2093 | /* The only other commands EDMA supports in non-queued and |
2094 | * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none |
2095 | * of which are defined/used by Linux. If we get here, this |
2096 | * driver needs work. |
2097 | */ |
		ata_port_err(ap, "%s: unsupported command: %.2x\n", __func__,
			     tf->command);
2100 | return AC_ERR_INVALID; |
2101 | } |
	mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0);
	mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1);	/* last */
2111 | |
2112 | if (!(qc->flags & ATA_QCFLAG_DMAMAP)) |
2113 | return AC_ERR_OK; |
2114 | mv_fill_sg(qc); |
2115 | |
2116 | return AC_ERR_OK; |
2117 | } |
2118 | |
2119 | /** |
2120 | * mv_qc_prep_iie - Host specific command preparation. |
2121 | * @qc: queued command to prepare |
2122 | * |
2123 | * This routine simply redirects to the general purpose routine |
2124 | * if command is not DMA. Else, it handles prep of the CRQB |
2125 | * (command request block), does some sanity checking, and calls |
2126 | * the SG load routine. |
2127 | * |
2128 | * LOCKING: |
2129 | * Inherited from caller. |
2130 | */ |
2131 | static enum ata_completion_errors mv_qc_prep_iie(struct ata_queued_cmd *qc) |
2132 | { |
2133 | struct ata_port *ap = qc->ap; |
2134 | struct mv_port_priv *pp = ap->private_data; |
2135 | struct mv_crqb_iie *crqb; |
2136 | struct ata_taskfile *tf = &qc->tf; |
2137 | unsigned in_index; |
2138 | u32 flags = 0; |
2139 | |
2140 | if ((tf->protocol != ATA_PROT_DMA) && |
2141 | (tf->protocol != ATA_PROT_NCQ)) |
2142 | return AC_ERR_OK; |
2143 | if (tf->command == ATA_CMD_DSM) |
2144 | return AC_ERR_OK; /* use bmdma for this */ |
2145 | |
2146 | /* Fill in Gen IIE command request block */ |
2147 | if (!(tf->flags & ATA_TFLAG_WRITE)) |
2148 | flags |= CRQB_FLAG_READ; |
2149 | |
2150 | WARN_ON(MV_MAX_Q_DEPTH <= qc->hw_tag); |
2151 | flags |= qc->hw_tag << CRQB_TAG_SHIFT; |
2152 | flags |= qc->hw_tag << CRQB_HOSTQ_SHIFT; |
2153 | flags |= (qc->dev->link->pmp & 0xf) << CRQB_PMP_SHIFT; |
2154 | |
2155 | /* get current queue index from software */ |
2156 | in_index = pp->req_idx; |
2157 | |
2158 | crqb = (struct mv_crqb_iie *) &pp->crqb[in_index]; |
2159 | crqb->addr = cpu_to_le32(pp->sg_tbl_dma[qc->hw_tag] & 0xffffffff); |
2160 | crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma[qc->hw_tag] >> 16) >> 16); |
2161 | crqb->flags = cpu_to_le32(flags); |
2162 | |
2163 | crqb->ata_cmd[0] = cpu_to_le32( |
2164 | (tf->command << 16) | |
2165 | (tf->feature << 24) |
2166 | ); |
2167 | crqb->ata_cmd[1] = cpu_to_le32( |
2168 | (tf->lbal << 0) | |
2169 | (tf->lbam << 8) | |
2170 | (tf->lbah << 16) | |
2171 | (tf->device << 24) |
2172 | ); |
2173 | crqb->ata_cmd[2] = cpu_to_le32( |
2174 | (tf->hob_lbal << 0) | |
2175 | (tf->hob_lbam << 8) | |
2176 | (tf->hob_lbah << 16) | |
2177 | (tf->hob_feature << 24) |
2178 | ); |
2179 | crqb->ata_cmd[3] = cpu_to_le32( |
2180 | (tf->nsect << 0) | |
2181 | (tf->hob_nsect << 8) |
2182 | ); |
2183 | |
2184 | if (!(qc->flags & ATA_QCFLAG_DMAMAP)) |
2185 | return AC_ERR_OK; |
2186 | mv_fill_sg(qc); |
2187 | |
2188 | return AC_ERR_OK; |
2189 | } |
2190 | |
2191 | /** |
2192 | * mv_sff_check_status - fetch device status, if valid |
2193 | * @ap: ATA port to fetch status from |
2194 | * |
2195 | * When using command issue via mv_qc_issue_fis(), |
2196 | * the initial ATA_BUSY state does not show up in the |
2197 | * ATA status (shadow) register. This can confuse libata! |
2198 | * |
2199 | * So we have a hook here to fake ATA_BUSY for that situation, |
2200 | * until the first time a BUSY, DRQ, or ERR bit is seen. |
2201 | * |
2202 | * The rest of the time, it simply returns the ATA status register. |
2203 | */ |
2204 | static u8 mv_sff_check_status(struct ata_port *ap) |
2205 | { |
2206 | u8 stat = ioread8(ap->ioaddr.status_addr); |
2207 | struct mv_port_priv *pp = ap->private_data; |
2208 | |
2209 | if (pp->pp_flags & MV_PP_FLAG_FAKE_ATA_BUSY) { |
2210 | if (stat & (ATA_BUSY | ATA_DRQ | ATA_ERR)) |
2211 | pp->pp_flags &= ~MV_PP_FLAG_FAKE_ATA_BUSY; |
2212 | else |
2213 | stat = ATA_BUSY; |
2214 | } |
2215 | return stat; |
2216 | } |
2217 | |
2218 | /** |
2219 | * mv_send_fis - Send a FIS, using the "Vendor-Unique FIS" register |
2220 | * @ap: ATA port to send a FIS |
2221 | * @fis: fis to be sent |
2222 | * @nwords: number of 32-bit words in the fis |
2223 | */ |
2224 | static unsigned int mv_send_fis(struct ata_port *ap, u32 *fis, int nwords) |
2225 | { |
2226 | void __iomem *port_mmio = mv_ap_base(ap); |
2227 | u32 ifctl, old_ifctl, ifstat; |
2228 | int i, timeout = 200, final_word = nwords - 1; |
2229 | |
2230 | /* Initiate FIS transmission mode */ |
	old_ifctl = readl(port_mmio + SATA_IFCTL);
	ifctl = 0x100 | (old_ifctl & 0xf);
	writelfl(ifctl, port_mmio + SATA_IFCTL);

	/* Send all words of the FIS except for the final word */
	for (i = 0; i < final_word; ++i)
		writel(fis[i], port_mmio + VENDOR_UNIQUE_FIS);

	/* Flag end-of-transmission, and then send the final word */
	writelfl(ifctl | 0x200, port_mmio + SATA_IFCTL);
	writelfl(fis[final_word], port_mmio + VENDOR_UNIQUE_FIS);
2242 | |
2243 | /* |
2244 | * Wait for FIS transmission to complete. |
2245 | * This typically takes just a single iteration. |
2246 | */ |
2247 | do { |
		ifstat = readl(port_mmio + SATA_IFSTAT);
2249 | } while (!(ifstat & 0x1000) && --timeout); |
2250 | |
2251 | /* Restore original port configuration */ |
	writelfl(old_ifctl, port_mmio + SATA_IFCTL);
2253 | |
2254 | /* See if it worked */ |
2255 | if ((ifstat & 0x3000) != 0x1000) { |
		ata_port_warn(ap, "%s transmission error, ifstat=%08x\n",
			      __func__, ifstat);
2258 | return AC_ERR_OTHER; |
2259 | } |
2260 | return 0; |
2261 | } |
2262 | |
2263 | /** |
2264 | * mv_qc_issue_fis - Issue a command directly as a FIS |
2265 | * @qc: queued command to start |
2266 | * |
2267 | * Note that the ATA shadow registers are not updated |
2268 | * after command issue, so the device will appear "READY" |
2269 | * if polled, even while it is BUSY processing the command. |
2270 | * |
2271 | * So we use a status hook to fake ATA_BUSY until the drive changes state. |
2272 | * |
2273 | * Note: we don't get updated shadow regs on *completion* |
2274 | * of non-data commands. So avoid sending them via this function, |
2275 | * as they will appear to have completed immediately. |
2276 | * |
2277 | * GEN_IIE has special registers that we could get the result tf from, |
2278 | * but earlier chipsets do not. For now, we ignore those registers. |
2279 | */ |
2280 | static unsigned int mv_qc_issue_fis(struct ata_queued_cmd *qc) |
2281 | { |
2282 | struct ata_port *ap = qc->ap; |
2283 | struct mv_port_priv *pp = ap->private_data; |
2284 | struct ata_link *link = qc->dev->link; |
2285 | u32 fis[5]; |
2286 | int err = 0; |
2287 | |
	ata_tf_to_fis(&qc->tf, link->pmp, 1, (void *)fis);
2289 | err = mv_send_fis(ap, fis, ARRAY_SIZE(fis)); |
2290 | if (err) |
2291 | return err; |
2292 | |
2293 | switch (qc->tf.protocol) { |
2294 | case ATAPI_PROT_PIO: |
2295 | pp->pp_flags |= MV_PP_FLAG_FAKE_ATA_BUSY; |
2296 | fallthrough; |
2297 | case ATAPI_PROT_NODATA: |
2298 | ap->hsm_task_state = HSM_ST_FIRST; |
2299 | break; |
2300 | case ATA_PROT_PIO: |
2301 | pp->pp_flags |= MV_PP_FLAG_FAKE_ATA_BUSY; |
2302 | if (qc->tf.flags & ATA_TFLAG_WRITE) |
2303 | ap->hsm_task_state = HSM_ST_FIRST; |
2304 | else |
2305 | ap->hsm_task_state = HSM_ST; |
2306 | break; |
2307 | default: |
2308 | ap->hsm_task_state = HSM_ST_LAST; |
2309 | break; |
2310 | } |
2311 | |
2312 | if (qc->tf.flags & ATA_TFLAG_POLLING) |
		ata_sff_queue_pio_task(link, 0);
2314 | return 0; |
2315 | } |
2316 | |
2317 | /** |
2318 | * mv_qc_issue - Initiate a command to the host |
2319 | * @qc: queued command to start |
2320 | * |
2321 | * This routine simply redirects to the general purpose routine |
2322 | * if command is not DMA. Else, it sanity checks our local |
2323 | * caches of the request producer/consumer indices then enables |
2324 | * DMA and bumps the request producer index. |
2325 | * |
2326 | * LOCKING: |
2327 | * Inherited from caller. |
2328 | */ |
2329 | static unsigned int mv_qc_issue(struct ata_queued_cmd *qc) |
2330 | { |
2331 | static int limit_warnings = 10; |
2332 | struct ata_port *ap = qc->ap; |
2333 | void __iomem *port_mmio = mv_ap_base(ap); |
2334 | struct mv_port_priv *pp = ap->private_data; |
2335 | u32 in_index; |
2336 | unsigned int port_irqs; |
2337 | |
2338 | pp->pp_flags &= ~MV_PP_FLAG_FAKE_ATA_BUSY; /* paranoia */ |
2339 | |
2340 | switch (qc->tf.protocol) { |
2341 | case ATA_PROT_DMA: |
2342 | if (qc->tf.command == ATA_CMD_DSM) { |
2343 | if (!ap->ops->bmdma_setup) /* no bmdma on GEN_I */ |
2344 | return AC_ERR_OTHER; |
2345 | break; /* use bmdma for this */ |
2346 | } |
2347 | fallthrough; |
2348 | case ATA_PROT_NCQ: |
		mv_start_edma(ap, port_mmio, pp, qc->tf.protocol);
2350 | pp->req_idx = (pp->req_idx + 1) & MV_MAX_Q_DEPTH_MASK; |
2351 | in_index = pp->req_idx << EDMA_REQ_Q_PTR_SHIFT; |
2352 | |
2353 | /* Write the request in pointer to kick the EDMA to life */ |
		writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | in_index,
			 port_mmio + EDMA_REQ_Q_IN_PTR);
2356 | return 0; |
2357 | |
2358 | case ATA_PROT_PIO: |
2359 | /* |
2360 | * Errata SATA#16, SATA#24: warn if multiple DRQs expected. |
2361 | * |
2362 | * Someday, we might implement special polling workarounds |
2363 | * for these, but it all seems rather unnecessary since we |
2364 | * normally use only DMA for commands which transfer more |
2365 | * than a single block of data. |
2366 | * |
2367 | * Much of the time, this could just work regardless. |
2368 | * So for now, just log the incident, and allow the attempt. |
2369 | */ |
2370 | if (limit_warnings > 0 && (qc->nbytes / qc->sect_size) > 1) { |
2371 | --limit_warnings; |
			ata_link_warn(qc->dev->link, DRV_NAME
				      ": attempting PIO w/multiple DRQ: "
				      "this may fail due to h/w errata\n");
2375 | } |
2376 | fallthrough; |
2377 | case ATA_PROT_NODATA: |
2378 | case ATAPI_PROT_PIO: |
2379 | case ATAPI_PROT_NODATA: |
2380 | if (ap->flags & ATA_FLAG_PIO_POLLING) |
2381 | qc->tf.flags |= ATA_TFLAG_POLLING; |
2382 | break; |
2383 | } |
2384 | |
2385 | if (qc->tf.flags & ATA_TFLAG_POLLING) |
2386 | port_irqs = ERR_IRQ; /* mask device interrupt when polling */ |
2387 | else |
2388 | port_irqs = ERR_IRQ | DONE_IRQ; /* unmask all interrupts */ |
2389 | |
	/*
	 * We're about to send a non-EDMA capable command to the
	 * port. Turn off EDMA so there won't be problems accessing
	 * the shadow block registers, etc.
	 */
	mv_stop_edma(ap);
	mv_clear_and_enable_port_irqs(ap, mv_ap_base(ap), port_irqs);
	mv_pmp_select(ap, qc->dev->link->pmp);
2398 | |
2399 | if (qc->tf.command == ATA_CMD_READ_LOG_EXT) { |
2400 | struct mv_host_priv *hpriv = ap->host->private_data; |
2401 | /* |
2402 | * Workaround for 88SX60x1 FEr SATA#25 (part 2). |
2403 | * |
2404 | * After any NCQ error, the READ_LOG_EXT command |
2405 | * from libata-eh *must* use mv_qc_issue_fis(). |
2406 | * Otherwise it might fail, due to chip errata. |
2407 | * |
2408 | * Rather than special-case it, we'll just *always* |
2409 | * use this method here for READ_LOG_EXT, making for |
2410 | * easier testing. |
2411 | */ |
2412 | if (IS_GEN_II(hpriv)) |
2413 | return mv_qc_issue_fis(qc); |
2414 | } |
2415 | return ata_bmdma_qc_issue(qc); |
2416 | } |
2417 | |
2418 | static struct ata_queued_cmd *mv_get_active_qc(struct ata_port *ap) |
2419 | { |
2420 | struct mv_port_priv *pp = ap->private_data; |
2421 | struct ata_queued_cmd *qc; |
2422 | |
2423 | if (pp->pp_flags & MV_PP_FLAG_NCQ_EN) |
2424 | return NULL; |
	qc = ata_qc_from_tag(ap, ap->link.active_tag);
2426 | if (qc && !(qc->tf.flags & ATA_TFLAG_POLLING)) |
2427 | return qc; |
2428 | return NULL; |
2429 | } |
2430 | |
2431 | static void mv_pmp_error_handler(struct ata_port *ap) |
2432 | { |
2433 | unsigned int pmp, pmp_map; |
2434 | struct mv_port_priv *pp = ap->private_data; |
2435 | |
2436 | if (pp->pp_flags & MV_PP_FLAG_DELAYED_EH) { |
2437 | /* |
2438 | * Perform NCQ error analysis on failed PMPs |
2439 | * before we freeze the port entirely. |
2440 | * |
2441 | * The failed PMPs are marked earlier by mv_pmp_eh_prep(). |
2442 | */ |
2443 | pmp_map = pp->delayed_eh_pmp_map; |
2444 | pp->pp_flags &= ~MV_PP_FLAG_DELAYED_EH; |
2445 | for (pmp = 0; pmp_map != 0; pmp++) { |
2446 | unsigned int this_pmp = (1 << pmp); |
2447 | if (pmp_map & this_pmp) { |
2448 | struct ata_link *link = &ap->pmp_link[pmp]; |
2449 | pmp_map &= ~this_pmp; |
2450 | ata_eh_analyze_ncq_error(link); |
2451 | } |
2452 | } |
2453 | ata_port_freeze(ap); |
2454 | } |
2455 | sata_pmp_error_handler(ap); |
2456 | } |
2457 | |
2458 | static unsigned int mv_get_err_pmp_map(struct ata_port *ap) |
2459 | { |
2460 | void __iomem *port_mmio = mv_ap_base(ap); |
2461 | |
	return readl(port_mmio + SATA_TESTCTL) >> 16;
2463 | } |
2464 | |
2465 | static void mv_pmp_eh_prep(struct ata_port *ap, unsigned int pmp_map) |
2466 | { |
2467 | unsigned int pmp; |
2468 | |
2469 | /* |
2470 | * Initialize EH info for PMPs which saw device errors |
2471 | */ |
2472 | for (pmp = 0; pmp_map != 0; pmp++) { |
2473 | unsigned int this_pmp = (1 << pmp); |
2474 | if (pmp_map & this_pmp) { |
2475 | struct ata_link *link = &ap->pmp_link[pmp]; |
2476 | struct ata_eh_info *ehi = &link->eh_info; |
2477 | |
2478 | pmp_map &= ~this_pmp; |
2479 | ata_ehi_clear_desc(ehi); |
			ata_ehi_push_desc(ehi, "dev err");
2481 | ehi->err_mask |= AC_ERR_DEV; |
2482 | ehi->action |= ATA_EH_RESET; |
2483 | ata_link_abort(link); |
2484 | } |
2485 | } |
2486 | } |
2487 | |
2488 | static int mv_req_q_empty(struct ata_port *ap) |
2489 | { |
2490 | void __iomem *port_mmio = mv_ap_base(ap); |
2491 | u32 in_ptr, out_ptr; |
2492 | |
	in_ptr  = (readl(port_mmio + EDMA_REQ_Q_IN_PTR)
			>> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
	out_ptr = (readl(port_mmio + EDMA_REQ_Q_OUT_PTR)
			>> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
2497 | return (in_ptr == out_ptr); /* 1 == queue_is_empty */ |
2498 | } |
2499 | |
2500 | static int mv_handle_fbs_ncq_dev_err(struct ata_port *ap) |
2501 | { |
2502 | struct mv_port_priv *pp = ap->private_data; |
2503 | int failed_links; |
2504 | unsigned int old_map, new_map; |
2505 | |
2506 | /* |
2507 | * Device error during FBS+NCQ operation: |
2508 | * |
2509 | * Set a port flag to prevent further I/O being enqueued. |
2510 | * Leave the EDMA running to drain outstanding commands from this port. |
2511 | * Perform the post-mortem/EH only when all responses are complete. |
2512 | * Follow recovery sequence from 6042/7042 datasheet (7.3.15.4.2.2). |
2513 | */ |
2514 | if (!(pp->pp_flags & MV_PP_FLAG_DELAYED_EH)) { |
2515 | pp->pp_flags |= MV_PP_FLAG_DELAYED_EH; |
2516 | pp->delayed_eh_pmp_map = 0; |
2517 | } |
2518 | old_map = pp->delayed_eh_pmp_map; |
2519 | new_map = old_map | mv_get_err_pmp_map(ap); |
2520 | |
2521 | if (old_map != new_map) { |
2522 | pp->delayed_eh_pmp_map = new_map; |
2523 | mv_pmp_eh_prep(ap, pmp_map: new_map & ~old_map); |
2524 | } |
2525 | failed_links = hweight16(new_map); |
2526 | |
	ata_port_info(ap,
		      "%s: pmp_map=%04x qc_map=%04llx failed_links=%d nr_active_links=%d\n",
		      __func__, pp->delayed_eh_pmp_map,
		      ap->qc_active, failed_links,
		      ap->nr_active_links);
2532 | |
2533 | if (ap->nr_active_links <= failed_links && mv_req_q_empty(ap)) { |
2534 | mv_process_crpb_entries(ap, pp); |
2535 | mv_stop_edma(ap); |
2536 | mv_eh_freeze(ap); |
		ata_port_info(ap, "%s: done\n", __func__);
2538 | return 1; /* handled */ |
2539 | } |
	ata_port_info(ap, "%s: waiting\n", __func__);
2541 | return 1; /* handled */ |
2542 | } |
2543 | |
2544 | static int mv_handle_fbs_non_ncq_dev_err(struct ata_port *ap) |
2545 | { |
2546 | /* |
2547 | * Possible future enhancement: |
2548 | * |
2549 | * FBS+non-NCQ operation is not yet implemented. |
2550 | * See related notes in mv_edma_cfg(). |
2551 | * |
2552 | * Device error during FBS+non-NCQ operation: |
2553 | * |
2554 | * We need to snapshot the shadow registers for each failed command. |
2555 | * Follow recovery sequence from 6042/7042 datasheet (7.3.15.4.2.3). |
2556 | */ |
2557 | return 0; /* not handled */ |
2558 | } |
2559 | |
2560 | static int mv_handle_dev_err(struct ata_port *ap, u32 edma_err_cause) |
2561 | { |
2562 | struct mv_port_priv *pp = ap->private_data; |
2563 | |
2564 | if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) |
2565 | return 0; /* EDMA was not active: not handled */ |
2566 | if (!(pp->pp_flags & MV_PP_FLAG_FBS_EN)) |
2567 | return 0; /* FBS was not active: not handled */ |
2568 | |
2569 | if (!(edma_err_cause & EDMA_ERR_DEV)) |
2570 | return 0; /* non DEV error: not handled */ |
2571 | edma_err_cause &= ~EDMA_ERR_IRQ_TRANSIENT; |
2572 | if (edma_err_cause & ~(EDMA_ERR_DEV | EDMA_ERR_SELF_DIS)) |
2573 | return 0; /* other problems: not handled */ |
2574 | |
2575 | if (pp->pp_flags & MV_PP_FLAG_NCQ_EN) { |
2576 | /* |
2577 | * EDMA should NOT have self-disabled for this case. |
2578 | * If it did, then something is wrong elsewhere, |
2579 | * and we cannot handle it here. |
2580 | */ |
2581 | if (edma_err_cause & EDMA_ERR_SELF_DIS) { |
			ata_port_warn(ap, "%s: err_cause=0x%x pp_flags=0x%x\n",
				      __func__, edma_err_cause, pp->pp_flags);
2584 | return 0; /* not handled */ |
2585 | } |
2586 | return mv_handle_fbs_ncq_dev_err(ap); |
2587 | } else { |
2588 | /* |
2589 | * EDMA should have self-disabled for this case. |
2590 | * If it did not, then something is wrong elsewhere, |
2591 | * and we cannot handle it here. |
2592 | */ |
2593 | if (!(edma_err_cause & EDMA_ERR_SELF_DIS)) { |
			ata_port_warn(ap, "%s: err_cause=0x%x pp_flags=0x%x\n",
				      __func__, edma_err_cause, pp->pp_flags);
2596 | return 0; /* not handled */ |
2597 | } |
2598 | return mv_handle_fbs_non_ncq_dev_err(ap); |
2599 | } |
2600 | return 0; /* not handled */ |
2601 | } |
2602 | |
2603 | static void mv_unexpected_intr(struct ata_port *ap, int edma_was_enabled) |
2604 | { |
2605 | struct ata_eh_info *ehi = &ap->link.eh_info; |
	char *when = "idle";
2607 | |
2608 | ata_ehi_clear_desc(ehi); |
2609 | if (edma_was_enabled) { |
		when = "EDMA enabled";
2611 | } else { |
		struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag);
		if (qc && (qc->tf.flags & ATA_TFLAG_POLLING))
			when = "polling";
2615 | } |
	ata_ehi_push_desc(ehi, "unexpected device interrupt while %s", when);
2617 | ehi->err_mask |= AC_ERR_OTHER; |
2618 | ehi->action |= ATA_EH_RESET; |
2619 | ata_port_freeze(ap); |
2620 | } |
2621 | |
2622 | /** |
2623 | * mv_err_intr - Handle error interrupts on the port |
2624 | * @ap: ATA channel to manipulate |
2625 | * |
2626 | * Most cases require a full reset of the chip's state machine, |
2627 | * which also performs a COMRESET. |
2628 | * Also, if the port disabled DMA, update our cached copy to match. |
2629 | * |
2630 | * LOCKING: |
2631 | * Inherited from caller. |
2632 | */ |
2633 | static void mv_err_intr(struct ata_port *ap) |
2634 | { |
2635 | void __iomem *port_mmio = mv_ap_base(ap); |
2636 | u32 edma_err_cause, eh_freeze_mask, serr = 0; |
2637 | u32 fis_cause = 0; |
2638 | struct mv_port_priv *pp = ap->private_data; |
2639 | struct mv_host_priv *hpriv = ap->host->private_data; |
2640 | unsigned int action = 0, err_mask = 0; |
2641 | struct ata_eh_info *ehi = &ap->link.eh_info; |
2642 | struct ata_queued_cmd *qc; |
2643 | int abort = 0; |
2644 | |
2645 | /* |
2646 | * Read and clear the SError and err_cause bits. |
2647 | * For GenIIe, if EDMA_ERR_TRANS_IRQ_7 is set, we also must read/clear |
2648 | * the FIS_IRQ_CAUSE register before clearing edma_err_cause. |
2649 | */ |
	sata_scr_read(&ap->link, SCR_ERROR, &serr);
	sata_scr_write_flush(&ap->link, SCR_ERROR, serr);

	edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE);
	if (IS_GEN_IIE(hpriv) && (edma_err_cause & EDMA_ERR_TRANS_IRQ_7)) {
		fis_cause = readl(port_mmio + FIS_IRQ_CAUSE);
		writelfl(~fis_cause, port_mmio + FIS_IRQ_CAUSE);
	}
	writelfl(~edma_err_cause, port_mmio + EDMA_ERR_IRQ_CAUSE);
2659 | |
2660 | if (edma_err_cause & EDMA_ERR_DEV) { |
2661 | /* |
2662 | * Device errors during FIS-based switching operation |
2663 | * require special handling. |
2664 | */ |
2665 | if (mv_handle_dev_err(ap, edma_err_cause)) |
2666 | return; |
2667 | } |
2668 | |
2669 | qc = mv_get_active_qc(ap); |
2670 | ata_ehi_clear_desc(ehi); |
	ata_ehi_push_desc(ehi, "edma_err_cause=%08x pp_flags=%08x",
			  edma_err_cause, pp->pp_flags);
2673 | |
2674 | if (IS_GEN_IIE(hpriv) && (edma_err_cause & EDMA_ERR_TRANS_IRQ_7)) { |
		ata_ehi_push_desc(ehi, "fis_cause=%08x", fis_cause);
2676 | if (fis_cause & FIS_IRQ_CAUSE_AN) { |
2677 | u32 ec = edma_err_cause & |
2678 | ~(EDMA_ERR_TRANS_IRQ_7 | EDMA_ERR_IRQ_TRANSIENT); |
2679 | sata_async_notification(ap); |
2680 | if (!ec) |
2681 | return; /* Just an AN; no need for the nukes */ |
			ata_ehi_push_desc(ehi, "SDB notify");
2683 | } |
2684 | } |
2685 | /* |
2686 | * All generations share these EDMA error cause bits: |
2687 | */ |
2688 | if (edma_err_cause & EDMA_ERR_DEV) { |
2689 | err_mask |= AC_ERR_DEV; |
2690 | action |= ATA_EH_RESET; |
		ata_ehi_push_desc(ehi, "dev error");
2692 | } |
2693 | if (edma_err_cause & (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR | |
2694 | EDMA_ERR_CRQB_PAR | EDMA_ERR_CRPB_PAR | |
2695 | EDMA_ERR_INTRL_PAR)) { |
2696 | err_mask |= AC_ERR_ATA_BUS; |
2697 | action |= ATA_EH_RESET; |
		ata_ehi_push_desc(ehi, "parity error");
2699 | } |
2700 | if (edma_err_cause & (EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON)) { |
2701 | ata_ehi_hotplugged(ehi); |
		ata_ehi_push_desc(ehi, edma_err_cause & EDMA_ERR_DEV_DCON ?
			"dev disconnect" : "dev connect");
2704 | action |= ATA_EH_RESET; |
2705 | } |
2706 | |
2707 | /* |
2708 | * Gen-I has a different SELF_DIS bit, |
2709 | * different FREEZE bits, and no SERR bit: |
2710 | */ |
2711 | if (IS_GEN_I(hpriv)) { |
2712 | eh_freeze_mask = EDMA_EH_FREEZE_5; |
2713 | if (edma_err_cause & EDMA_ERR_SELF_DIS_5) { |
2714 | pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN; |
			ata_ehi_push_desc(ehi, "EDMA self-disable");
2716 | } |
2717 | } else { |
2718 | eh_freeze_mask = EDMA_EH_FREEZE; |
2719 | if (edma_err_cause & EDMA_ERR_SELF_DIS) { |
2720 | pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN; |
			ata_ehi_push_desc(ehi, "EDMA self-disable");
2722 | } |
2723 | if (edma_err_cause & EDMA_ERR_SERR) { |
			ata_ehi_push_desc(ehi, "SError=%08x", serr);
2725 | err_mask |= AC_ERR_ATA_BUS; |
2726 | action |= ATA_EH_RESET; |
2727 | } |
2728 | } |
2729 | |
2730 | if (!err_mask) { |
2731 | err_mask = AC_ERR_OTHER; |
2732 | action |= ATA_EH_RESET; |
2733 | } |
2734 | |
2735 | ehi->serror |= serr; |
2736 | ehi->action |= action; |
2737 | |
2738 | if (qc) |
2739 | qc->err_mask |= err_mask; |
2740 | else |
2741 | ehi->err_mask |= err_mask; |
2742 | |
2743 | if (err_mask == AC_ERR_DEV) { |
2744 | /* |
2745 | * Cannot do ata_port_freeze() here, |
2746 | * because it would kill PIO access, |
2747 | * which is needed for further diagnosis. |
2748 | */ |
2749 | mv_eh_freeze(ap); |
2750 | abort = 1; |
2751 | } else if (edma_err_cause & eh_freeze_mask) { |
2752 | /* |
2753 | * Note to self: ata_port_freeze() calls ata_port_abort() |
2754 | */ |
2755 | ata_port_freeze(ap); |
2756 | } else { |
2757 | abort = 1; |
2758 | } |
2759 | |
2760 | if (abort) { |
2761 | if (qc) |
			ata_link_abort(qc->dev->link);
2763 | else |
2764 | ata_port_abort(ap); |
2765 | } |
2766 | } |
2767 | |
2768 | static bool mv_process_crpb_response(struct ata_port *ap, |
2769 | struct mv_crpb *response, unsigned int tag, int ncq_enabled) |
2770 | { |
2771 | u8 ata_status; |
2772 | u16 edma_status = le16_to_cpu(response->flags); |
2773 | |
2774 | /* |
2775 | * edma_status from a response queue entry: |
2776 | * LSB is from EDMA_ERR_IRQ_CAUSE (non-NCQ only). |
2777 | * MSB is saved ATA status from command completion. |
2778 | */ |
2779 | if (!ncq_enabled) { |
2780 | u8 err_cause = edma_status & 0xff & ~EDMA_ERR_DEV; |
2781 | if (err_cause) { |
2782 | /* |
2783 | * Error will be seen/handled by |
2784 | * mv_err_intr(). So do nothing at all here. |
2785 | */ |
2786 | return false; |
2787 | } |
2788 | } |
2789 | ata_status = edma_status >> CRPB_FLAG_STATUS_SHIFT; |
	if (!ac_err_mask(ata_status))
2791 | return true; |
2792 | /* else: leave it for mv_err_intr() */ |
2793 | return false; |
2794 | } |
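
/*
 * Worked example of the CRPB flags decoding above (hypothetical
 * response value, and assuming CRPB_FLAG_STATUS_SHIFT is 8):
 * a response with flags 0x5000 has a zero low byte (no EDMA error
 * cause) and ata_status = 0x5000 >> 8 = 0x50, i.e. DRDY set and ERR
 * clear. ac_err_mask(0x50) is 0, so the command is counted as
 * successfully completed. A saved status with ATA_ERR set would
 * instead be left for mv_err_intr() to handle.
 */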
2795 | |
2796 | static void mv_process_crpb_entries(struct ata_port *ap, struct mv_port_priv *pp) |
2797 | { |
2798 | void __iomem *port_mmio = mv_ap_base(ap); |
2799 | struct mv_host_priv *hpriv = ap->host->private_data; |
2800 | u32 in_index; |
2801 | bool work_done = false; |
2802 | u32 done_mask = 0; |
2803 | int ncq_enabled = (pp->pp_flags & MV_PP_FLAG_NCQ_EN); |
2804 | |
2805 | /* Get the hardware queue position index */ |
	in_index = (readl(port_mmio + EDMA_RSP_Q_IN_PTR)
			>> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;

	/* Process new responses received since the last time we looked */
	while (in_index != pp->resp_idx) {
2811 | unsigned int tag; |
2812 | struct mv_crpb *response = &pp->crpb[pp->resp_idx]; |
2813 | |
2814 | pp->resp_idx = (pp->resp_idx + 1) & MV_MAX_Q_DEPTH_MASK; |
2815 | |
2816 | if (IS_GEN_I(hpriv)) { |
2817 | /* 50xx: no NCQ, only one command active at a time */ |
2818 | tag = ap->link.active_tag; |
2819 | } else { |
2820 | /* Gen II/IIE: get command tag from CRPB entry */ |
2821 | tag = le16_to_cpu(response->id) & 0x1f; |
2822 | } |
2823 | if (mv_process_crpb_response(ap, response, tag, ncq_enabled)) |
2824 | done_mask |= 1 << tag; |
2825 | work_done = true; |
2826 | } |
2827 | |
2828 | if (work_done) { |
		ata_qc_complete_multiple(ap, ata_qc_get_active(ap) ^ done_mask);
2830 | |
2831 | /* Update the software queue position index in hardware */ |
		writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) |
			 (pp->resp_idx << EDMA_RSP_Q_PTR_SHIFT),
			 port_mmio + EDMA_RSP_Q_OUT_PTR);
2835 | } |
2836 | } |
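
/*
 * Worked example of the completion bitmap handling above (hypothetical
 * tag values, for illustration): if NCQ commands with tags 0, 1 and 2
 * are active (ata_qc_get_active(ap) == 0x7) and the new CRPB entries
 * complete tags 0 and 2, then done_mask == 0x5 and
 * ata_qc_get_active(ap) ^ done_mask == 0x2. Passing that new active
 * mask to ata_qc_complete_multiple() completes tags 0 and 2 while
 * leaving tag 1 outstanding.
 */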
2837 | |
2838 | static void mv_port_intr(struct ata_port *ap, u32 port_cause) |
2839 | { |
2840 | struct mv_port_priv *pp; |
2841 | int edma_was_enabled; |
2842 | |
2843 | /* |
2844 | * Grab a snapshot of the EDMA_EN flag setting, |
2845 | * so that we have a consistent view for this port, |
	 * even if one of the routines we call changes it.
2847 | */ |
2848 | pp = ap->private_data; |
2849 | edma_was_enabled = (pp->pp_flags & MV_PP_FLAG_EDMA_EN); |
2850 | /* |
2851 | * Process completed CRPB response(s) before other events. |
2852 | */ |
2853 | if (edma_was_enabled && (port_cause & DONE_IRQ)) { |
2854 | mv_process_crpb_entries(ap, pp); |
2855 | if (pp->pp_flags & MV_PP_FLAG_DELAYED_EH) |
2856 | mv_handle_fbs_ncq_dev_err(ap); |
2857 | } |
2858 | /* |
2859 | * Handle chip-reported errors, or continue on to handle PIO. |
2860 | */ |
2861 | if (unlikely(port_cause & ERR_IRQ)) { |
2862 | mv_err_intr(ap); |
2863 | } else if (!edma_was_enabled) { |
2864 | struct ata_queued_cmd *qc = mv_get_active_qc(ap); |
2865 | if (qc) |
2866 | ata_bmdma_port_intr(ap, qc); |
2867 | else |
2868 | mv_unexpected_intr(ap, edma_was_enabled); |
2869 | } |
2870 | } |
2871 | |
2872 | /** |
2873 | * mv_host_intr - Handle all interrupts on the given host controller |
2874 | * @host: host specific structure |
2875 | * @main_irq_cause: Main interrupt cause register for the chip. |
2876 | * |
2877 | * LOCKING: |
2878 | * Inherited from caller. |
2879 | */ |
2880 | static int mv_host_intr(struct ata_host *host, u32 main_irq_cause) |
2881 | { |
2882 | struct mv_host_priv *hpriv = host->private_data; |
2883 | void __iomem *mmio = hpriv->base, *hc_mmio; |
2884 | unsigned int handled = 0, port; |
2885 | |
2886 | /* If asserted, clear the "all ports" IRQ coalescing bit */ |
2887 | if (main_irq_cause & ALL_PORTS_COAL_DONE) |
		writel(~ALL_PORTS_COAL_IRQ, mmio + IRQ_COAL_CAUSE);
2889 | |
2890 | for (port = 0; port < hpriv->n_ports; port++) { |
2891 | struct ata_port *ap = host->ports[port]; |
2892 | unsigned int p, shift, hardport, port_cause; |
2893 | |
2894 | MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport); |
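		/*
		 * Worked example (illustrative; assumes the macro computes
		 * shift = hc * HC_SHIFT + hardport * 2, with 4 ports per hc):
		 * port 5 is hardport 1 on hc 1, so its DONE_IRQ/ERR_IRQ
		 * pair sits at bit position HC_SHIFT + 2 of main_irq_cause.
		 */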
2895 | /* |
2896 | * Each hc within the host has its own hc_irq_cause register, |
		 * where the bits for the interrupting ports get ack'd.
2898 | */ |
2899 | if (hardport == 0) { /* first port on this hc ? */ |
2900 | u32 hc_cause = (main_irq_cause >> shift) & HC0_IRQ_PEND; |
2901 | u32 port_mask, ack_irqs; |
2902 | /* |
2903 | * Skip this entire hc if nothing pending for any ports |
2904 | */ |
2905 | if (!hc_cause) { |
2906 | port += MV_PORTS_PER_HC - 1; |
2907 | continue; |
2908 | } |
2909 | /* |
2910 | * We don't need/want to read the hc_irq_cause register, |
2911 | * because doing so hurts performance, and |
2912 | * main_irq_cause already gives us everything we need. |
2913 | * |
2914 | * But we do have to *write* to the hc_irq_cause to ack |
2915 | * the ports that we are handling this time through. |
2916 | * |
2917 | * This requires that we create a bitmap for those |
2918 | * ports which interrupted us, and use that bitmap |
2919 | * to ack (only) those ports via hc_irq_cause. |
2920 | */ |
2921 | ack_irqs = 0; |
2922 | if (hc_cause & PORTS_0_3_COAL_DONE) |
2923 | ack_irqs = HC_COAL_IRQ; |
2924 | for (p = 0; p < MV_PORTS_PER_HC; ++p) { |
2925 | if ((port + p) >= hpriv->n_ports) |
2926 | break; |
2927 | port_mask = (DONE_IRQ | ERR_IRQ) << (p * 2); |
2928 | if (hc_cause & port_mask) |
2929 | ack_irqs |= (DMA_IRQ | DEV_IRQ) << p; |
2930 | } |
			hc_mmio = mv_hc_base_from_port(mmio, port);
			writelfl(~ack_irqs, hc_mmio + HC_IRQ_CAUSE);
2933 | handled = 1; |
2934 | } |
2935 | /* |
2936 | * Handle interrupts signalled for this port: |
2937 | */ |
2938 | port_cause = (main_irq_cause >> shift) & (DONE_IRQ | ERR_IRQ); |
2939 | if (port_cause) |
2940 | mv_port_intr(ap, port_cause); |
2941 | } |
2942 | return handled; |
2943 | } |
2944 | |
2945 | static int mv_pci_error(struct ata_host *host, void __iomem *mmio) |
2946 | { |
2947 | struct mv_host_priv *hpriv = host->private_data; |
2948 | struct ata_port *ap; |
2949 | struct ata_queued_cmd *qc; |
2950 | struct ata_eh_info *ehi; |
2951 | unsigned int i, err_mask, printed = 0; |
2952 | u32 err_cause; |
2953 | |
	err_cause = readl(mmio + hpriv->irq_cause_offset);

	dev_err(host->dev, "PCI ERROR; PCI IRQ cause=0x%08x\n", err_cause);

	dev_dbg(host->dev, "%s: All regs @ PCI error\n", __func__);
	mv_dump_all_regs(mmio, to_pci_dev(host->dev));

	writelfl(0, mmio + hpriv->irq_cause_offset);
2962 | |
2963 | for (i = 0; i < host->n_ports; i++) { |
2964 | ap = host->ports[i]; |
		if (!ata_link_offline(&ap->link)) {
2966 | ehi = &ap->link.eh_info; |
2967 | ata_ehi_clear_desc(ehi); |
2968 | if (!printed++) |
				ata_ehi_push_desc(ehi,
					"PCI err cause 0x%08x", err_cause);
2971 | err_mask = AC_ERR_HOST_BUS; |
2972 | ehi->action = ATA_EH_RESET; |
			qc = ata_qc_from_tag(ap, ap->link.active_tag);
2974 | if (qc) |
2975 | qc->err_mask |= err_mask; |
2976 | else |
2977 | ehi->err_mask |= err_mask; |
2978 | |
2979 | ata_port_freeze(ap); |
2980 | } |
2981 | } |
2982 | return 1; /* handled */ |
2983 | } |
2984 | |
2985 | /** |
2986 | * mv_interrupt - Main interrupt event handler |
2987 | * @irq: unused |
2988 | * @dev_instance: private data; in this case the host structure |
2989 | * |
 * Read the read-only main interrupt cause register to determine
 * if any host controllers have pending interrupts. If so, call
 * the lower level routine to handle them. Also check for PCI
 * errors, which are only reported here.
2994 | * |
2995 | * LOCKING: |
2996 | * This routine holds the host lock while processing pending |
2997 | * interrupts. |
2998 | */ |
2999 | static irqreturn_t mv_interrupt(int irq, void *dev_instance) |
3000 | { |
3001 | struct ata_host *host = dev_instance; |
3002 | struct mv_host_priv *hpriv = host->private_data; |
3003 | unsigned int handled = 0; |
3004 | int using_msi = hpriv->hp_flags & MV_HP_FLAG_MSI; |
3005 | u32 main_irq_cause, pending_irqs; |
3006 | |
	spin_lock(&host->lock);
3008 | |
3009 | /* for MSI: block new interrupts while in here */ |
3010 | if (using_msi) |
		mv_write_main_irq_mask(0, hpriv);
3012 | |
	main_irq_cause = readl(hpriv->main_irq_cause_addr);
3014 | pending_irqs = main_irq_cause & hpriv->main_irq_mask; |
3015 | /* |
3016 | * Deal with cases where we either have nothing pending, or have read |
3017 | * a bogus register value which can indicate HW removal or PCI fault. |
3018 | */ |
3019 | if (pending_irqs && main_irq_cause != 0xffffffffU) { |
3020 | if (unlikely((pending_irqs & PCI_ERR) && !IS_SOC(hpriv))) |
			handled = mv_pci_error(host, hpriv->base);
		else
			handled = mv_host_intr(host, pending_irqs);
3024 | } |
3025 | |
3026 | /* for MSI: unmask; interrupt cause bits will retrigger now */ |
3027 | if (using_msi) |
		mv_write_main_irq_mask(hpriv->main_irq_mask, hpriv);
3029 | |
	spin_unlock(&host->lock);
3031 | |
3032 | return IRQ_RETVAL(handled); |
3033 | } |
3034 | |
3035 | static unsigned int mv5_scr_offset(unsigned int sc_reg_in) |
3036 | { |
3037 | unsigned int ofs; |
3038 | |
3039 | switch (sc_reg_in) { |
3040 | case SCR_STATUS: |
3041 | case SCR_ERROR: |
3042 | case SCR_CONTROL: |
3043 | ofs = sc_reg_in * sizeof(u32); |
3044 | break; |
3045 | default: |
3046 | ofs = 0xffffffffU; |
3047 | break; |
3048 | } |
3049 | return ofs; |
3050 | } |
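
/*
 * Worked example for mv5_scr_offset() above: the libata SCR indices
 * SCR_STATUS, SCR_ERROR and SCR_CONTROL are 0, 1 and 2, so they map to
 * byte offsets 0x0, 0x4 and 0x8 within the port's PHY register block.
 * Any other index yields the 0xffffffffU sentinel, which the SCR
 * read/write helpers below turn into -EINVAL.
 */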
3051 | |
3052 | static int mv5_scr_read(struct ata_link *link, unsigned int sc_reg_in, u32 *val) |
3053 | { |
3054 | struct mv_host_priv *hpriv = link->ap->host->private_data; |
3055 | void __iomem *mmio = hpriv->base; |
	void __iomem *addr = mv5_phy_base(mmio, link->ap->port_no);
3057 | unsigned int ofs = mv5_scr_offset(sc_reg_in); |
3058 | |
3059 | if (ofs != 0xffffffffU) { |
		*val = readl(addr + ofs);
3061 | return 0; |
3062 | } else |
3063 | return -EINVAL; |
3064 | } |
3065 | |
3066 | static int mv5_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val) |
3067 | { |
3068 | struct mv_host_priv *hpriv = link->ap->host->private_data; |
3069 | void __iomem *mmio = hpriv->base; |
	void __iomem *addr = mv5_phy_base(mmio, link->ap->port_no);
3071 | unsigned int ofs = mv5_scr_offset(sc_reg_in); |
3072 | |
3073 | if (ofs != 0xffffffffU) { |
		writelfl(val, addr + ofs);
3075 | return 0; |
3076 | } else |
3077 | return -EINVAL; |
3078 | } |
3079 | |
3080 | static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio) |
3081 | { |
3082 | struct pci_dev *pdev = to_pci_dev(host->dev); |
3083 | int early_5080; |
3084 | |
3085 | early_5080 = (pdev->device == 0x5080) && (pdev->revision == 0); |
3086 | |
3087 | if (!early_5080) { |
		u32 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
		tmp |= (1 << 0);
		writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
3091 | } |
3092 | |
3093 | mv_reset_pci_bus(host, mmio); |
3094 | } |
3095 | |
3096 | static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio) |
3097 | { |
	writel(0x0fcfffff, mmio + FLASH_CTL);
3099 | } |
3100 | |
3101 | static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx, |
3102 | void __iomem *mmio) |
3103 | { |
	void __iomem *phy_mmio = mv5_phy_base(mmio, idx);
	u32 tmp;

	tmp = readl(phy_mmio + MV5_PHY_MODE);
3108 | |
3109 | hpriv->signal[idx].pre = tmp & 0x1800; /* bits 12:11 */ |
3110 | hpriv->signal[idx].amps = tmp & 0xe0; /* bits 7:5 */ |
3111 | } |
3112 | |
3113 | static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio) |
3114 | { |
3115 | u32 tmp; |
3116 | |
3117 | writel(val: 0, addr: mmio + GPIO_PORT_CTL); |
3118 | |
3119 | /* FIXME: handle MV_HP_ERRATA_50XXB2 errata */ |
3120 | |
3121 | tmp = readl(addr: mmio + MV_PCI_EXP_ROM_BAR_CTL); |
3122 | tmp |= ~(1 << 0); |
3123 | writel(val: tmp, addr: mmio + MV_PCI_EXP_ROM_BAR_CTL); |
3124 | } |
3125 | |
3126 | static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio, |
3127 | unsigned int port) |
3128 | { |
3129 | void __iomem *phy_mmio = mv5_phy_base(mmio, port); |
3130 | const u32 mask = (1<<12) | (1<<11) | (1<<7) | (1<<6) | (1<<5); |
3131 | u32 tmp; |
3132 | int fix_apm_sq = (hpriv->hp_flags & MV_HP_ERRATA_50XXB0); |
3133 | |
3134 | if (fix_apm_sq) { |
3135 | tmp = readl(addr: phy_mmio + MV5_LTMODE); |
3136 | tmp |= (1 << 19); |
3137 | writel(val: tmp, addr: phy_mmio + MV5_LTMODE); |
3138 | |
3139 | tmp = readl(addr: phy_mmio + MV5_PHY_CTL); |
3140 | tmp &= ~0x3; |
3141 | tmp |= 0x1; |
3142 | writel(val: tmp, addr: phy_mmio + MV5_PHY_CTL); |
3143 | } |
3144 | |
3145 | tmp = readl(addr: phy_mmio + MV5_PHY_MODE); |
3146 | tmp &= ~mask; |
3147 | tmp |= hpriv->signal[port].pre; |
3148 | tmp |= hpriv->signal[port].amps; |
3149 | writel(val: tmp, addr: phy_mmio + MV5_PHY_MODE); |
3150 | } |
3151 | |
3152 | |
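/*
 * The ZERO() macro is redefined before each group of reset helpers below,
 * so the same "clear this register" shorthand can target whichever
 * register block (per-port, per-HC, or PCI bridge) is being zeroed.
 */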
#undef ZERO
#define ZERO(reg) writel(0, port_mmio + (reg))
static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio,
			      unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	mv_reset_channel(hpriv, mmio, port);

	ZERO(0x028);	/* command */
	writel(0x11f, port_mmio + EDMA_CFG);
	ZERO(0x004);	/* timer */
	ZERO(0x008);	/* irq err cause */
	ZERO(0x00c);	/* irq err mask */
	ZERO(0x010);	/* rq bah */
	ZERO(0x014);	/* rq inp */
	ZERO(0x018);	/* rq outp */
	ZERO(0x01c);	/* respq bah */
	ZERO(0x024);	/* respq outp */
	ZERO(0x020);	/* respq inp */
	ZERO(0x02c);	/* test control */
	writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
}
#undef ZERO

#define ZERO(reg) writel(0, hc_mmio + (reg))
static void mv5_reset_one_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int hc)
{
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	u32 tmp;

	ZERO(0x00c);
	ZERO(0x010);
	ZERO(0x014);
	ZERO(0x018);

	tmp = readl(hc_mmio + 0x20);
	tmp &= 0x1c1c1c1c;
	tmp |= 0x03030303;
	writel(tmp, hc_mmio + 0x20);
}
#undef ZERO

static int mv5_reset_hc(struct ata_host *host, void __iomem *mmio,
			unsigned int n_hc)
{
	struct mv_host_priv *hpriv = host->private_data;
	unsigned int hc, port;

	for (hc = 0; hc < n_hc; hc++) {
		for (port = 0; port < MV_PORTS_PER_HC; port++)
			mv5_reset_hc_port(hpriv, mmio,
					  (hc * MV_PORTS_PER_HC) + port);

		mv5_reset_one_hc(hpriv, mmio, hc);
	}

	return 0;
}

#undef ZERO
#define ZERO(reg) writel(0, mmio + (reg))
static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio)
{
	struct mv_host_priv *hpriv = host->private_data;
	u32 tmp;

	tmp = readl(mmio + MV_PCI_MODE);
	tmp &= 0xff00ffff;
	writel(tmp, mmio + MV_PCI_MODE);

	ZERO(MV_PCI_DISC_TIMER);
	ZERO(MV_PCI_MSI_TRIGGER);
	writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT);
	ZERO(MV_PCI_SERR_MASK);
	ZERO(hpriv->irq_cause_offset);
	ZERO(hpriv->irq_mask_offset);
	ZERO(MV_PCI_ERR_LOW_ADDRESS);
	ZERO(MV_PCI_ERR_HIGH_ADDRESS);
	ZERO(MV_PCI_ERR_ATTRIBUTE);
	ZERO(MV_PCI_ERR_COMMAND);
}
#undef ZERO

static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	u32 tmp;

	mv5_reset_flash(hpriv, mmio);

	tmp = readl(mmio + GPIO_PORT_CTL);
	tmp &= 0x3;
	tmp |= (1 << 5) | (1 << 6);
	writel(tmp, mmio + GPIO_PORT_CTL);
}

/*
 * mv6_reset_hc - Perform the 6xxx global soft reset
 * @mmio: base address of the HBA
 *
 * This routine only applies to 6xxx parts.
 *
 * LOCKING:
 * Inherited from caller.
 */
static int mv6_reset_hc(struct ata_host *host, void __iomem *mmio,
			unsigned int n_hc)
{
	void __iomem *reg = mmio + PCI_MAIN_CMD_STS;
	int i, rc = 0;
	u32 t;

	/* Following procedure defined in PCI "main command and status
	 * register" table.
	 */
	t = readl(reg);
	writel(t | STOP_PCI_MASTER, reg);

	for (i = 0; i < 1000; i++) {
		udelay(1);
		t = readl(reg);
		if (PCI_MASTER_EMPTY & t)
			break;
	}
	if (!(PCI_MASTER_EMPTY & t)) {
		dev_err(host->dev, "PCI master won't flush\n");
		rc = 1;
		goto done;
	}

	/* set reset */
	i = 5;
	do {
		writel(t | GLOB_SFT_RST, reg);
		t = readl(reg);
		udelay(1);
	} while (!(GLOB_SFT_RST & t) && (i-- > 0));

	if (!(GLOB_SFT_RST & t)) {
		dev_err(host->dev, "can't set global reset\n");
		rc = 1;
		goto done;
	}

	/* clear reset and *reenable the PCI master* (not mentioned in spec) */
	i = 5;
	do {
		writel(t & ~(GLOB_SFT_RST | STOP_PCI_MASTER), reg);
		t = readl(reg);
		udelay(1);
	} while ((GLOB_SFT_RST & t) && (i-- > 0));

	if (GLOB_SFT_RST & t) {
		dev_err(host->dev, "can't clear global reset\n");
		rc = 1;
	}
done:
	return rc;
}

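/*
 * On Gen-II parts, bit 0 of RESET_CFG indicates whether per-port signal
 * settings are valid: when it reads 0, fixed default amplitude and
 * pre-emphasis values are used instead of the values in PHY_MODE2.
 */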
static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
			    void __iomem *mmio)
{
	void __iomem *port_mmio;
	u32 tmp;

	tmp = readl(mmio + RESET_CFG);
	if ((tmp & (1 << 0)) == 0) {
		hpriv->signal[idx].amps = 0x7 << 8;
		hpriv->signal[idx].pre = 0x1 << 5;
		return;
	}

	port_mmio = mv_port_base(mmio, idx);
	tmp = readl(port_mmio + PHY_MODE2);

	hpriv->signal[idx].amps = tmp & 0x700;	/* bits 10:8 */
	hpriv->signal[idx].pre = tmp & 0xe0;	/* bits 7:5 */
}

static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	writel(0x00000060, mmio + GPIO_PORT_CTL);
}

static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	u32 hp_flags = hpriv->hp_flags;
	int fix_phy_mode2 =
		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
	int fix_phy_mode4 =
		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
	u32 m2, m3;

	if (fix_phy_mode2) {
		m2 = readl(port_mmio + PHY_MODE2);
		m2 &= ~(1 << 16);
		m2 |= (1 << 31);
		writel(m2, port_mmio + PHY_MODE2);

		udelay(200);

		m2 = readl(port_mmio + PHY_MODE2);
		m2 &= ~((1 << 16) | (1 << 31));
		writel(m2, port_mmio + PHY_MODE2);

		udelay(200);
	}

	/*
	 * Gen-II/IIe PHY_MODE3 errata RM#2:
	 * Achieves better receiver noise performance than the h/w default:
	 */
	m3 = readl(port_mmio + PHY_MODE3);
	m3 = (m3 & 0x1f) | (0x5555601 << 5);

	/* Guideline 88F5182 (GL# SATA-S11) */
	if (IS_SOC(hpriv))
		m3 &= ~0x1c;

	if (fix_phy_mode4) {
		u32 m4 = readl(port_mmio + PHY_MODE4);
		/*
		 * Enforce reserved-bit restrictions on GenIIe devices only.
		 * For earlier chipsets, force only the internal config field
		 * (workaround for errata FEr SATA#10 part 1).
		 */
		if (IS_GEN_IIE(hpriv))
			m4 = (m4 & ~PHY_MODE4_RSVD_ZEROS) | PHY_MODE4_RSVD_ONES;
		else
			m4 = (m4 & ~PHY_MODE4_CFG_MASK) | PHY_MODE4_CFG_VALUE;
		writel(m4, port_mmio + PHY_MODE4);
	}
	/*
	 * Workaround for 60x1-B2 errata SATA#13:
	 * Any write to PHY_MODE4 (above) may corrupt PHY_MODE3,
	 * so we must always rewrite PHY_MODE3 after PHY_MODE4.
	 * Or ensure we use writelfl() when writing PHY_MODE4.
	 */
	writel(m3, port_mmio + PHY_MODE3);

	/* Revert values of pre-emphasis and signal amps to the saved ones */
	m2 = readl(port_mmio + PHY_MODE2);

	m2 &= ~MV_M2_PREAMP_MASK;
	m2 |= hpriv->signal[port].amps;
	m2 |= hpriv->signal[port].pre;
	m2 &= ~(1 << 16);

	/* according to mvSata 3.6.1, some IIE values are fixed */
	if (IS_GEN_IIE(hpriv)) {
		m2 &= ~0xC30FF01F;
		m2 |= 0x0000900F;
	}

	writel(m2, port_mmio + PHY_MODE2);
}

/* TODO: use the generic LED interface to configure the SATA Presence */
/* & Activity LEDs on the board */
static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
			       void __iomem *mmio)
{
	return;
}

static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
			       void __iomem *mmio)
{
	void __iomem *port_mmio;
	u32 tmp;

	port_mmio = mv_port_base(mmio, idx);
	tmp = readl(port_mmio + PHY_MODE2);

	hpriv->signal[idx].amps = tmp & 0x700;	/* bits 10:8 */
	hpriv->signal[idx].pre = tmp & 0xe0;	/* bits 7:5 */
}

#undef ZERO
#define ZERO(reg) writel(0, port_mmio + (reg))
static void mv_soc_reset_hc_port(struct mv_host_priv *hpriv,
				 void __iomem *mmio, unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	mv_reset_channel(hpriv, mmio, port);

	ZERO(0x028);	/* command */
	writel(0x101f, port_mmio + EDMA_CFG);
	ZERO(0x004);	/* timer */
	ZERO(0x008);	/* irq err cause */
	ZERO(0x00c);	/* irq err mask */
	ZERO(0x010);	/* rq bah */
	ZERO(0x014);	/* rq inp */
	ZERO(0x018);	/* rq outp */
	ZERO(0x01c);	/* respq bah */
	ZERO(0x024);	/* respq outp */
	ZERO(0x020);	/* respq inp */
	ZERO(0x02c);	/* test control */
	writel(0x800, port_mmio + EDMA_IORDY_TMOUT);
}

#undef ZERO

#define ZERO(reg) writel(0, hc_mmio + (reg))
static void mv_soc_reset_one_hc(struct mv_host_priv *hpriv,
				void __iomem *mmio)
{
	void __iomem *hc_mmio = mv_hc_base(mmio, 0);

	ZERO(0x00c);
	ZERO(0x010);
	ZERO(0x014);
}

#undef ZERO

static int mv_soc_reset_hc(struct ata_host *host,
			   void __iomem *mmio, unsigned int n_hc)
{
	struct mv_host_priv *hpriv = host->private_data;
	unsigned int port;

	for (port = 0; port < hpriv->n_ports; port++)
		mv_soc_reset_hc_port(hpriv, mmio, port);

	mv_soc_reset_one_hc(hpriv, mmio);

	return 0;
}

static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
			       void __iomem *mmio)
{
	return;
}

static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio)
{
	return;
}

static void mv_soc_65n_phy_errata(struct mv_host_priv *hpriv,
				  void __iomem *mmio, unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);
	u32 reg;

	reg = readl(port_mmio + PHY_MODE3);
	reg &= ~(0x3 << 27);	/* SELMUPF (bits 28:27) to 1 */
	reg |= (0x1 << 27);
	reg &= ~(0x3 << 29);	/* SELMUPI (bits 30:29) to 1 */
	reg |= (0x1 << 29);
	writel(reg, port_mmio + PHY_MODE3);

	reg = readl(port_mmio + PHY_MODE4);
	reg &= ~0x1;	/* SATU_OD8 (bit 0) to 0, reserved bit 16 must be set */
	reg |= (0x1 << 16);
	writel(reg, port_mmio + PHY_MODE4);

	reg = readl(port_mmio + PHY_MODE9_GEN2);
	reg &= ~0xf;	/* TXAMP[3:0] (bits 3:0) to 8 */
	reg |= 0x8;
	reg &= ~(0x1 << 14);	/* TXAMP[4] (bit 14) to 0 */
	writel(reg, port_mmio + PHY_MODE9_GEN2);

	reg = readl(port_mmio + PHY_MODE9_GEN1);
	reg &= ~0xf;	/* TXAMP[3:0] (bits 3:0) to 8 */
	reg |= 0x8;
	reg &= ~(0x1 << 14);	/* TXAMP[4] (bit 14) to 0 */
	writel(reg, port_mmio + PHY_MODE9_GEN1);
}

/*
 * soc_is_65n - check whether the SoC is a 65 nm device
 *
 * Detect the SoC type by reading the PHYCFG_OFS register: it exists only
 * on the 65 nm devices, where it reads back non-zero; on older devices
 * the read returns 0.
 */
static bool soc_is_65n(struct mv_host_priv *hpriv)
{
	void __iomem *port0_mmio = mv_port_base(hpriv->base, 0);

	if (readl(port0_mmio + PHYCFG_OFS))
		return true;
	return false;
}

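/*
 * SATA_IFCFG holds the interface speed configuration: the constant
 * written below comes from the chip spec, and bit 7 selects Gen2i
 * (3.0 Gb/s) operation when set.
 */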
static void mv_setup_ifcfg(void __iomem *port_mmio, int want_gen2i)
{
	u32 ifcfg = readl(port_mmio + SATA_IFCFG);

	ifcfg = (ifcfg & 0xf7f) | 0x9b1000;	/* from chip spec */
	if (want_gen2i)
		ifcfg |= (1 << 7);		/* enable gen2i speed */
	writelfl(ifcfg, port_mmio + SATA_IFCFG);
}

static void mv_reset_channel(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port_no)
{
	void __iomem *port_mmio = mv_port_base(mmio, port_no);

	/*
	 * The datasheet warns against setting EDMA_RESET when EDMA is active
	 * (but doesn't say what the problem might be).  So we first try
	 * to disable the EDMA engine before doing the EDMA_RESET operation.
	 */
	mv_stop_edma_engine(port_mmio);
	writelfl(EDMA_RESET, port_mmio + EDMA_CMD);

	if (!IS_GEN_I(hpriv)) {
		/* Enable 3.0gb/s link speed: this survives EDMA_RESET */
		mv_setup_ifcfg(port_mmio, 1);
	}
	/*
	 * Strobing EDMA_RESET here causes a hard reset of the SATA transport,
	 * link, and physical layers.  It resets all SATA interface registers
	 * (except for SATA_IFCFG), and issues a COMRESET to the dev.
	 */
	writelfl(EDMA_RESET, port_mmio + EDMA_CMD);
	udelay(25);	/* allow reset propagation */
	writelfl(0, port_mmio + EDMA_CMD);

	hpriv->ops->phy_errata(hpriv, mmio, port_no);

	if (IS_GEN_I(hpriv))
		usleep_range(500, 1000);
}

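/*
 * The low nibble of SATA_IFCTL selects which port-multiplier port
 * subsequent commands are routed to; rewrite it only when the target
 * actually changes, to avoid a needless register write.
 */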
static void mv_pmp_select(struct ata_port *ap, int pmp)
{
	if (sata_pmp_supported(ap)) {
		void __iomem *port_mmio = mv_ap_base(ap);
		u32 reg = readl(port_mmio + SATA_IFCTL);
		int old = reg & 0xf;

		if (old != pmp) {
			reg = (reg & ~0xf) | pmp;
			writelfl(reg, port_mmio + SATA_IFCTL);
		}
	}
}

static int mv_pmp_hardreset(struct ata_link *link, unsigned int *class,
			    unsigned long deadline)
{
	mv_pmp_select(link->ap, sata_srst_pmp(link));
	return sata_std_hardreset(link, class, deadline);
}

static int mv_softreset(struct ata_link *link, unsigned int *class,
			unsigned long deadline)
{
	mv_pmp_select(link->ap, sata_srst_pmp(link));
	return ata_sff_softreset(link, class, deadline);
}

static int mv_hardreset(struct ata_link *link, unsigned int *class,
			unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp = ap->private_data;
	void __iomem *mmio = hpriv->base;
	int rc, attempts = 0, extra = 0;
	u32 sstatus;
	bool online;

	mv_reset_channel(hpriv, mmio, ap->port_no);
	pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
	pp->pp_flags &=
	  ~(MV_PP_FLAG_FBS_EN | MV_PP_FLAG_NCQ_EN | MV_PP_FLAG_FAKE_ATA_BUSY);

	/* Workaround for errata FEr SATA#10 (part 2) */
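	/*
	 * Keep hard-resetting until SStatus settles to a usable value.
	 * If the link keeps coming up badly (sstatus == 0x121) on a
	 * Gen-II/IIe part, force 1.5 Gb/s after several attempts and
	 * extend the deadline once so the retry can complete.
	 */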
	do {
		const unsigned int *timing =
				sata_ehc_deb_timing(&link->eh_context);

		rc = sata_link_hardreset(link, timing, deadline + extra,
					 &online, NULL);
		rc = online ? -EAGAIN : rc;
		if (rc)
			return rc;
		sata_scr_read(link, SCR_STATUS, &sstatus);
		if (!IS_GEN_I(hpriv) && ++attempts >= 5 && sstatus == 0x121) {
			/* Force 1.5gb/s link speed and try again */
			mv_setup_ifcfg(mv_ap_base(ap), 0);
			if (time_after(jiffies + HZ, deadline))
				extra = HZ; /* only extend it once, max */
		}
	} while (sstatus != 0x0 && sstatus != 0x113 && sstatus != 0x123);
	mv_save_cached_regs(ap);
	mv_edma_cfg(ap, 0, 0);

	return rc;
}

static void mv_eh_freeze(struct ata_port *ap)
{
	mv_stop_edma(ap);
	mv_enable_port_irqs(ap, 0);
}

static void mv_eh_thaw(struct ata_port *ap)
{
	struct mv_host_priv *hpriv = ap->host->private_data;
	unsigned int port = ap->port_no;
	unsigned int hardport = mv_hardport_from_port(port);
	void __iomem *hc_mmio = mv_hc_base_from_port(hpriv->base, port);
	void __iomem *port_mmio = mv_ap_base(ap);
	u32 hc_irq_cause;

	/* clear EDMA errors on this port */
	writel(0, port_mmio + EDMA_ERR_IRQ_CAUSE);

	/* clear pending irq events */
	hc_irq_cause = ~((DEV_IRQ | DMA_IRQ) << hardport);
	writelfl(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE);

	mv_enable_port_irqs(ap, ERR_IRQ);
}

/**
 * mv_port_init - Perform some early initialization on a single port.
 * @port: libata data structure storing shadow register addresses
 * @port_mmio: base address of the port
 *
 * Initialize shadow register mmio addresses, clear outstanding
 * interrupts on the port, and unmask interrupts for the future
 * start of the port.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_port_init(struct ata_ioports *port, void __iomem *port_mmio)
{
	void __iomem *serr, *shd_base = port_mmio + SHD_BLK;

	/* PIO related setup */
	port->data_addr = shd_base + (sizeof(u32) * ATA_REG_DATA);
	port->error_addr =
		port->feature_addr = shd_base + (sizeof(u32) * ATA_REG_ERR);
	port->nsect_addr = shd_base + (sizeof(u32) * ATA_REG_NSECT);
	port->lbal_addr = shd_base + (sizeof(u32) * ATA_REG_LBAL);
	port->lbam_addr = shd_base + (sizeof(u32) * ATA_REG_LBAM);
	port->lbah_addr = shd_base + (sizeof(u32) * ATA_REG_LBAH);
	port->device_addr = shd_base + (sizeof(u32) * ATA_REG_DEVICE);
	port->status_addr =
		port->command_addr = shd_base + (sizeof(u32) * ATA_REG_STATUS);
	/* special case: control/altstatus doesn't have ATA_REG_ address */
	port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST;

	/* Clear any currently outstanding port interrupt conditions */
	serr = port_mmio + mv_scr_offset(SCR_ERROR);
	writelfl(readl(serr), serr);
	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE);

	/* unmask all non-transient EDMA error interrupts */
	writelfl(~EDMA_ERR_IRQ_TRANSIENT, port_mmio + EDMA_ERR_IRQ_MASK);
}

static unsigned int mv_in_pcix_mode(struct ata_host *host)
{
	struct mv_host_priv *hpriv = host->private_data;
	void __iomem *mmio = hpriv->base;
	u32 reg;

	if (IS_SOC(hpriv) || !IS_PCIE(hpriv))
		return 0;	/* not PCI-X capable */
	reg = readl(mmio + MV_PCI_MODE);
	if ((reg & MV_PCI_MODE_MASK) == 0)
		return 0;	/* conventional PCI mode */
	return 1;	/* chip is in PCI-X mode */
}

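/*
 * Cut-through operation is fine in PCI-X mode; in conventional PCI mode
 * it must stay off when the Master Read Trigger bit is set.
 */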
static int mv_pci_cut_through_okay(struct ata_host *host)
{
	struct mv_host_priv *hpriv = host->private_data;
	void __iomem *mmio = hpriv->base;
	u32 reg;

	if (!mv_in_pcix_mode(host)) {
		reg = readl(mmio + MV_PCI_COMMAND);
		if (reg & MV_PCI_COMMAND_MRDTRIG)
			return 0;	/* not okay */
	}
	return 1;	/* okay */
}

static void mv_60x1b2_errata_pci7(struct ata_host *host)
{
	struct mv_host_priv *hpriv = host->private_data;
	void __iomem *mmio = hpriv->base;

	/* workaround for 60x1-B2 errata PCI#7 */
	if (mv_in_pcix_mode(host)) {
		u32 reg = readl(mmio + MV_PCI_COMMAND);
		writelfl(reg & ~MV_PCI_COMMAND_MWRCOM, mmio + MV_PCI_COMMAND);
	}
}

static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	struct mv_host_priv *hpriv = host->private_data;
	u32 hp_flags = hpriv->hp_flags;

	switch (board_idx) {
	case chip_5080:
		hpriv->ops = &mv5xxx_ops;
		hp_flags |= MV_HP_GEN_I;

		switch (pdev->revision) {
		case 0x1:
			hp_flags |= MV_HP_ERRATA_50XXB0;
			break;
		case 0x3:
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		default:
			dev_warn(&pdev->dev,
				 "Applying 50XXB2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		}
		break;

	case chip_504x:
	case chip_508x:
		hpriv->ops = &mv5xxx_ops;
		hp_flags |= MV_HP_GEN_I;

		switch (pdev->revision) {
		case 0x0:
			hp_flags |= MV_HP_ERRATA_50XXB0;
			break;
		case 0x3:
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		default:
			dev_warn(&pdev->dev,
				 "Applying B2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		}
		break;

	case chip_604x:
	case chip_608x:
		hpriv->ops = &mv6xxx_ops;
		hp_flags |= MV_HP_GEN_II;

		switch (pdev->revision) {
		case 0x7:
			mv_60x1b2_errata_pci7(host);
			hp_flags |= MV_HP_ERRATA_60X1B2;
			break;
		case 0x9:
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		default:
			dev_warn(&pdev->dev,
				 "Applying B2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_60X1B2;
			break;
		}
		break;

	case chip_7042:
		hp_flags |= MV_HP_PCIE | MV_HP_CUT_THROUGH;
		if (pdev->vendor == PCI_VENDOR_ID_TTI &&
		    (pdev->device == 0x2300 || pdev->device == 0x2310))
		{
			/*
			 * Highpoint RocketRAID PCIe 23xx series cards:
			 *
			 * Unconfigured drives are treated as "Legacy"
			 * by the BIOS, and it overwrites sector 8 with
			 * a "Lgcy" metadata block prior to Linux boot.
			 *
			 * Configured drives (RAID or JBOD) leave sector 8
			 * alone, but instead overwrite a high numbered
			 * sector for the RAID metadata.  This sector can
			 * be determined exactly, by truncating the physical
			 * drive capacity to a nice even GB value.
			 *
			 * RAID metadata is at: (dev->n_sectors & ~0xfffff)
			 *
			 * Warn the user, lest they think we're just buggy.
			 */
			dev_warn(&pdev->dev, "Highpoint RocketRAID"
				" BIOS CORRUPTS DATA on all attached drives,"
				" regardless of if/how they are configured."
				" BEWARE!\n");
			dev_warn(&pdev->dev, "For data safety, do not"
				" use sectors 8-9 on \"Legacy\" drives,"
				" and avoid the final two gigabytes on"
				" all RocketRAID BIOS initialized drives.\n");
		}
		fallthrough;
	case chip_6042:
		hpriv->ops = &mv6xxx_ops;
		hp_flags |= MV_HP_GEN_IIE;
		if (board_idx == chip_6042 && mv_pci_cut_through_okay(host))
			hp_flags |= MV_HP_CUT_THROUGH;

		switch (pdev->revision) {
		case 0x2: /* Rev.B0: the first/only public release */
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		default:
			dev_warn(&pdev->dev,
				 "Applying 60X1C0 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		}
		break;
	case chip_soc:
		if (soc_is_65n(hpriv))
			hpriv->ops = &mv_soc_65n_ops;
		else
			hpriv->ops = &mv_soc_ops;
		hp_flags |= MV_HP_FLAG_SOC | MV_HP_GEN_IIE |
			MV_HP_ERRATA_60X1C0;
		break;

	default:
		dev_alert(host->dev, "BUG: invalid board index %u\n", board_idx);
		return -EINVAL;
	}

	hpriv->hp_flags = hp_flags;
	if (hp_flags & MV_HP_PCIE) {
		hpriv->irq_cause_offset	= PCIE_IRQ_CAUSE;
		hpriv->irq_mask_offset	= PCIE_IRQ_MASK;
		hpriv->unmask_all_irqs	= PCIE_UNMASK_ALL_IRQS;
	} else {
		hpriv->irq_cause_offset	= PCI_IRQ_CAUSE;
		hpriv->irq_mask_offset	= PCI_IRQ_MASK;
		hpriv->unmask_all_irqs	= PCI_UNMASK_ALL_IRQS;
	}

	return 0;
}

/**
 * mv_init_host - Perform some early initialization of the host.
 * @host: ATA host to initialize
 *
 * If possible, do an early global reset of the host.  Then do
 * our port init and clear/unmask all/relevant host interrupts.
 *
 * LOCKING:
 * Inherited from caller.
 */
static int mv_init_host(struct ata_host *host)
{
	int rc = 0, n_hc, port, hc;
	struct mv_host_priv *hpriv = host->private_data;
	void __iomem *mmio = hpriv->base;

	rc = mv_chip_id(host, hpriv->board_idx);
	if (rc)
		goto done;

	if (IS_SOC(hpriv)) {
		hpriv->main_irq_cause_addr = mmio + SOC_HC_MAIN_IRQ_CAUSE;
		hpriv->main_irq_mask_addr = mmio + SOC_HC_MAIN_IRQ_MASK;
	} else {
		hpriv->main_irq_cause_addr = mmio + PCI_HC_MAIN_IRQ_CAUSE;
		hpriv->main_irq_mask_addr = mmio + PCI_HC_MAIN_IRQ_MASK;
	}

	/* initialize shadow irq mask with register's value */
	hpriv->main_irq_mask = readl(hpriv->main_irq_mask_addr);

	/* global interrupt mask: 0 == mask everything */
	mv_set_main_irq_mask(host, ~0, 0);

	n_hc = mv_get_hc_count(host->ports[0]->flags);

	for (port = 0; port < host->n_ports; port++)
		if (hpriv->ops->read_preamp)
			hpriv->ops->read_preamp(hpriv, port, mmio);

	rc = hpriv->ops->reset_hc(host, mmio, n_hc);
	if (rc)
		goto done;

	hpriv->ops->reset_flash(hpriv, mmio);
	hpriv->ops->reset_bus(host, mmio);
	hpriv->ops->enable_leds(hpriv, mmio);

	for (port = 0; port < host->n_ports; port++) {
		struct ata_port *ap = host->ports[port];
		void __iomem *port_mmio = mv_port_base(mmio, port);

		mv_port_init(&ap->ioaddr, port_mmio);
	}

	for (hc = 0; hc < n_hc; hc++) {
		void __iomem *hc_mmio = mv_hc_base(mmio, hc);

		dev_dbg(host->dev, "HC%i: HC config=0x%08x HC IRQ cause "
			"(before clear)=0x%08x\n", hc,
			readl(hc_mmio + HC_CFG),
			readl(hc_mmio + HC_IRQ_CAUSE));

		/* Clear any currently outstanding hc interrupt conditions */
		writelfl(0, hc_mmio + HC_IRQ_CAUSE);
	}

	if (!IS_SOC(hpriv)) {
		/* Clear any currently outstanding host interrupt conditions */
		writelfl(0, mmio + hpriv->irq_cause_offset);

		/* and unmask interrupt generation for host regs */
		writelfl(hpriv->unmask_all_irqs, mmio + hpriv->irq_mask_offset);
	}

	/*
	 * enable only global host interrupts for now.
	 * The per-port interrupts get done later as ports are set up.
	 */
	mv_set_main_irq_mask(host, 0, PCI_ERR);
	mv_set_irq_coalescing(host, irq_coalescing_io_count,
			      irq_coalescing_usecs);
done:
	return rc;
}

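/*
 * Each queue pool is created with its alignment equal to its size, so
 * every allocation is naturally aligned and never straddles a boundary
 * of its own size.
 */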
static int mv_create_dma_pools(struct mv_host_priv *hpriv, struct device *dev)
{
	hpriv->crqb_pool = dmam_pool_create("crqb_q", dev, MV_CRQB_Q_SZ,
					    MV_CRQB_Q_SZ, 0);
	if (!hpriv->crqb_pool)
		return -ENOMEM;

	hpriv->crpb_pool = dmam_pool_create("crpb_q", dev, MV_CRPB_Q_SZ,
					    MV_CRPB_Q_SZ, 0);
	if (!hpriv->crpb_pool)
		return -ENOMEM;

	hpriv->sg_tbl_pool = dmam_pool_create("sg_tbl", dev, MV_SG_TBL_SZ,
					      MV_SG_TBL_SZ, 0);
	if (!hpriv->sg_tbl_pool)
		return -ENOMEM;

	return 0;
}

static void mv_conf_mbus_windows(struct mv_host_priv *hpriv,
				 const struct mbus_dram_target_info *dram)
{
	int i;

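	/* Start from a clean slate: disable all four address decode windows. */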
	for (i = 0; i < 4; i++) {
		writel(0, hpriv->base + WINDOW_CTRL(i));
		writel(0, hpriv->base + WINDOW_BASE(i));
	}

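	/*
	 * Program one window per DRAM chip-select: WINDOW_CTRL packs the
	 * size mask into bits 31:16, the mbus attribute into bits 15:8,
	 * the target id into bits 7:4, and the enable flag into bit 0.
	 */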
	for (i = 0; i < dram->num_cs; i++) {
		const struct mbus_dram_window *cs = dram->cs + i;

		writel(((cs->size - 1) & 0xffff0000) |
		       (cs->mbus_attr << 8) |
		       (dram->mbus_dram_target_id << 4) | 1,
		       hpriv->base + WINDOW_CTRL(i));
		writel(cs->base, hpriv->base + WINDOW_BASE(i));
	}
}

/**
 * mv_platform_probe - handle a positive probe of an SoC Marvell host
 * @pdev: platform device found
 *
 * LOCKING:
 * Inherited from caller.
 */
static int mv_platform_probe(struct platform_device *pdev)
{
	const struct mv_sata_platform_data *mv_platform_data;
	const struct mbus_dram_target_info *dram;
	const struct ata_port_info *ppi[] =
	    { &mv_port_info[chip_soc], NULL };
	struct ata_host *host;
	struct mv_host_priv *hpriv;
	struct resource *res;
	int n_ports = 0, irq = 0;
	int rc;
	int port;

	ata_print_version_once(&pdev->dev, DRV_VERSION);

	/*
	 * Simple resource validation ..
	 */
	if (unlikely(pdev->num_resources != 1)) {
		dev_err(&pdev->dev, "invalid number of resources\n");
		return -EINVAL;
	}

	/*
	 * Get the register base first
	 */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (res == NULL)
		return -EINVAL;

	/* allocate host */
	if (pdev->dev.of_node) {
		rc = of_property_read_u32(pdev->dev.of_node, "nr-ports",
					  &n_ports);
		if (rc) {
			dev_err(&pdev->dev,
				"error parsing nr-ports property: %d\n", rc);
			return rc;
		}

		if (n_ports <= 0) {
			dev_err(&pdev->dev, "nr-ports must be positive: %d\n",
				n_ports);
			return -EINVAL;
		}

		irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
	} else {
		mv_platform_data = dev_get_platdata(&pdev->dev);
		n_ports = mv_platform_data->n_ports;
		irq = platform_get_irq(pdev, 0);
	}
	if (irq < 0)
		return irq;
	if (!irq)
		return -EINVAL;

	host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);

	if (!host || !hpriv)
		return -ENOMEM;
	hpriv->port_clks = devm_kcalloc(&pdev->dev,
					n_ports, sizeof(struct clk *),
					GFP_KERNEL);
	if (!hpriv->port_clks)
		return -ENOMEM;
	hpriv->port_phys = devm_kcalloc(&pdev->dev,
					n_ports, sizeof(struct phy *),
					GFP_KERNEL);
	if (!hpriv->port_phys)
		return -ENOMEM;
	host->private_data = hpriv;
	hpriv->board_idx = chip_soc;

	host->iomap = NULL;
	hpriv->base = devm_ioremap(&pdev->dev, res->start,
				   resource_size(res));
	if (!hpriv->base)
		return -ENOMEM;

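	/*
	 * The MEM resource points at the SATAHC0 register block, while the
	 * driver's offsets are chip-relative, so bias the mapping down by
	 * SATAHC0_REG_BASE.
	 */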
	hpriv->base -= SATAHC0_REG_BASE;

	hpriv->clk = clk_get(&pdev->dev, NULL);
	if (IS_ERR(hpriv->clk)) {
		dev_notice(&pdev->dev, "cannot get optional clkdev\n");
	} else {
		rc = clk_prepare_enable(hpriv->clk);
		if (rc)
			goto err;
	}

	for (port = 0; port < n_ports; port++) {
		char port_number[16];

		sprintf(port_number, "%d", port);
		hpriv->port_clks[port] = clk_get(&pdev->dev, port_number);
		if (!IS_ERR(hpriv->port_clks[port]))
			clk_prepare_enable(hpriv->port_clks[port]);

		sprintf(port_number, "port%d", port);
		hpriv->port_phys[port] = devm_phy_optional_get(&pdev->dev,
							       port_number);
		if (IS_ERR(hpriv->port_phys[port])) {
			rc = PTR_ERR(hpriv->port_phys[port]);
			hpriv->port_phys[port] = NULL;
			if (rc != -EPROBE_DEFER)
				dev_warn(&pdev->dev, "error getting phy %d", rc);

			/* Cleanup only the initialized ports */
			hpriv->n_ports = port;
			goto err;
		} else
			phy_power_on(hpriv->port_phys[port]);
	}

	/* All the ports have been initialized */
	hpriv->n_ports = n_ports;

	/*
	 * (Re-)program MBUS remapping windows if we are asked to.
	 */
	dram = mv_mbus_dram_info();
	if (dram)
		mv_conf_mbus_windows(hpriv, dram);

	rc = mv_create_dma_pools(hpriv, &pdev->dev);
	if (rc)
		goto err;

	/*
	 * To allow disk hotplug on Armada 370/XP SoCs, the PHY speed must be
	 * updated in the LP_PHY_CTL register.
	 */
	if (pdev->dev.of_node &&
		of_device_is_compatible(pdev->dev.of_node,
					"marvell,armada-370-sata"))
		hpriv->hp_flags |= MV_HP_FIX_LP_PHY_CTL;

	/* initialize adapter */
	rc = mv_init_host(host);
	if (rc)
		goto err;

	dev_info(&pdev->dev, "slots %u ports %d\n",
		 (unsigned)MV_MAX_Q_DEPTH, host->n_ports);

	rc = ata_host_activate(host, irq, mv_interrupt, IRQF_SHARED, &mv6_sht);
	if (!rc)
		return 0;

err:
	if (!IS_ERR(hpriv->clk)) {
		clk_disable_unprepare(hpriv->clk);
		clk_put(hpriv->clk);
	}
	for (port = 0; port < hpriv->n_ports; port++) {
		if (!IS_ERR(hpriv->port_clks[port])) {
			clk_disable_unprepare(hpriv->port_clks[port]);
			clk_put(hpriv->port_clks[port]);
		}
		phy_power_off(hpriv->port_phys[port]);
	}

	return rc;
}

/*
 * mv_platform_remove - unplug a platform interface
 * @pdev: platform device
 *
 * A platform bus SATA device has been unplugged. Perform the needed
 * cleanup. Also called on module unload for any active devices.
 */
static void mv_platform_remove(struct platform_device *pdev)
{
	struct ata_host *host = platform_get_drvdata(pdev);
	struct mv_host_priv *hpriv = host->private_data;
	int port;

	ata_host_detach(host);

	if (!IS_ERR(hpriv->clk)) {
		clk_disable_unprepare(hpriv->clk);
		clk_put(hpriv->clk);
	}
	for (port = 0; port < host->n_ports; port++) {
		if (!IS_ERR(hpriv->port_clks[port])) {
			clk_disable_unprepare(hpriv->port_clks[port]);
			clk_put(hpriv->port_clks[port]);
		}
		phy_power_off(hpriv->port_phys[port]);
	}
}

#ifdef CONFIG_PM_SLEEP
static int mv_platform_suspend(struct platform_device *pdev, pm_message_t state)
{
	struct ata_host *host = platform_get_drvdata(pdev);

	if (host)
		ata_host_suspend(host, state);
	return 0;
}

static int mv_platform_resume(struct platform_device *pdev)
{
	struct ata_host *host = platform_get_drvdata(pdev);
	const struct mbus_dram_target_info *dram;
	int ret;

	if (host) {
		struct mv_host_priv *hpriv = host->private_data;

		/*
		 * (Re-)program MBUS remapping windows if we are asked to.
		 */
		dram = mv_mbus_dram_info();
		if (dram)
			mv_conf_mbus_windows(hpriv, dram);

		/* initialize adapter */
		ret = mv_init_host(host);
		if (ret) {
			dev_err(&pdev->dev, "Error during HW init\n");
			return ret;
		}
		ata_host_resume(host);
	}

	return 0;
}
#else
#define mv_platform_suspend NULL
#define mv_platform_resume NULL
#endif

#ifdef CONFIG_OF
static const struct of_device_id mv_sata_dt_ids[] = {
	{ .compatible = "marvell,armada-370-sata", },
	{ .compatible = "marvell,orion-sata", },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, mv_sata_dt_ids);
#endif

static struct platform_driver mv_platform_driver = {
	.probe		= mv_platform_probe,
	.remove_new	= mv_platform_remove,
	.suspend	= mv_platform_suspend,
	.resume		= mv_platform_resume,
	.driver		= {
		.name = DRV_NAME,
		.of_match_table = of_match_ptr(mv_sata_dt_ids),
	},
};


#ifdef CONFIG_PCI
static int mv_pci_init_one(struct pci_dev *pdev,
			   const struct pci_device_id *ent);
#ifdef CONFIG_PM_SLEEP
static int mv_pci_device_resume(struct pci_dev *pdev);
#endif

static struct pci_driver mv_pci_driver = {
	.name			= DRV_NAME,
	.id_table		= mv_pci_tbl,
	.probe			= mv_pci_init_one,
	.remove			= ata_pci_remove_one,
#ifdef CONFIG_PM_SLEEP
	.suspend		= ata_pci_device_suspend,
	.resume			= mv_pci_device_resume,
#endif
};

/**
 * mv_print_info - Dump key info to kernel log for perusal.
 * @host: ATA host to print info about
 *
 * FIXME: complete this.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_print_info(struct ata_host *host)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	struct mv_host_priv *hpriv = host->private_data;
	u8 scc;
	const char *scc_s, *gen;

	/* Use this to determine the HW stepping of the chip so we know
	 * what errata to workaround
	 */
	pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &scc);
	if (scc == 0)
		scc_s = "SCSI";
	else if (scc == 0x01)
		scc_s = "RAID";
	else
		scc_s = "?";

	if (IS_GEN_I(hpriv))
		gen = "I";
	else if (IS_GEN_II(hpriv))
		gen = "II";
	else if (IS_GEN_IIE(hpriv))
		gen = "IIE";
	else
		gen = "?";

	dev_info(&pdev->dev, "Gen-%s %u slots %u ports %s mode IRQ via %s\n",
		 gen, (unsigned)MV_MAX_Q_DEPTH, host->n_ports,
		 scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx");
}

/**
 * mv_pci_init_one - handle a positive probe of a PCI Marvell host
 * @pdev: PCI device found
 * @ent: PCI device ID entry for the matched host
 *
 * LOCKING:
 * Inherited from caller.
 */
static int mv_pci_init_one(struct pci_dev *pdev,
			   const struct pci_device_id *ent)
{
	unsigned int board_idx = (unsigned int)ent->driver_data;
	const struct ata_port_info *ppi[] = { &mv_port_info[board_idx], NULL };
	struct ata_host *host;
	struct mv_host_priv *hpriv;
	int n_ports, port, rc;

	ata_print_version_once(&pdev->dev, DRV_VERSION);

	/* allocate host */
	n_ports = mv_get_hc_count(ppi[0]->flags) * MV_PORTS_PER_HC;

	host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
	if (!host || !hpriv)
		return -ENOMEM;
	host->private_data = hpriv;
	hpriv->n_ports = n_ports;
	hpriv->board_idx = board_idx;

	/* acquire resources */
	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	rc = pcim_iomap_regions(pdev, 1 << MV_PRIMARY_BAR, DRV_NAME);
	if (rc == -EBUSY)
		pcim_pin_device(pdev);
	if (rc)
		return rc;
	host->iomap = pcim_iomap_table(pdev);
	hpriv->base = host->iomap[MV_PRIMARY_BAR];

	rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (rc) {
		dev_err(&pdev->dev, "DMA enable failed\n");
		return rc;
	}

	rc = mv_create_dma_pools(hpriv, &pdev->dev);
	if (rc)
		return rc;

	for (port = 0; port < host->n_ports; port++) {
		struct ata_port *ap = host->ports[port];
		void __iomem *port_mmio = mv_port_base(hpriv->base, port);
		unsigned int offset = port_mmio - hpriv->base;

		ata_port_pbar_desc(ap, MV_PRIMARY_BAR, -1, "mmio");
		ata_port_pbar_desc(ap, MV_PRIMARY_BAR, offset, "port");
	}

	/* initialize adapter */
	rc = mv_init_host(host);
	if (rc)
		return rc;

	/* Enable message-signaled interrupts (MSI), if requested */
	if (msi && pci_enable_msi(pdev) == 0)
		hpriv->hp_flags |= MV_HP_FLAG_MSI;

	mv_dump_pci_cfg(pdev, 0x68);
	mv_print_info(host);

	pci_set_master(pdev);
	pci_try_set_mwi(pdev);
	return ata_host_activate(host, pdev->irq, mv_interrupt, IRQF_SHARED,
				 IS_GEN_I(hpriv) ? &mv5_sht : &mv6_sht);
}

#ifdef CONFIG_PM_SLEEP
static int mv_pci_device_resume(struct pci_dev *pdev)
{
	struct ata_host *host = pci_get_drvdata(pdev);
	int rc;

	rc = ata_pci_device_do_resume(pdev);
	if (rc)
		return rc;

	/* initialize adapter */
	rc = mv_init_host(host);
	if (rc)
		return rc;

	ata_host_resume(host);

	return 0;
}
#endif
#endif

static int __init mv_init(void)
{
	int rc = -ENODEV;
#ifdef CONFIG_PCI
	rc = pci_register_driver(&mv_pci_driver);
	if (rc < 0)
		return rc;
#endif
	rc = platform_driver_register(&mv_platform_driver);

#ifdef CONFIG_PCI
	if (rc < 0)
		pci_unregister_driver(&mv_pci_driver);
#endif
	return rc;
}

static void __exit mv_exit(void)
{
#ifdef CONFIG_PCI
	pci_unregister_driver(&mv_pci_driver);
#endif
	platform_driver_unregister(&mv_platform_driver);
}

MODULE_AUTHOR("Brett Russ");
MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers");
MODULE_LICENSE("GPL v2");
MODULE_DEVICE_TABLE(pci, mv_pci_tbl);
MODULE_VERSION(DRV_VERSION);
MODULE_ALIAS("platform:" DRV_NAME);

module_init(mv_init);
module_exit(mv_exit);