1 | // SPDX-License-Identifier: GPL-2.0-only |
2 | /* |
3 | * Marvell 88SE94xx hardware specific |
4 | * |
5 | * Copyright 2007 Red Hat, Inc. |
6 | * Copyright 2008 Marvell. <kewei@marvell.com> |
7 | * Copyright 2009-2011 Marvell. <yuxiangl@marvell.com> |
8 | */ |
9 | |
10 | #include "mv_sas.h" |
11 | #include "mv_94xx.h" |
12 | #include "mv_chips.h" |
13 | |
14 | static void mvs_94xx_detect_porttype(struct mvs_info *mvi, int i) |
15 | { |
16 | u32 reg; |
17 | struct mvs_phy *phy = &mvi->phy[i]; |
18 | u32 phy_status; |
19 | |
20 | mvs_write_port_vsr_addr(mvi, port: i, addr: VSR_PHY_MODE3); |
21 | reg = mvs_read_port_vsr_data(mvi, port: i); |
22 | phy_status = ((reg & 0x3f0000) >> 16) & 0xff; |
23 | phy->phy_type &= ~(PORT_TYPE_SAS | PORT_TYPE_SATA); |
24 | switch (phy_status) { |
25 | case 0x10: |
26 | phy->phy_type |= PORT_TYPE_SAS; |
27 | break; |
28 | case 0x1d: |
29 | default: |
30 | phy->phy_type |= PORT_TYPE_SATA; |
31 | break; |
32 | } |
33 | } |
34 | |
/*
 * set_phy_tuning - program transmitter tuning values for one phy
 * @mvi:        host instance
 * @phy_id:     phy to tune
 * @phy_tuning: emphasis/amplitude values (taken from the HBA info page)
 *
 * Writes the Generation 1/2/3 "Setting 0/1" vendor-specific registers.
 * Does nothing on VANIR A0 silicon, which has a different register map.
 */
static void set_phy_tuning(struct mvs_info *mvi, int phy_id,
			struct phy_tuning phy_tuning)
{
	u32 tmp, setting_0 = 0, setting_1 = 0;
	u8 i;

	/* Remap information for B0 chip:
	 *
	 * R0Ch -> R118h[15:0] (Adapted DFE F3 - F5 coefficient)
	 * R0Dh -> R118h[31:16] (Generation 1 Setting 0)
	 * R0Eh -> R11Ch[15:0]  (Generation 1 Setting 1)
	 * R0Fh -> R11Ch[31:16] (Generation 2 Setting 0)
	 * R10h -> R120h[15:0]  (Generation 2 Setting 1)
	 * R11h -> R120h[31:16] (Generation 3 Setting 0)
	 * R12h -> R124h[15:0]  (Generation 3 Setting 1)
	 * R13h -> R124h[31:16] (Generation 4 Setting 0 (Reserved))
	 */

	/* A0 has a different set of registers */
	if (mvi->pdev->revision == VANIR_A0_REV)
		return;

	for (i = 0; i < 3; i++) {
		/* loop 3 times, set Gen 1, Gen 2, Gen 3 */
		switch (i) {
		case 0:
			setting_0 = GENERATION_1_SETTING;
			setting_1 = GENERATION_1_2_SETTING;
			break;
		case 1:
			setting_0 = GENERATION_1_2_SETTING;
			setting_1 = GENERATION_2_3_SETTING;
			break;
		case 2:
			setting_0 = GENERATION_2_3_SETTING;
			setting_1 = GENERATION_3_4_SETTING;
			break;
		}

		/* Set:
		 *
		 * Transmitter Emphasis Enable
		 * Transmitter Emphasis Amplitude
		 * Transmitter Amplitude
		 */
		mvs_write_port_vsr_addr(mvi, port: phy_id, addr: setting_0);
		tmp = mvs_read_port_vsr_data(mvi, port: phy_id);
		/* Clear the emphasis/amplitude fields in the upper half word
		 * before merging in the new values. */
		tmp &= ~(0xFBE << 16);
		tmp |= (((phy_tuning.trans_emp_en << 11) |
			(phy_tuning.trans_emp_amp << 7) |
			(phy_tuning.trans_amp << 1)) << 16);
		mvs_write_port_vsr_data(mvi, port: phy_id, val: tmp);

		/* Set Transmitter Amplitude Adjust */
		mvs_write_port_vsr_addr(mvi, port: phy_id, addr: setting_1);
		tmp = mvs_read_port_vsr_data(mvi, port: phy_id);
		/* Amplitude adjust lives in bits [15:14]. */
		tmp &= ~(0xC000);
		tmp |= (phy_tuning.trans_amp_adj << 14);
		mvs_write_port_vsr_data(mvi, port: phy_id, val: tmp);
	}
}
96 | |
/*
 * set_phy_ffe_tuning - program FFE (feed-forward equalizer) settings
 * @mvi:    host instance
 * @phy_id: phy to tune
 * @ffe:    FFE resistor/capacitor selects (from the HBA info page)
 *
 * Only applies to C0 and later revisions; A0/B0 are skipped.  Besides the
 * FFE resistor/capacitor selects, a few related fields are hard coded
 * because the HBA info page has no entries for them.
 */
static void set_phy_ffe_tuning(struct mvs_info *mvi, int phy_id,
				struct ffe_control ffe)
{
	u32 tmp;

	/* Don't run this if A0/B0 */
	if ((mvi->pdev->revision == VANIR_A0_REV)
		|| (mvi->pdev->revision == VANIR_B0_REV))
		return;

	/* FFE Resistor and Capacitor */
	/* R10Ch DFE Resolution Control/Squelch and FFE Setting
	 *
	 * FFE_FORCE            [7]
	 * FFE_RES_SEL          [6:4]
	 * FFE_CAP_SEL          [3:0]
	 */
	mvs_write_port_vsr_addr(mvi, port: phy_id, addr: VSR_PHY_FFE_CONTROL);
	tmp = mvs_read_port_vsr_data(mvi, port: phy_id);
	tmp &= ~0xFF;

	/* Read from HBA_Info_Page; FFE_FORCE (bit 7) is always set here. */
	tmp |= ((0x1 << 7) |
		(ffe.ffe_rss_sel << 4) |
		(ffe.ffe_cap_sel << 0));

	mvs_write_port_vsr_data(mvi, port: phy_id, val: tmp);

	/* R064h PHY Mode Register 1
	 *
	 * DFE_DIS		18
	 */
	mvs_write_port_vsr_addr(mvi, port: phy_id, addr: VSR_REF_CLOCK_CRTL);
	tmp = mvs_read_port_vsr_data(mvi, port: phy_id);
	tmp &= ~0x40001;
	/* Hard coding */
	/* No defines in HBA_Info_Page */
	tmp |= (0 << 18);
	mvs_write_port_vsr_data(mvi, port: phy_id, val: tmp);

	/* R110h DFE F0-F1 Coefficient Control/DFE Update Control
	 *
	 * DFE_UPDATE_EN        [11:6]
	 * DFE_FX_FORCE         [5:0]
	 */
	mvs_write_port_vsr_addr(mvi, port: phy_id, addr: VSR_PHY_DFE_UPDATE_CRTL);
	tmp = mvs_read_port_vsr_data(mvi, port: phy_id);
	tmp &= ~0xFFF;
	/* Hard coding */
	/* No defines in HBA_Info_Page */
	tmp |= ((0x3F << 6) | (0x0 << 0));
	mvs_write_port_vsr_data(mvi, port: phy_id, val: tmp);

	/* R1A0h Interface and Digital Reference Clock Control/Reserved_50h
	 *
	 * FFE_TRAIN_EN         3
	 */
	mvs_write_port_vsr_addr(mvi, port: phy_id, addr: VSR_REF_CLOCK_CRTL);
	tmp = mvs_read_port_vsr_data(mvi, port: phy_id);
	tmp &= ~0x8;
	/* Hard coding */
	/* No defines in HBA_Info_Page */
	tmp |= (0 << 3);
	mvs_write_port_vsr_data(mvi, port: phy_id, val: tmp);
}
162 | |
/*Notice: this function must be called when phy is disabled*/
/*
 * set_phy_rate - program the maximum negotiable link rate for one phy
 * @mvi:    host instance
 * @phy_id: phy to configure
 * @rate:   0 = 1.5 Gbps only, 1 = up to 3.0 Gbps, anything else = up to 6.0
 *
 * Builds a fresh PHY MODE2 value (keeping only the current disable-phy
 * bit) and writes it through the VSR window.
 */
static void set_phy_rate(struct mvs_info *mvi, int phy_id, u8 rate)
{
	union reg_phy_cfg phy_cfg, phy_cfg_tmp;
	mvs_write_port_vsr_addr(mvi, port: phy_id, addr: VSR_PHY_MODE2);
	/* Preserve only the disable bit from the current register value. */
	phy_cfg_tmp.v = mvs_read_port_vsr_data(mvi, port: phy_id);
	phy_cfg.v = 0;
	phy_cfg.u.disable_phy = phy_cfg_tmp.u.disable_phy;
	phy_cfg.u.sas_support = 1;
	phy_cfg.u.sata_support = 1;
	phy_cfg.u.sata_host_mode = 1;

	switch (rate) {
	case 0x0:
		/* support 1.5 Gbps */
		phy_cfg.u.speed_support = 1;
		phy_cfg.u.snw_3_support = 0;
		phy_cfg.u.tx_lnk_parity = 1;
		phy_cfg.u.tx_spt_phs_lnk_rate = 0x30;
		break;
	case 0x1:

		/* support 1.5, 3.0 Gbps */
		phy_cfg.u.speed_support = 3;
		phy_cfg.u.tx_spt_phs_lnk_rate = 0x3c;
		phy_cfg.u.tx_lgcl_lnk_rate = 0x08;
		break;
	case 0x2:
	default:
		/* support 1.5, 3.0, 6.0 Gbps */
		phy_cfg.u.speed_support = 7;
		phy_cfg.u.snw_3_support = 1;
		phy_cfg.u.tx_lnk_parity = 1;
		phy_cfg.u.tx_spt_phs_lnk_rate = 0x3f;
		phy_cfg.u.tx_lgcl_lnk_rate = 0x09;
		break;
	}
	mvs_write_port_vsr_data(mvi, port: phy_id, val: phy_cfg.v);
}
202 | |
/*
 * mvs_94xx_config_reg_from_hba - apply per-phy tuning from the HBA info page
 * @mvi:    host instance
 * @phy_id: phy to configure
 *
 * Fields read back as all-ones (erased flash) are replaced with built-in
 * defaults, then the tuning/FFE/rate helpers are invoked with the final
 * values.
 */
static void mvs_94xx_config_reg_from_hba(struct mvs_info *mvi, int phy_id)
{
	u32 temp;
	/* 0xFFFFFFFF means the phy_tuning entry was never programmed. */
	temp = (u32)(*(u32 *)&mvi->hba_info_param.phy_tuning[phy_id]);
	if (temp == 0xFFFFFFFFL) {
		mvi->hba_info_param.phy_tuning[phy_id].trans_emp_amp = 0x6;
		mvi->hba_info_param.phy_tuning[phy_id].trans_amp = 0x1A;
		mvi->hba_info_param.phy_tuning[phy_id].trans_amp_adj = 0x3;
	}

	/* FFE defaults depend on the silicon revision. */
	temp = (u8)(*(u8 *)&mvi->hba_info_param.ffe_ctl[phy_id]);
	if (temp == 0xFFL) {
		switch (mvi->pdev->revision) {
		case VANIR_A0_REV:
		case VANIR_B0_REV:
			mvi->hba_info_param.ffe_ctl[phy_id].ffe_rss_sel = 0x7;
			mvi->hba_info_param.ffe_ctl[phy_id].ffe_cap_sel = 0x7;
			break;
		case VANIR_C0_REV:
		case VANIR_C1_REV:
		case VANIR_C2_REV:
		default:
			mvi->hba_info_param.ffe_ctl[phy_id].ffe_rss_sel = 0x7;
			mvi->hba_info_param.ffe_ctl[phy_id].ffe_cap_sel = 0xC;
			break;
		}
	}

	temp = (u8)(*(u8 *)&mvi->hba_info_param.phy_rate[phy_id]);
	if (temp == 0xFFL)
		/*set default phy_rate = 6Gbps*/
		mvi->hba_info_param.phy_rate[phy_id] = 0x2;

	set_phy_tuning(mvi, phy_id,
		phy_tuning: mvi->hba_info_param.phy_tuning[phy_id]);
	set_phy_ffe_tuning(mvi, phy_id,
		ffe: mvi->hba_info_param.ffe_ctl[phy_id]);
	set_phy_rate(mvi, phy_id,
		rate: mvi->hba_info_param.phy_rate[phy_id]);
}
243 | |
244 | static void mvs_94xx_enable_xmt(struct mvs_info *mvi, int phy_id) |
245 | { |
246 | void __iomem *regs = mvi->regs; |
247 | u32 tmp; |
248 | |
249 | tmp = mr32(MVS_PCS); |
250 | tmp |= 1 << (phy_id + PCS_EN_PORT_XMT_SHIFT2); |
251 | mw32(MVS_PCS, tmp); |
252 | } |
253 | |
/*
 * mvs_94xx_phy_reset - reset one phy
 * @mvi:    host instance
 * @phy_id: phy to reset
 * @hard:   MVS_PHY_TUNE for a SATA-control tuning poke, non-zero for a
 *          hard reset (polled until the hardware clears the bit), zero
 *          for a soft reset (fire and forget)
 */
static void mvs_94xx_phy_reset(struct mvs_info *mvi, u32 phy_id, int hard)
{
	u32 tmp;
	u32 delay = 5000;
	if (hard == MVS_PHY_TUNE) {
		/* Tuning path: pulse bits in the SATA control register
		 * instead of performing a real reset. */
		mvs_write_port_cfg_addr(mvi, port: phy_id, addr: PHYR_SATA_CTL);
		tmp = mvs_read_port_cfg_data(mvi, port: phy_id);
		mvs_write_port_cfg_data(mvi, port: phy_id, val: tmp|0x20000000);
		mvs_write_port_cfg_data(mvi, port: phy_id, val: tmp|0x100000);
		return;
	}
	/* Clear any pending phy-ready-change event before resetting. */
	tmp = mvs_read_port_irq_stat(mvi, port: phy_id);
	tmp &= ~PHYEV_RDY_CH;
	mvs_write_port_irq_stat(mvi, port: phy_id, val: tmp);
	if (hard) {
		tmp = mvs_read_phy_ctl(mvi, port: phy_id);
		tmp |= PHY_RST_HARD;
		mvs_write_phy_ctl(mvi, port: phy_id, val: tmp);
		/* Hardware self-clears PHY_RST_HARD; poll up to ~50 ms. */
		do {
			tmp = mvs_read_phy_ctl(mvi, port: phy_id);
			udelay(10);
			delay--;
		} while ((tmp & PHY_RST_HARD) && delay);
		if (!delay)
			mv_dprintk("phy hard reset failed.\n");
	} else {
		tmp = mvs_read_phy_ctl(mvi, port: phy_id);
		tmp |= PHY_RST;
		mvs_write_phy_ctl(mvi, port: phy_id, val: tmp);
	}
}
285 | |
286 | static void mvs_94xx_phy_disable(struct mvs_info *mvi, u32 phy_id) |
287 | { |
288 | u32 tmp; |
289 | mvs_write_port_vsr_addr(mvi, port: phy_id, addr: VSR_PHY_MODE2); |
290 | tmp = mvs_read_port_vsr_data(mvi, port: phy_id); |
291 | mvs_write_port_vsr_data(mvi, port: phy_id, val: tmp | 0x00800000); |
292 | } |
293 | |
/*
 * mvs_94xx_phy_enable - enable one phy
 * @mvi:    host instance
 * @phy_id: phy to enable
 *
 * A0 and B0 silicon first need revision-specific vendor register writes
 * (values are undocumented magic from Marvell).  Finally PHY MODE2 is
 * updated: bit 0 is set and the disable bit (23) plus bit 25 are cleared
 * by the 0xfd7fffff mask.
 */
static void mvs_94xx_phy_enable(struct mvs_info *mvi, u32 phy_id)
{
	u32 tmp;
	u8 revision = 0;

	revision = mvi->pdev->revision;
	if (revision == VANIR_A0_REV) {
		mvs_write_port_vsr_addr(mvi, port: phy_id, addr: CMD_HOST_RD_DATA);
		mvs_write_port_vsr_data(mvi, port: phy_id, val: 0x8300ffc1);
	}
	if (revision == VANIR_B0_REV) {
		mvs_write_port_vsr_addr(mvi, port: phy_id, addr: CMD_APP_MEM_CTL);
		mvs_write_port_vsr_data(mvi, port: phy_id, val: 0x08001006);
		mvs_write_port_vsr_addr(mvi, port: phy_id, addr: CMD_HOST_RD_DATA);
		mvs_write_port_vsr_data(mvi, port: phy_id, val: 0x0000705f);
	}

	mvs_write_port_vsr_addr(mvi, port: phy_id, addr: VSR_PHY_MODE2);
	tmp = mvs_read_port_vsr_data(mvi, port: phy_id);
	tmp |= bit(0);
	mvs_write_port_vsr_data(mvi, port: phy_id, val: tmp & 0xfd7fffff);
}
316 | |
/*
 * mvs_94xx_sgpio_init - set up the SGPIO block for LED control
 * @mvi: host instance
 *
 * Enables SGPIO in the host chip config, programs clock/blink timing and
 * auto data-out mode, then maps each drive slot of this host to its SGPIO
 * data source.  All SGPIO registers are addressed relative to a window
 * 0x10200 below the extended register base.
 */
static void mvs_94xx_sgpio_init(struct mvs_info *mvi)
{
	void __iomem *regs = mvi->regs_ex - 0x10200;
	u32 tmp;

	/* Enable the SGPIO feature bit in the host chip configuration. */
	tmp = mr32(MVS_HST_CHIP_CONFIG);
	tmp |= 0x100;
	mw32(MVS_HST_CHIP_CONFIG, tmp);

	mw32(MVS_SGPIO_CTRL + MVS_SGPIO_HOST_OFFSET * mvi->id,
		MVS_SGPIO_CTRL_SDOUT_AUTO << MVS_SGPIO_CTRL_SDOUT_SHIFT);

	mw32(MVS_SGPIO_CFG1 + MVS_SGPIO_HOST_OFFSET * mvi->id,
		8 << MVS_SGPIO_CFG1_LOWA_SHIFT |
		8 << MVS_SGPIO_CFG1_HIA_SHIFT |
		4 << MVS_SGPIO_CFG1_LOWB_SHIFT |
		4 << MVS_SGPIO_CFG1_HIB_SHIFT |
		2 << MVS_SGPIO_CFG1_MAXACTON_SHIFT |
		1 << MVS_SGPIO_CFG1_FORCEACTOFF_SHIFT
	);

	mw32(MVS_SGPIO_CFG2 + MVS_SGPIO_HOST_OFFSET * mvi->id,
		(300000 / 100) << MVS_SGPIO_CFG2_CLK_SHIFT | /* 100kHz clock */
		66 << MVS_SGPIO_CFG2_BLINK_SHIFT /* (66 * 0,121 Hz?)*/
	);

	mw32(MVS_SGPIO_CFG0 + MVS_SGPIO_HOST_OFFSET * mvi->id,
		MVS_SGPIO_CFG0_ENABLE |
		MVS_SGPIO_CFG0_BLINKA |
		MVS_SGPIO_CFG0_BLINKB |
		/* 3*4 data bits / PDU */
		(12 - 1) << MVS_SGPIO_CFG0_AUT_BITLEN_SHIFT
	);

	mw32(MVS_SGPIO_DCTRL + MVS_SGPIO_HOST_OFFSET * mvi->id,
		DEFAULT_SGPIO_BITS);

	/* One byte per slot: slot n of host id maps to source (id*4 + n). */
	mw32(MVS_SGPIO_DSRC + MVS_SGPIO_HOST_OFFSET * mvi->id,
		((mvi->id * 4) + 3) << (8 * 3) |
		((mvi->id * 4) + 2) << (8 * 2) |
		((mvi->id * 4) + 1) << (8 * 1) |
		((mvi->id * 4) + 0) << (8 * 0));

}
361 | |
/*
 * mvs_94xx_init - bring up a 94xx host instance
 * @mvi: host instance
 *
 * Performs the full controller initialization sequence: chip reset, phy
 * multiplexing and rate setup, per-phy tuning/reset/port detection, DMA
 * ring programming, interrupt coalescing, and finally engine start and
 * interrupt unmasking.  Register writes here are order-sensitive.
 *
 * Returns 0 (the sequence has no failure path).
 */
static int mvs_94xx_init(struct mvs_info *mvi)
{
	void __iomem *regs = mvi->regs;
	int i;
	u32 tmp, cctl;
	u8 revision;

	revision = mvi->pdev->revision;
	mvs_show_pcie_usage(mvi);
	/* On SoC parts, power the phys but keep them disabled for now. */
	if (mvi->flags & MVF_FLAG_SOC) {
		tmp = mr32(MVS_PHY_CTL);
		tmp &= ~PCTL_PWR_OFF;
		tmp |= PCTL_PHY_DSBL;
		mw32(MVS_PHY_CTL, tmp);
	}

	/* Init Chip */
	/* make sure RST is set; HBA_RST /should/ have done that for us */
	cctl = mr32(MVS_CTL) & 0xFFFF;
	if (cctl & CCTL_RST)
		cctl &= ~CCTL_RST;
	else
		mw32_f(MVS_CTL, cctl | CCTL_RST);

	/* SoC parts: release the link reset with settling delays. */
	if (mvi->flags & MVF_FLAG_SOC) {
		tmp = mr32(MVS_PHY_CTL);
		tmp &= ~PCTL_PWR_OFF;
		tmp |= PCTL_COM_ON;
		tmp &= ~PCTL_PHY_DSBL;
		tmp |= PCTL_LINK_RST;
		mw32(MVS_PHY_CTL, tmp);
		msleep(msecs: 100);
		tmp &= ~PCTL_LINK_RST;
		mw32(MVS_PHY_CTL, tmp);
		msleep(msecs: 100);
	}

	/* disable Multiplexing, enable phy implemented */
	mw32(MVS_PORTS_IMP, 0xFF);

	if (revision == VANIR_A0_REV) {
		mw32(MVS_PA_VSR_ADDR, CMD_CMWK_OOB_DET);
		mw32(MVS_PA_VSR_PORT, 0x00018080);
	}
	mw32(MVS_PA_VSR_ADDR, VSR_PHY_MODE2);
	if (revision == VANIR_A0_REV || revision == VANIR_B0_REV)
		/* set 6G/3G/1.5G, multiplexing, without SSC */
		mw32(MVS_PA_VSR_PORT, 0x0084d4fe);
	else
		/* set 6G/3G/1.5G, multiplexing, with and without SSC */
		mw32(MVS_PA_VSR_PORT, 0x0084fffe);

	/* B0-specific vendor register values (undocumented magic). */
	if (revision == VANIR_B0_REV) {
		mw32(MVS_PA_VSR_ADDR, CMD_APP_MEM_CTL);
		mw32(MVS_PA_VSR_PORT, 0x08001006);
		mw32(MVS_PA_VSR_ADDR, CMD_HOST_RD_DATA);
		mw32(MVS_PA_VSR_PORT, 0x0000705f);
	}

	/* reset control */
	mw32(MVS_PCS, 0);		/* MVS_PCS */
	mw32(MVS_STP_REG_SET_0, 0);
	mw32(MVS_STP_REG_SET_1, 0);

	/* init phys */
	mvs_phy_hacks(mvi);

	/* disable non data frame retry */
	tmp = mvs_cr32(mvi, addr: CMD_SAS_CTL1);
	if ((revision == VANIR_A0_REV) ||
		(revision == VANIR_B0_REV) ||
		(revision == VANIR_C0_REV)) {
		tmp &= ~0xffff;
		tmp |= 0x007f;
		mvs_cw32(mvi, addr: CMD_SAS_CTL1, val: tmp);
	}

	/* set LED blink when IO*/
	mw32(MVS_PA_VSR_ADDR, VSR_PHY_ACT_LED);
	tmp = mr32(MVS_PA_VSR_PORT);
	tmp &= 0xFFFF00FF;
	tmp |= 0x00003300;
	mw32(MVS_PA_VSR_PORT, tmp);

	/* Program DMA ring base addresses (split into low/high words). */
	mw32(MVS_CMD_LIST_LO, mvi->slot_dma);
	mw32(MVS_CMD_LIST_HI, (mvi->slot_dma >> 16) >> 16);

	mw32(MVS_RX_FIS_LO, mvi->rx_fis_dma);
	mw32(MVS_RX_FIS_HI, (mvi->rx_fis_dma >> 16) >> 16);

	mw32(MVS_TX_CFG, MVS_CHIP_SLOT_SZ);
	mw32(MVS_TX_LO, mvi->tx_dma);
	mw32(MVS_TX_HI, (mvi->tx_dma >> 16) >> 16);

	mw32(MVS_RX_CFG, MVS_RX_RING_SZ);
	mw32(MVS_RX_LO, mvi->rx_dma);
	mw32(MVS_RX_HI, (mvi->rx_dma >> 16) >> 16);

	/* Per-phy bring-up: address, tuning, enable, reset, classify. */
	for (i = 0; i < mvi->chip->n_phy; i++) {
		mvs_94xx_phy_disable(mvi, phy_id: i);
		/* set phy local SAS address */
		mvs_set_sas_addr(mvi, port_id: i, off_lo: CONFIG_ID_FRAME3, off_hi: CONFIG_ID_FRAME4,
						cpu_to_le64(mvi->phy[i].dev_sas_addr));

		mvs_94xx_enable_xmt(mvi, phy_id: i);
		mvs_94xx_config_reg_from_hba(mvi, phy_id: i);
		mvs_94xx_phy_enable(mvi, phy_id: i);

		mvs_94xx_phy_reset(mvi, phy_id: i, hard: PHY_RST_HARD);
		msleep(msecs: 500);
		mvs_94xx_detect_porttype(mvi, i);
	}

	if (mvi->flags & MVF_FLAG_SOC) {
		/* set select registers */
		writel(val: 0x0E008000, addr: regs + 0x000);
		writel(val: 0x59000008, addr: regs + 0x004);
		writel(val: 0x20, addr: regs + 0x008);
		writel(val: 0x20, addr: regs + 0x00c);
		writel(val: 0x20, addr: regs + 0x010);
		writel(val: 0x20, addr: regs + 0x014);
		writel(val: 0x20, addr: regs + 0x018);
		writel(val: 0x20, addr: regs + 0x01c);
	}
	for (i = 0; i < mvi->chip->n_phy; i++) {
		/* clear phy int status */
		tmp = mvs_read_port_irq_stat(mvi, port: i);
		tmp &= ~PHYEV_SIG_FIS;
		mvs_write_port_irq_stat(mvi, port: i, val: tmp);

		/* set phy int mask */
		tmp = PHYEV_RDY_CH | PHYEV_BROAD_CH |
			PHYEV_ID_DONE | PHYEV_DCDR_ERR | PHYEV_CRC_ERR ;
		mvs_write_port_irq_mask(mvi, port: i, val: tmp);

		msleep(msecs: 100);
		mvs_update_phyinfo(mvi, i, get_st: 1);
	}

	/* little endian for open address and command table, etc. */
	cctl = mr32(MVS_CTL);
	cctl |= CCTL_ENDIAN_CMD;
	cctl &= ~CCTL_ENDIAN_OPEN;
	cctl |= CCTL_ENDIAN_RSP;
	mw32_f(MVS_CTL, cctl);

	/* reset CMD queue */
	tmp = mr32(MVS_PCS);
	tmp |= PCS_CMD_RST;
	tmp &= ~PCS_SELF_CLEAR;
	mw32(MVS_PCS, tmp);
	/*
	 * the max count is 0x1ff, while our max slot is 0x200,
	 * it will make count 0.
	 */
	tmp = 0;
	if (MVS_CHIP_SLOT_SZ > 0x1ff)
		mw32(MVS_INT_COAL, 0x1ff | COAL_EN);
	else
		mw32(MVS_INT_COAL, MVS_CHIP_SLOT_SZ | COAL_EN);

	/* default interrupt coalescing time is 128us */
	tmp = 0x10000 | interrupt_coalescing;
	mw32(MVS_INT_COAL_TMOUT, tmp);

	/* ladies and gentlemen, start your engines */
	mw32(MVS_TX_CFG, 0);
	mw32(MVS_TX_CFG, MVS_CHIP_SLOT_SZ | TX_EN);
	mw32(MVS_RX_CFG, MVS_RX_RING_SZ | RX_EN);
	/* enable CMD/CMPL_Q/RESP mode */
	mw32(MVS_PCS, PCS_SATA_RETRY_2 | PCS_FIS_RX_EN |
		PCS_CMD_EN | PCS_CMD_STOP_ERR);

	/* enable completion queue interrupt */
	tmp = (CINT_PORT_MASK | CINT_DONE | CINT_MEM | CINT_SRS | CINT_CI_STOP |
		CINT_DMA_PCIE | CINT_NON_SPEC_NCQ_ERROR);
	tmp |= CINT_PHY_MASK;
	mw32(MVS_INT_MASK, tmp);

	tmp = mvs_cr32(mvi, addr: CMD_LINK_TIMER);
	tmp |= 0xFFFF0000;
	mvs_cw32(mvi, addr: CMD_LINK_TIMER, val: tmp);

	/* tune STP performance */
	tmp = 0x003F003F;
	mvs_cw32(mvi, addr: CMD_PL_TIMER, val: tmp);

	/* This can improve expander large block size seq write performance */
	tmp = mvs_cr32(mvi, addr: CMD_PORT_LAYER_TIMER1);
	tmp |= 0xFFFF007F;
	mvs_cw32(mvi, addr: CMD_PORT_LAYER_TIMER1, val: tmp);

	/* change the connection open-close behavior (bit 9)
	 * set bit8 to 1 for performance tuning */
	tmp = mvs_cr32(mvi, addr: CMD_SL_MODE0);
	tmp |= 0x00000300;
	/* set bit0 to 0 to enable retry for no_dest reject case */
	tmp &= 0xFFFFFFFE;
	mvs_cw32(mvi, addr: CMD_SL_MODE0, val: tmp);

	/* Enable SRS interrupt */
	mw32(MVS_INT_MASK_SRS_0, 0xFFFF);

	mvs_94xx_sgpio_init(mvi);

	return 0;
}
569 | |
570 | static int mvs_94xx_ioremap(struct mvs_info *mvi) |
571 | { |
572 | if (!mvs_ioremap(mvi, bar: 2, bar_ex: -1)) { |
573 | mvi->regs_ex = mvi->regs + 0x10200; |
574 | mvi->regs += 0x20000; |
575 | if (mvi->id == 1) |
576 | mvi->regs += 0x4000; |
577 | return 0; |
578 | } |
579 | return -1; |
580 | } |
581 | |
582 | static void mvs_94xx_iounmap(struct mvs_info *mvi) |
583 | { |
584 | if (mvi->regs) { |
585 | mvi->regs -= 0x20000; |
586 | if (mvi->id == 1) |
587 | mvi->regs -= 0x4000; |
588 | mvs_iounmap(regs: mvi->regs); |
589 | } |
590 | } |
591 | |
/*
 * mvs_94xx_interrupt_enable - unmask SAS core interrupts
 * @mvi: host instance
 *
 * Sets the SAS A/B interrupt bits in the global control register.
 * NOTE(review): the same value is also written to the global interrupt
 * status register and to the extended registers at offsets 0x0C-0x18;
 * presumably these are per-core enable mirrors -- not documented here,
 * confirm against the chip datasheet.
 */
static void mvs_94xx_interrupt_enable(struct mvs_info *mvi)
{
	void __iomem *regs = mvi->regs_ex;
	u32 tmp;

	tmp = mr32(MVS_GBL_CTL);
	tmp |= (MVS_IRQ_SAS_A | MVS_IRQ_SAS_B);
	mw32(MVS_GBL_INT_STAT, tmp);
	writel(val: tmp, addr: regs + 0x0C);
	writel(val: tmp, addr: regs + 0x10);
	writel(val: tmp, addr: regs + 0x14);
	writel(val: tmp, addr: regs + 0x18);
	mw32(MVS_GBL_CTL, tmp);
}
606 | |
/*
 * mvs_94xx_interrupt_disable - mask SAS core interrupts
 * @mvi: host instance
 *
 * Mirror image of mvs_94xx_interrupt_enable(): clears the SAS A/B bits in
 * the global control register and propagates the value to the same set of
 * status/extended registers.
 */
static void mvs_94xx_interrupt_disable(struct mvs_info *mvi)
{
	void __iomem *regs = mvi->regs_ex;
	u32 tmp;

	tmp = mr32(MVS_GBL_CTL);

	tmp &= ~(MVS_IRQ_SAS_A | MVS_IRQ_SAS_B);
	mw32(MVS_GBL_INT_STAT, tmp);
	writel(val: tmp, addr: regs + 0x0C);
	writel(val: tmp, addr: regs + 0x10);
	writel(val: tmp, addr: regs + 0x14);
	writel(val: tmp, addr: regs + 0x18);
	mw32(MVS_GBL_CTL, tmp);
}
622 | |
623 | static u32 mvs_94xx_isr_status(struct mvs_info *mvi, int irq) |
624 | { |
625 | void __iomem *regs = mvi->regs_ex; |
626 | u32 stat = 0; |
627 | if (!(mvi->flags & MVF_FLAG_SOC)) { |
628 | stat = mr32(MVS_GBL_INT_STAT); |
629 | |
630 | if (!(stat & (MVS_IRQ_SAS_A | MVS_IRQ_SAS_B))) |
631 | return 0; |
632 | } |
633 | return stat; |
634 | } |
635 | |
636 | static irqreturn_t mvs_94xx_isr(struct mvs_info *mvi, int irq, u32 stat) |
637 | { |
638 | void __iomem *regs = mvi->regs; |
639 | |
640 | if (((stat & MVS_IRQ_SAS_A) && mvi->id == 0) || |
641 | ((stat & MVS_IRQ_SAS_B) && mvi->id == 1)) { |
642 | mw32_f(MVS_INT_STAT, CINT_DONE); |
643 | |
644 | spin_lock(lock: &mvi->lock); |
645 | mvs_int_full(mvi); |
646 | spin_unlock(lock: &mvi->lock); |
647 | } |
648 | return IRQ_HANDLED; |
649 | } |
650 | |
/*
 * mvs_94xx_command_active - force-complete a still-active command slot
 * @mvi:      host instance
 * @slot_idx: slot to check
 *
 * If the slot's bit is still set in the command-active registers, write it
 * back to clear it and busy-wait until the hardware deasserts it.
 * NOTE(review): the register offset uses (slot_idx >> 3) while the bit
 * position uses (slot_idx % 32) -- the addressing convention of
 * mvs_cr32/mvs_cw32 is assumed to make these consistent; confirm against
 * the register map.
 */
static void mvs_94xx_command_active(struct mvs_info *mvi, u32 slot_idx)
{
	u32 tmp;
	tmp = mvs_cr32(mvi, addr: MVS_COMMAND_ACTIVE+(slot_idx >> 3));
	if (tmp & 1 << (slot_idx % 32)) {
		mv_printk("command active %08X,  slot [%x].\n", tmp, slot_idx);
		mvs_cw32(mvi, addr: MVS_COMMAND_ACTIVE + (slot_idx >> 3),
			val: 1 << (slot_idx % 32));
		/* Unbounded busy-wait: spins until hardware clears the bit. */
		do {
			tmp = mvs_cr32(mvi,
				addr: MVS_COMMAND_ACTIVE + (slot_idx >> 3));
		} while (tmp & 1 << (slot_idx % 32));
	}
}
665 | |
/*
 * mvs_94xx_clear_srs_irq - acknowledge SATA register-set interrupts
 * @mvi:       host instance
 * @reg_set:   register set to clear (ignored when @clear_all is set)
 * @clear_all: non-zero to clear every pending bit in both SRS registers
 *
 * SRS register 0 covers sets 0-31, register 1 covers sets 32-63; bits are
 * acknowledged by writing them back.
 */
static void
mvs_94xx_clear_srs_irq(struct mvs_info *mvi, u8 reg_set, u8 clear_all)
{
	void __iomem *regs = mvi->regs;
	u32 tmp;

	if (clear_all) {
		tmp = mr32(MVS_INT_STAT_SRS_0);
		if (tmp) {
			mv_dprintk("check SRS 0 %08X.\n", tmp);
			mw32(MVS_INT_STAT_SRS_0, tmp);
		}
		tmp = mr32(MVS_INT_STAT_SRS_1);
		if (tmp) {
			mv_dprintk("check SRS 1 %08X.\n", tmp);
			mw32(MVS_INT_STAT_SRS_1, tmp);
		}
	} else {
		/* Sets 32-63 live in SRS register 1. */
		if (reg_set > 31)
			tmp = mr32(MVS_INT_STAT_SRS_1);
		else
			tmp = mr32(MVS_INT_STAT_SRS_0);

		if (tmp & (1 << (reg_set % 32))) {
			mv_dprintk("register set 0x%x was stopped.\n", reg_set);
			if (reg_set > 31)
				mw32(MVS_INT_STAT_SRS_1, 1 << (reg_set % 32));
			else
				mw32(MVS_INT_STAT_SRS_0, 1 << (reg_set % 32));
		}
	}
}
698 | |
/*
 * mvs_94xx_issue_stop - stop command issue on this host
 * @mvi:  host instance
 * @type: unused on 94xx (kept for chip-ops interface compatibility)
 * @tfs:  unused on 94xx
 *
 * Clears all SRS interrupts, acknowledges the command-issue-stop status,
 * and sets the stop bits (0xFF00) in the PCS register.
 */
static void mvs_94xx_issue_stop(struct mvs_info *mvi, enum mvs_port_type type,
			u32 tfs)
{
	void __iomem *regs = mvi->regs;
	u32 tmp;
	mvs_94xx_clear_srs_irq(mvi, reg_set: 0, clear_all: 1);

	tmp = mr32(MVS_INT_STAT);
	mw32(MVS_INT_STAT, tmp | CINT_CI_STOP);
	tmp = mr32(MVS_PCS) | 0xFF00;
	mw32(MVS_PCS, tmp);
}
711 | |
/*
 * mvs_94xx_non_spec_ncq_error - handle non-specific NCQ errors
 * @mvi: host instance
 *
 * Each set bit in the two error registers identifies a SATA register set
 * (0-31 in err_0, 32-63 in err_1) whose device hit an NCQ error.  All
 * outstanding tasks for the affected devices are released, then the error
 * bits are written back to acknowledge them.
 */
static void mvs_94xx_non_spec_ncq_error(struct mvs_info *mvi)
{
	void __iomem *regs = mvi->regs;
	u32 err_0, err_1;
	u8 i;
	struct mvs_device *device;

	err_0 = mr32(MVS_NON_NCQ_ERR_0);
	err_1 = mr32(MVS_NON_NCQ_ERR_1);

	mv_dprintk("non specific ncq error err_0:%x,err_1:%x.\n",
			err_0, err_1);
	for (i = 0; i < 32; i++) {
		if (err_0 & bit(i)) {
			device = mvs_find_dev_by_reg_set(mvi, reg_set: i);
			if (device)
				mvs_release_task(mvi, dev: device->sas_device);
		}
		if (err_1 & bit(i)) {
			device = mvs_find_dev_by_reg_set(mvi, reg_set: i+32);
			if (device)
				mvs_release_task(mvi, dev: device->sas_device);
		}
	}

	mw32(MVS_NON_NCQ_ERR_0, err_0);
	mw32(MVS_NON_NCQ_ERR_1, err_1);
}
740 | |
/*
 * mvs_94xx_free_reg_set - release a SATA register set
 * @mvi: host instance
 * @tfs: in/out: register set number; set to MVS_ID_NOT_MAPPED on return
 *
 * Clears the set's bit in the allocation bitmap and pushes the updated
 * half (low word for sets < 32, high word otherwise) to the hardware.
 */
static void mvs_94xx_free_reg_set(struct mvs_info *mvi, u8 *tfs)
{
	void __iomem *regs = mvi->regs;
	u8 reg_set = *tfs;

	/* Nothing to do if no register set was ever assigned. */
	if (*tfs == MVS_ID_NOT_MAPPED)
		return;

	mvi->sata_reg_set &= ~bit(reg_set);
	if (reg_set < 32)
		w_reg_set_enable(reg_set, (u32)mvi->sata_reg_set);
	else
		w_reg_set_enable(reg_set, (u32)(mvi->sata_reg_set >> 32));

	*tfs = MVS_ID_NOT_MAPPED;

	return;
}
759 | |
/*
 * mvs_94xx_assign_reg_set - allocate a free SATA register set
 * @mvi: host instance
 * @tfs: in/out: receives the allocated set number
 *
 * Picks the lowest clear bit in the 64-bit allocation bitmap (mv_ffc64 is
 * assumed to return a negative value when none is free) and programs the
 * corresponding hardware enable word.  Returns 0 on success (including
 * when a set was already assigned), MVS_ID_NOT_MAPPED when all 64 sets
 * are in use.
 */
static u8 mvs_94xx_assign_reg_set(struct mvs_info *mvi, u8 *tfs)
{
	int i;
	void __iomem *regs = mvi->regs;

	/* Already has a register set -- keep it. */
	if (*tfs != MVS_ID_NOT_MAPPED)
		return 0;

	i = mv_ffc64(v: mvi->sata_reg_set);
	if (i >= 32) {
		mvi->sata_reg_set |= bit(i);
		w_reg_set_enable(i, (u32)(mvi->sata_reg_set >> 32));
		*tfs = i;
		return 0;
	} else if (i >= 0) {
		mvi->sata_reg_set |= bit(i);
		w_reg_set_enable(i, (u32)mvi->sata_reg_set);
		*tfs = i;
		return 0;
	}
	return MVS_ID_NOT_MAPPED;
}
782 | |
783 | static void mvs_94xx_make_prd(struct scatterlist *scatter, int nr, void *prd) |
784 | { |
785 | int i; |
786 | struct scatterlist *sg; |
787 | struct mvs_prd *buf_prd = prd; |
788 | struct mvs_prd_imt im_len; |
789 | *(u32 *)&im_len = 0; |
790 | for_each_sg(scatter, sg, nr, i) { |
791 | buf_prd->addr = cpu_to_le64(sg_dma_address(sg)); |
792 | im_len.len = sg_dma_len(sg); |
793 | buf_prd->im_len = cpu_to_le32(*(u32 *)&im_len); |
794 | buf_prd++; |
795 | } |
796 | } |
797 | |
798 | static int mvs_94xx_oob_done(struct mvs_info *mvi, int i) |
799 | { |
800 | u32 phy_st; |
801 | phy_st = mvs_read_phy_ctl(mvi, port: i); |
802 | if (phy_st & PHY_READY_MASK) |
803 | return 1; |
804 | return 0; |
805 | } |
806 | |
/*
 * mvs_94xx_get_dev_identify_frame - read this phy's own identify frame
 * @mvi:     host instance
 * @port_id: phy whose frame to read
 * @id:      destination identify frame (28 bytes copied)
 *
 * Reads the 7 dwords of the local identify frame through the port config
 * window.  NOTE(review): cpu_to_le32 is applied to the value read back --
 * presumably the register data is CPU-endian and the frame buffer is
 * little-endian; confirm against mvs_read_port_cfg_data semantics.
 */
static void mvs_94xx_get_dev_identify_frame(struct mvs_info *mvi, int port_id,
					struct sas_identify_frame *id)
{
	int i;
	u32 id_frame[7];

	for (i = 0; i < 7; i++) {
		mvs_write_port_cfg_addr(mvi, port: port_id,
					addr: CONFIG_ID_FRAME0 + i * 4);
		id_frame[i] = cpu_to_le32(mvs_read_port_cfg_data(mvi, port_id));
	}
	memcpy(id, id_frame, 28);
}
820 | |
/*
 * mvs_94xx_get_att_identify_frame - read the attached device's identify frame
 * @mvi:     host instance
 * @port_id: phy whose attached frame to read
 * @id:      destination identify frame (28 bytes copied)
 *
 * Same register-window access pattern as the local identify frame read,
 * but from the CONFIG_ATT_ID_FRAME0 registers, with per-dword debug
 * logging.
 */
static void mvs_94xx_get_att_identify_frame(struct mvs_info *mvi, int port_id,
					struct sas_identify_frame *id)
{
	int i;
	u32 id_frame[7];

	for (i = 0; i < 7; i++) {
		mvs_write_port_cfg_addr(mvi, port: port_id,
					addr: CONFIG_ATT_ID_FRAME0 + i * 4);
		id_frame[i] = cpu_to_le32(mvs_read_port_cfg_data(mvi, port_id));
		mv_dprintk("94xx phy %d atta frame %d %x.\n",
			port_id + mvi->id * mvi->chip->n_phy, i, id_frame[i]);
	}
	memcpy(id, id_frame, 28);
}
836 | |
837 | static u32 mvs_94xx_make_dev_info(struct sas_identify_frame *id) |
838 | { |
839 | u32 att_dev_info = 0; |
840 | |
841 | att_dev_info |= id->dev_type; |
842 | if (id->stp_iport) |
843 | att_dev_info |= PORT_DEV_STP_INIT; |
844 | if (id->smp_iport) |
845 | att_dev_info |= PORT_DEV_SMP_INIT; |
846 | if (id->ssp_iport) |
847 | att_dev_info |= PORT_DEV_SSP_INIT; |
848 | if (id->stp_tport) |
849 | att_dev_info |= PORT_DEV_STP_TRGT; |
850 | if (id->smp_tport) |
851 | att_dev_info |= PORT_DEV_SMP_TRGT; |
852 | if (id->ssp_tport) |
853 | att_dev_info |= PORT_DEV_SSP_TRGT; |
854 | |
855 | att_dev_info |= (u32)id->phy_id<<24; |
856 | return att_dev_info; |
857 | } |
858 | |
859 | static u32 mvs_94xx_make_att_info(struct sas_identify_frame *id) |
860 | { |
861 | return mvs_94xx_make_dev_info(id); |
862 | } |
863 | |
/*
 * mvs_94xx_fix_phy_info - populate libsas phy info after link-up
 * @mvi: host instance
 * @i:   phy index
 * @id:  scratch identify frame, filled from hardware
 *
 * Extracts the negotiated link rate from the cached phy status (the +0x8
 * offset maps the hardware's 0-based rate code onto the SAS_LINK_RATE_*
 * enum, whose 1.5 Gbps value is 8), reads the identify frames, and builds
 * the device/attached-device info words.  SATA links get a synthetic
 * "STP target" attached-info instead.  Finally the spin-up bit is set.
 */
static void mvs_94xx_fix_phy_info(struct mvs_info *mvi, int i,
				struct sas_identify_frame *id)
{
	struct mvs_phy *phy = &mvi->phy[i];
	struct asd_sas_phy *sas_phy = &phy->sas_phy;
	mv_dprintk("get all reg link rate is 0x%x\n", phy->phy_status);
	sas_phy->linkrate =
		(phy->phy_status & PHY_NEG_SPP_PHYS_LINK_RATE_MASK) >>
			PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET;
	sas_phy->linkrate += 0x8;
	mv_dprintk("get link rate is %d\n", sas_phy->linkrate);
	phy->minimum_linkrate = SAS_LINK_RATE_1_5_GBPS;
	phy->maximum_linkrate = SAS_LINK_RATE_6_0_GBPS;
	mvs_94xx_get_dev_identify_frame(mvi, port_id: i, id);
	phy->dev_info = mvs_94xx_make_dev_info(id);

	if (phy->phy_type & PORT_TYPE_SAS) {
		mvs_94xx_get_att_identify_frame(mvi, port_id: i, id);
		phy->att_dev_info = mvs_94xx_make_att_info(id);
		phy->att_dev_sas_addr = *(u64 *)id->sas_addr;
	} else {
		phy->att_dev_info = PORT_DEV_STP_TRGT | 1;
	}

	/* enable spin up bit */
	mvs_write_port_cfg_addr(mvi, port: i, addr: PHYR_PHY_STAT);
	mvs_write_port_cfg_data(mvi, port: i, val: 0x04);

}
893 | |
/*
 * mvs_94xx_phy_set_link_rate - apply a new maximum link rate to a phy
 * @mvi:    host instance
 * @phy_id: phy to reprogram
 * @rates:  requested link-rate limits (only the maximum is used)
 *
 * Encodes the maximum rate into bits [13:12] of the phy control register
 * and hard-resets the phy so it renegotiates.  NOTE(review): when the
 * requested maximum is 1.5 Gbps, lrmax is 0 and the rate field is left
 * unchanged rather than cleared -- confirm whether that is intentional.
 */
static void mvs_94xx_phy_set_link_rate(struct mvs_info *mvi, u32 phy_id,
			struct sas_phy_linkrates *rates)
{
	u32 lrmax = 0;
	u32 tmp;

	tmp = mvs_read_phy_ctl(mvi, port: phy_id);
	lrmax = (rates->maximum_linkrate - SAS_LINK_RATE_1_5_GBPS) << 12;

	if (lrmax) {
		tmp &= ~(0x3 << 12);
		tmp |= lrmax;
	}
	mvs_write_phy_ctl(mvi, port: phy_id, val: tmp);
	mvs_94xx_phy_reset(mvi, phy_id, hard: PHY_RST_HARD);
}
910 | |
911 | static void mvs_94xx_clear_active_cmds(struct mvs_info *mvi) |
912 | { |
913 | u32 tmp; |
914 | void __iomem *regs = mvi->regs; |
915 | tmp = mr32(MVS_STP_REG_SET_0); |
916 | mw32(MVS_STP_REG_SET_0, 0); |
917 | mw32(MVS_STP_REG_SET_0, tmp); |
918 | tmp = mr32(MVS_STP_REG_SET_1); |
919 | mw32(MVS_STP_REG_SET_1, 0); |
920 | mw32(MVS_STP_REG_SET_1, tmp); |
921 | } |
922 | |
923 | |
924 | static u32 mvs_94xx_spi_read_data(struct mvs_info *mvi) |
925 | { |
926 | void __iomem *regs = mvi->regs_ex - 0x10200; |
927 | return mr32(SPI_RD_DATA_REG_94XX); |
928 | } |
929 | |
930 | static void mvs_94xx_spi_write_data(struct mvs_info *mvi, u32 data) |
931 | { |
932 | void __iomem *regs = mvi->regs_ex - 0x10200; |
933 | |
934 | mw32(SPI_RD_DATA_REG_94XX, data); |
935 | } |
936 | |
937 | |
938 | static int mvs_94xx_spi_buildcmd(struct mvs_info *mvi, |
939 | u32 *dwCmd, |
940 | u8 cmd, |
941 | u8 read, |
942 | u8 length, |
943 | u32 addr |
944 | ) |
945 | { |
946 | void __iomem *regs = mvi->regs_ex - 0x10200; |
947 | u32 dwTmp; |
948 | |
949 | dwTmp = ((u32)cmd << 8) | ((u32)length << 4); |
950 | if (read) |
951 | dwTmp |= SPI_CTRL_READ_94XX; |
952 | |
953 | if (addr != MV_MAX_U32) { |
954 | mw32(SPI_ADDR_REG_94XX, (addr & 0x0003FFFFL)); |
955 | dwTmp |= SPI_ADDR_VLD_94XX; |
956 | } |
957 | |
958 | *dwCmd = dwTmp; |
959 | return 0; |
960 | } |
961 | |
962 | |
963 | static int mvs_94xx_spi_issuecmd(struct mvs_info *mvi, u32 cmd) |
964 | { |
965 | void __iomem *regs = mvi->regs_ex - 0x10200; |
966 | mw32(SPI_CTRL_REG_94XX, cmd | SPI_CTRL_SpiStart_94XX); |
967 | |
968 | return 0; |
969 | } |
970 | |
971 | static int mvs_94xx_spi_waitdataready(struct mvs_info *mvi, u32 timeout) |
972 | { |
973 | void __iomem *regs = mvi->regs_ex - 0x10200; |
974 | u32 i, dwTmp; |
975 | |
976 | for (i = 0; i < timeout; i++) { |
977 | dwTmp = mr32(SPI_CTRL_REG_94XX); |
978 | if (!(dwTmp & SPI_CTRL_SpiStart_94XX)) |
979 | return 0; |
980 | msleep(msecs: 10); |
981 | } |
982 | |
983 | return -1; |
984 | } |
985 | |
/*
 * Pad the PRD (physical region descriptor) table from entry @from up to
 * MAX_SG_ENTRY with dummy entries pointing at a driver-owned bulk
 * buffer, and chain the final entry back to the previous one.
 * Only applied on Vanir A0/B0 silicon; later revisions return early.
 *
 * NOTE(review): the bulk buffer choice keys off @phy_mask (<= 0x08
 * selects bulk_buffer_dma, otherwise bulk_buffer_dma1) -- presumably
 * splitting the two 4-phy host halves; confirm against callers.
 */
static void mvs_94xx_fix_dma(struct mvs_info *mvi, u32 phy_mask,
				int buf_len, int from, void *prd)
{
	int i;
	struct mvs_prd *buf_prd = prd;
	dma_addr_t buf_dma;
	struct mvs_prd_imt im_len;

	/* im_len is a bitfield struct; zero it through a u32 alias */
	*(u32 *)&im_len = 0;
	buf_prd += from;

#define PRD_CHAINED_ENTRY 0x01
	if ((mvi->pdev->revision == VANIR_A0_REV) ||
			(mvi->pdev->revision == VANIR_B0_REV))
		buf_dma = (phy_mask <= 0x08) ?
				mvi->bulk_buffer_dma : mvi->bulk_buffer_dma1;
	else
		return;

	for (i = from; i < MAX_SG_ENTRY; i++, ++buf_prd) {
		if (i == MAX_SG_ENTRY - 1) {
			/* last slot: chain back to the previous entry */
			buf_prd->addr = cpu_to_le64(virt_to_phys(buf_prd - 1));
			im_len.len = 2;
			im_len.misc_ctl = PRD_CHAINED_ENTRY;
		} else {
			/* dummy entry pointing at the shared bulk buffer */
			buf_prd->addr = cpu_to_le64(buf_dma);
			im_len.len = buf_len;
		}
		buf_prd->im_len = cpu_to_le32(*(u32 *)&im_len);
	}
}
1017 | |
1018 | static void mvs_94xx_tune_interrupt(struct mvs_info *mvi, u32 time) |
1019 | { |
1020 | void __iomem *regs = mvi->regs; |
1021 | u32 tmp = 0; |
1022 | /* |
1023 | * the max count is 0x1ff, while our max slot is 0x200, |
1024 | * it will make count 0. |
1025 | */ |
1026 | if (time == 0) { |
1027 | mw32(MVS_INT_COAL, 0); |
1028 | mw32(MVS_INT_COAL_TMOUT, 0x10000); |
1029 | } else { |
1030 | if (MVS_CHIP_SLOT_SZ > 0x1ff) |
1031 | mw32(MVS_INT_COAL, 0x1ff|COAL_EN); |
1032 | else |
1033 | mw32(MVS_INT_COAL, MVS_CHIP_SLOT_SZ|COAL_EN); |
1034 | |
1035 | tmp = 0x10000 | time; |
1036 | mw32(MVS_INT_COAL_TMOUT, tmp); |
1037 | } |
1038 | |
1039 | } |
1040 | |
1041 | static int mvs_94xx_gpio_write(struct mvs_prv_info *mvs_prv, |
1042 | u8 reg_type, u8 reg_index, |
1043 | u8 reg_count, u8 *write_data) |
1044 | { |
1045 | int i; |
1046 | |
1047 | switch (reg_type) { |
1048 | |
1049 | case SAS_GPIO_REG_TX_GP: |
1050 | if (reg_index == 0) |
1051 | return -EINVAL; |
1052 | |
1053 | if (reg_count > 1) |
1054 | return -EINVAL; |
1055 | |
1056 | if (reg_count == 0) |
1057 | return 0; |
1058 | |
1059 | /* maximum supported bits = hosts * 4 drives * 3 bits */ |
1060 | for (i = 0; i < mvs_prv->n_host * 4 * 3; i++) { |
1061 | |
1062 | /* select host */ |
1063 | struct mvs_info *mvi = mvs_prv->mvi[i/(4*3)]; |
1064 | |
1065 | void __iomem *regs = mvi->regs_ex - 0x10200; |
1066 | |
1067 | int drive = (i/3) & (4-1); /* drive number on host */ |
1068 | int driveshift = drive * 8; /* bit offset of drive */ |
1069 | u32 block = ioread32be(regs + MVS_SGPIO_DCTRL + |
1070 | MVS_SGPIO_HOST_OFFSET * mvi->id); |
1071 | |
1072 | /* |
1073 | * if bit is set then create a mask with the first |
1074 | * bit of the drive set in the mask ... |
1075 | */ |
1076 | u32 bit = get_unaligned_be32(p: write_data) & (1 << i) ? |
1077 | 1 << driveshift : 0; |
1078 | |
1079 | /* |
1080 | * ... and then shift it to the right position based |
1081 | * on the led type (activity/id/fail) |
1082 | */ |
1083 | switch (i%3) { |
1084 | case 0: /* activity */ |
1085 | block &= ~((0x7 << MVS_SGPIO_DCTRL_ACT_SHIFT) |
1086 | << driveshift); |
1087 | /* hardwire activity bit to SOF */ |
1088 | block |= LED_BLINKA_SOF << ( |
1089 | MVS_SGPIO_DCTRL_ACT_SHIFT + |
1090 | driveshift); |
1091 | break; |
1092 | case 1: /* id */ |
1093 | block &= ~((0x3 << MVS_SGPIO_DCTRL_LOC_SHIFT) |
1094 | << driveshift); |
1095 | block |= bit << MVS_SGPIO_DCTRL_LOC_SHIFT; |
1096 | break; |
1097 | case 2: /* fail */ |
1098 | block &= ~((0x7 << MVS_SGPIO_DCTRL_ERR_SHIFT) |
1099 | << driveshift); |
1100 | block |= bit << MVS_SGPIO_DCTRL_ERR_SHIFT; |
1101 | break; |
1102 | } |
1103 | |
1104 | iowrite32be(block, |
1105 | regs + MVS_SGPIO_DCTRL + |
1106 | MVS_SGPIO_HOST_OFFSET * mvi->id); |
1107 | |
1108 | } |
1109 | |
1110 | return reg_count; |
1111 | |
1112 | case SAS_GPIO_REG_TX: |
1113 | if (reg_index + reg_count > mvs_prv->n_host) |
1114 | return -EINVAL; |
1115 | |
1116 | for (i = 0; i < reg_count; i++) { |
1117 | struct mvs_info *mvi = mvs_prv->mvi[i+reg_index]; |
1118 | void __iomem *regs = mvi->regs_ex - 0x10200; |
1119 | |
1120 | mw32(MVS_SGPIO_DCTRL + MVS_SGPIO_HOST_OFFSET * mvi->id, |
1121 | ((u32 *) write_data)[i]); |
1122 | } |
1123 | return reg_count; |
1124 | } |
1125 | return -ENOSYS; |
1126 | } |
1127 | |
/*
 * Ops table binding the generic mv_sas core to the 94xx-specific
 * implementations above.  Initializers are positional: the order MUST
 * match the field order of struct mvs_dispatch (see mv_sas.h).
 * NULL entries are hooks this chip family does not implement.
 */
const struct mvs_dispatch mvs_94xx_dispatch = {
	"mv94xx" ,
	mvs_94xx_init,
	NULL,
	mvs_94xx_ioremap,
	mvs_94xx_iounmap,
	mvs_94xx_isr,
	mvs_94xx_isr_status,
	mvs_94xx_interrupt_enable,
	mvs_94xx_interrupt_disable,
	/* phy-control and per-port register accessors (shared helpers) */
	mvs_read_phy_ctl,
	mvs_write_phy_ctl,
	mvs_read_port_cfg_data,
	mvs_write_port_cfg_data,
	mvs_write_port_cfg_addr,
	mvs_read_port_vsr_data,
	mvs_write_port_vsr_data,
	mvs_write_port_vsr_addr,
	mvs_read_port_irq_stat,
	mvs_write_port_irq_stat,
	mvs_read_port_irq_mask,
	mvs_write_port_irq_mask,
	mvs_94xx_command_active,
	mvs_94xx_clear_srs_irq,
	mvs_94xx_issue_stop,
	mvs_start_delivery,
	mvs_rx_update,
	mvs_int_full,
	mvs_94xx_assign_reg_set,
	mvs_94xx_free_reg_set,
	mvs_get_prd_size,
	mvs_get_prd_count,
	mvs_94xx_make_prd,
	mvs_94xx_detect_porttype,
	mvs_94xx_oob_done,
	mvs_94xx_fix_phy_info,
	NULL,
	mvs_94xx_phy_set_link_rate,
	mvs_hw_max_link_rate,
	mvs_94xx_phy_disable,
	mvs_94xx_phy_enable,
	mvs_94xx_phy_reset,
	NULL,
	mvs_94xx_clear_active_cmds,
	/* SPI flash access */
	mvs_94xx_spi_read_data,
	mvs_94xx_spi_write_data,
	mvs_94xx_spi_buildcmd,
	mvs_94xx_spi_issuecmd,
	mvs_94xx_spi_waitdataready,
	mvs_94xx_fix_dma,
	mvs_94xx_tune_interrupt,
	mvs_94xx_non_spec_ncq_error,
	mvs_94xx_gpio_write,
};
1182 | |
1183 | |