1 | // SPDX-License-Identifier: GPL-2.0-only |
2 | /* |
3 | * EP93XX PATA controller driver. |
4 | * |
5 | * Copyright (c) 2012, Metasoft s.c. |
6 | * Rafal Prylowski <prylowski@metasoft.pl> |
7 | * |
8 | * Based on pata_scc.c, pata_icside.c and on earlier version of EP93XX |
9 | * PATA driver by Lennert Buytenhek and Alessandro Zummo. |
10 | * Read/Write timings, resource management and other improvements |
11 | * from driver by Joao Ramos and Bartlomiej Zolnierkiewicz. |
12 | * DMA engine support based on spi-ep93xx.c by Mika Westerberg. |
13 | * |
14 | * Original copyrights: |
15 | * |
16 | * Support for Cirrus Logic's EP93xx (EP9312, EP9315) CPUs |
17 | * PATA host controller driver. |
18 | * |
19 | * Copyright (c) 2009, Bartlomiej Zolnierkiewicz |
20 | * |
21 | * Heavily based on the ep93xx-ide.c driver: |
22 | * |
23 | * Copyright (c) 2009, Joao Ramos <joao.ramos@inov.pt> |
24 | * INESC Inovacao (INOV) |
25 | * |
26 | * EP93XX PATA controller driver. |
27 | * Copyright (C) 2007 Lennert Buytenhek <buytenh@wantstofly.org> |
28 | * |
29 | * An ATA driver for the Cirrus Logic EP93xx PATA controller. |
30 | * |
31 | * Based on an earlier version by Alessandro Zummo, which is: |
32 | * Copyright (C) 2006 Tower Technologies |
33 | */ |
34 | |
35 | #include <linux/err.h> |
36 | #include <linux/kernel.h> |
37 | #include <linux/module.h> |
38 | #include <linux/blkdev.h> |
39 | #include <scsi/scsi_host.h> |
40 | #include <linux/ata.h> |
41 | #include <linux/libata.h> |
42 | #include <linux/platform_device.h> |
43 | #include <linux/sys_soc.h> |
44 | #include <linux/delay.h> |
45 | #include <linux/dmaengine.h> |
46 | #include <linux/ktime.h> |
47 | |
48 | #include <linux/platform_data/dma-ep93xx.h> |
49 | #include <linux/soc/cirrus/ep93xx.h> |
50 | |
51 | #define DRV_NAME "ep93xx-ide" |
52 | #define DRV_VERSION "1.0" |
53 | |
/* EP93xx IDE interface register offsets (from ide_base) and bit definitions. */
enum {
	/* IDE Control Register */
	IDECTRL = 0x00,
	IDECTRL_CS0N = (1 << 0),	/* chip select 0, active low */
	IDECTRL_CS1N = (1 << 1),	/* chip select 1, active low */
	IDECTRL_DIORN = (1 << 5),	/* IDE read strobe, active low */
	IDECTRL_DIOWN = (1 << 6),	/* IDE write strobe, active low */
	IDECTRL_INTRQ = (1 << 9),	/* state of the INTRQ input pin */
	IDECTRL_IORDY = (1 << 10),	/* state of the IORDY input pin */
	/*
	 * the device IDE register to be accessed is selected through
	 * IDECTRL register's specific bitfields 'DA', 'CS1N' and 'CS0N':
	 *   b4   b3   b2    b1     b0
	 *   A2   A1   A0   CS1N   CS0N
	 * the values filled in this structure allows the value to be directly
	 * ORed to the IDECTRL register, hence giving directly the A[2:0] and
	 * CS1N/CS0N values for each IDE register.
	 * The values correspond to the transformation:
	 *   ((real IDE address) << 2) | CS1N value << 1 | CS0N value
	 */
	IDECTRL_ADDR_CMD = 0 + 2, /* CS1 */
	IDECTRL_ADDR_DATA = (ATA_REG_DATA << 2) + 2,
	IDECTRL_ADDR_ERROR = (ATA_REG_ERR << 2) + 2,
	IDECTRL_ADDR_FEATURE = (ATA_REG_FEATURE << 2) + 2,
	IDECTRL_ADDR_NSECT = (ATA_REG_NSECT << 2) + 2,
	IDECTRL_ADDR_LBAL = (ATA_REG_LBAL << 2) + 2,
	IDECTRL_ADDR_LBAM = (ATA_REG_LBAM << 2) + 2,
	IDECTRL_ADDR_LBAH = (ATA_REG_LBAH << 2) + 2,
	IDECTRL_ADDR_DEVICE = (ATA_REG_DEVICE << 2) + 2,
	IDECTRL_ADDR_STATUS = (ATA_REG_STATUS << 2) + 2,
	IDECTRL_ADDR_COMMAND = (ATA_REG_CMD << 2) + 2,
	IDECTRL_ADDR_ALTSTATUS = (0x06 << 2) + 1, /* CS0 */
	IDECTRL_ADDR_CTL = (0x06 << 2) + 1, /* CS0 */

	/* IDE Configuration Register */
	IDECFG = 0x04,
	IDECFG_IDEEN = (1 << 0),	/* enable the IDE interface */
	IDECFG_PIO = (1 << 1),		/* select PIO operation */
	IDECFG_MDMA = (1 << 2),		/* select multiword DMA operation */
	IDECFG_UDMA = (1 << 3),		/* select ultra DMA operation */
	IDECFG_MODE_SHIFT = 4,		/* transfer mode number field */
	IDECFG_MODE_MASK = (0xf << 4),
	IDECFG_WST_SHIFT = 8,		/* PIO write hold (wait state) field */
	IDECFG_WST_MASK = (0x3 << 8),

	/* MDMA Operation Register */
	IDEMDMAOP = 0x08,

	/* UDMA Operation Register */
	IDEUDMAOP = 0x0c,
	IDEUDMAOP_UEN = (1 << 0),	/* enable UDMA transfer */
	IDEUDMAOP_RWOP = (1 << 1),	/* transfer direction: set = write */
	
	/* PIO/MDMA/UDMA Data Registers */
	IDEDATAOUT = 0x10,
	IDEDATAIN = 0x14,
	IDEMDMADATAOUT = 0x18,
	IDEMDMADATAIN = 0x1c,
	IDEUDMADATAOUT = 0x20,
	IDEUDMADATAIN = 0x24,

	/* UDMA Status Register (bit meanings documented in dma_status below) */
	IDEUDMASTS = 0x28,
	IDEUDMASTS_DMAIDE = (1 << 16),
	IDEUDMASTS_INTIDE = (1 << 17),
	IDEUDMASTS_SBUSY = (1 << 18),
	IDEUDMASTS_NDO = (1 << 24),
	IDEUDMASTS_NDI = (1 << 25),
	IDEUDMASTS_N4X = (1 << 26),

	/* UDMA Debug Status Register */
	IDEUDMADEBUG = 0x2c,
};
127 | |
/* Per-host driver state, stored in ata_host->private_data. */
struct ep93xx_pata_data {
	const struct platform_device *pdev;	/* owning platform device */
	void __iomem *ide_base;			/* mapped IDE register block */
	struct ata_timing t;			/* current (merged) PIO timings */
	bool iordy;				/* lengthen PIO cycles on IORDY */

	/* physical addresses of the UDMA data FIFOs, for dmaengine config */
	unsigned long udma_in_phys;
	unsigned long udma_out_phys;

	/* device-to-memory (read) and memory-to-device (write) channels */
	struct dma_chan *dma_rx_channel;
	struct ep93xx_dma_data dma_rx_data;
	struct dma_chan *dma_tx_channel;
	struct ep93xx_dma_data dma_tx_data;
};
142 | |
143 | static void ep93xx_pata_clear_regs(void __iomem *base) |
144 | { |
145 | writel(val: IDECTRL_CS0N | IDECTRL_CS1N | IDECTRL_DIORN | |
146 | IDECTRL_DIOWN, addr: base + IDECTRL); |
147 | |
148 | writel(val: 0, addr: base + IDECFG); |
149 | writel(val: 0, addr: base + IDEMDMAOP); |
150 | writel(val: 0, addr: base + IDEUDMAOP); |
151 | writel(val: 0, addr: base + IDEDATAOUT); |
152 | writel(val: 0, addr: base + IDEDATAIN); |
153 | writel(val: 0, addr: base + IDEMDMADATAOUT); |
154 | writel(val: 0, addr: base + IDEMDMADATAIN); |
155 | writel(val: 0, addr: base + IDEUDMADATAOUT); |
156 | writel(val: 0, addr: base + IDEUDMADATAIN); |
157 | writel(val: 0, addr: base + IDEUDMADEBUG); |
158 | } |
159 | |
160 | static bool ep93xx_pata_check_iordy(void __iomem *base) |
161 | { |
162 | return !!(readl(addr: base + IDECTRL) & IDECTRL_IORDY); |
163 | } |
164 | |
165 | /* |
166 | * According to EP93xx User's Guide, WST field of IDECFG specifies number |
167 | * of HCLK cycles to hold the data bus after a PIO write operation. |
168 | * It should be programmed to guarantee following delays: |
169 | * |
170 | * PIO Mode [ns] |
171 | * 0 30 |
172 | * 1 20 |
173 | * 2 15 |
174 | * 3 10 |
175 | * 4 5 |
176 | * |
177 | * Maximum possible value for HCLK is 100MHz. |
178 | */ |
179 | static int ep93xx_pata_get_wst(int pio_mode) |
180 | { |
181 | int val; |
182 | |
183 | if (pio_mode == 0) |
184 | val = 3; |
185 | else if (pio_mode < 3) |
186 | val = 2; |
187 | else |
188 | val = 1; |
189 | |
190 | return val << IDECFG_WST_SHIFT; |
191 | } |
192 | |
193 | static void ep93xx_pata_enable_pio(void __iomem *base, int pio_mode) |
194 | { |
195 | writel(val: IDECFG_IDEEN | IDECFG_PIO | |
196 | ep93xx_pata_get_wst(pio_mode) | |
197 | (pio_mode << IDECFG_MODE_SHIFT), addr: base + IDECFG); |
198 | } |
199 | |
/*
 * Based on delay loop found in mach-pxa/mp900.c.
 *
 * Single iteration should take 5 cpu cycles. This is 25ns assuming the
 * fastest ep93xx cpu speed (200MHz) and is better optimized for PIO4 timings
 * than eg. 20ns.
 */
static void ep93xx_pata_delay(unsigned long count)
{
	/*
	 * Busy-wait ~count iterations (~25ns each at 200MHz).  The nop
	 * 'mov r0, r0' pads the loop; 'subs'/'bge' decrement and loop
	 * while the counter stays non-negative.  ARM-only inline asm.
	 */
	__asm__ volatile (
		"0:\n"
		"mov r0, r0\n"
		"subs %0, %1, #1\n"
		"bge 0b\n"
		: "=r" (count)
		: "0" (count)
	);
}
218 | |
/*
 * Poll IORDY until the device releases it (or until timeout) and return
 * the number of extra delay-loop iterations spent waiting, so the caller
 * can account for them when completing the cycle timing.
 */
static unsigned long ep93xx_pata_wait_for_iordy(void __iomem *base,
						unsigned long t2)
{
	/*
	 * According to ATA specification, IORDY pin can be first sampled
	 * tA = 35ns after activation of DIOR-/DIOW-. Maximum IORDY pulse
	 * width is tB = 1250ns.
	 *
	 * We are already t2 delay loop iterations after activation of
	 * DIOR-/DIOW-, so we set timeout to (1250 + 35) / 25 - t2 additional
	 * delay loop iterations.
	 */
	unsigned long start = (1250 + 35) / 25 - t2;
	unsigned long counter = start;

	/*
	 * NOTE(review): on timeout the post-decrement leaves counter at
	 * ULONG_MAX, so 'start - counter' wraps to start + 1 (the full
	 * timeout) - looks intentional, but confirm against upstream.
	 */
	while (!ep93xx_pata_check_iordy(base) && counter--)
		ep93xx_pata_delay(count: 1);
	return start - counter;
}
238 | |
239 | /* common part at start of ep93xx_pata_read/write() */ |
240 | static void ep93xx_pata_rw_begin(void __iomem *base, unsigned long addr, |
241 | unsigned long t1) |
242 | { |
243 | writel(val: IDECTRL_DIOWN | IDECTRL_DIORN | addr, addr: base + IDECTRL); |
244 | ep93xx_pata_delay(count: t1); |
245 | } |
246 | |
/* common part at end of ep93xx_pata_read/write() */
static void ep93xx_pata_rw_end(void __iomem *base, unsigned long addr,
			       bool iordy, unsigned long t0, unsigned long t2,
			       unsigned long t2i)
{
	/* hold the active strobe for the t2 active time */
	ep93xx_pata_delay(count: t2);
	/* lengthen t2 if needed */
	if (iordy)
		t2 += ep93xx_pata_wait_for_iordy(base, t2);
	/* deassert both strobes, completing the access */
	writel(val: IDECTRL_DIOWN | IDECTRL_DIORN | addr, addr: base + IDECTRL);
	/* honour the longer of total-cycle remainder (t0 - t2) and t2i recovery */
	if (t0 > t2 && t0 - t2 > t2i)
		ep93xx_pata_delay(count: t0 - t2);
	else
		ep93xx_pata_delay(count: t2i);
}
262 | |
/*
 * Perform one bit-banged PIO read cycle at the given IDECTRL address.
 * @reg selects 8-bit taskfile-register timings (true) vs 16-bit data
 * timings (false) from the precomputed ata_timing.
 */
static u16 ep93xx_pata_read(struct ep93xx_pata_data *drv_data,
			    unsigned long addr,
			    bool reg)
{
	void __iomem *base = drv_data->ide_base;
	const struct ata_timing *t = &drv_data->t;
	unsigned long t0 = reg ? t->cyc8b : t->cycle;
	unsigned long t2 = reg ? t->act8b : t->active;
	unsigned long t2i = reg ? t->rec8b : t->recover;

	ep93xx_pata_rw_begin(base, addr, t1: t->setup);
	/* assert DIOR- (clear DIORN) to start the read strobe */
	writel(val: IDECTRL_DIOWN | addr, addr: base + IDECTRL);
	/*
	 * The IDEDATAIN register is loaded from the DD pins at the positive
	 * edge of the DIORN signal. (EP93xx UG p27-14)
	 */
	ep93xx_pata_rw_end(base, addr, iordy: drv_data->iordy, t0, t2, t2i);
	return readl(addr: base + IDEDATAIN);
}
282 | |
283 | /* IDE register read */ |
284 | static u16 ep93xx_pata_read_reg(struct ep93xx_pata_data *drv_data, |
285 | unsigned long addr) |
286 | { |
287 | return ep93xx_pata_read(drv_data, addr, reg: true); |
288 | } |
289 | |
290 | /* PIO data read */ |
291 | static u16 ep93xx_pata_read_data(struct ep93xx_pata_data *drv_data, |
292 | unsigned long addr) |
293 | { |
294 | return ep93xx_pata_read(drv_data, addr, reg: false); |
295 | } |
296 | |
/*
 * Perform one bit-banged PIO write cycle at the given IDECTRL address.
 * @reg selects 8-bit taskfile-register timings (true) vs 16-bit data
 * timings (false), mirroring ep93xx_pata_read().
 */
static void ep93xx_pata_write(struct ep93xx_pata_data *drv_data,
			      u16 value, unsigned long addr,
			      bool reg)
{
	void __iomem *base = drv_data->ide_base;
	const struct ata_timing *t = &drv_data->t;
	unsigned long t0 = reg ? t->cyc8b : t->cycle;
	unsigned long t2 = reg ? t->act8b : t->active;
	unsigned long t2i = reg ? t->rec8b : t->recover;

	ep93xx_pata_rw_begin(base, addr, t1: t->setup);
	/*
	 * Value from IDEDATAOUT register is driven onto the DD pins when
	 * DIOWN is low. (EP93xx UG p27-13)
	 */
	writel(val: value, addr: base + IDEDATAOUT);
	/* assert DIOW- (clear DIOWN) to start the write strobe */
	writel(val: IDECTRL_DIORN | addr, addr: base + IDECTRL);
	ep93xx_pata_rw_end(base, addr, iordy: drv_data->iordy, t0, t2, t2i);
}
316 | |
317 | /* IDE register write */ |
318 | static void ep93xx_pata_write_reg(struct ep93xx_pata_data *drv_data, |
319 | u16 value, unsigned long addr) |
320 | { |
321 | ep93xx_pata_write(drv_data, value, addr, reg: true); |
322 | } |
323 | |
324 | /* PIO data write */ |
325 | static void ep93xx_pata_write_data(struct ep93xx_pata_data *drv_data, |
326 | u16 value, unsigned long addr) |
327 | { |
328 | ep93xx_pata_write(drv_data, value, addr, reg: false); |
329 | } |
330 | |
/*
 * libata ->set_piomode hook: compute delay-loop timings for @adev's PIO
 * mode (merged with the pair device's setup/8-bit timings, since both
 * share the bus) and program the controller for PIO.
 */
static void ep93xx_pata_set_piomode(struct ata_port *ap,
				    struct ata_device *adev)
{
	struct ep93xx_pata_data *drv_data = ap->host->private_data;
	struct ata_device *pair = ata_dev_pair(adev);
	/*
	 * Calculate timings for the delay loop, assuming ep93xx cpu speed
	 * is 200MHz (maximum possible for ep93xx). If actual cpu speed is
	 * slower, we will wait a bit longer in each delay.
	 * Additional division of cpu speed by 5, because single iteration
	 * of our delay loop takes 5 cpu cycles (25ns).
	 */
	unsigned long T = 1000000 / (200 / 5);

	ata_timing_compute(adev, adev->pio_mode, &drv_data->t, T, 0);
	if (pair && pair->pio_mode) {
		struct ata_timing t;
		ata_timing_compute(pair, pair->pio_mode, &t, T, 0);
		ata_timing_merge(&t, &drv_data->t, &drv_data->t,
			ATA_TIMING_SETUP | ATA_TIMING_8BIT);
	}
	drv_data->iordy = ata_pio_need_iordy(adev);

	ep93xx_pata_enable_pio(base: drv_data->ide_base,
			       pio_mode: adev->pio_mode - XFER_PIO_0);
}
357 | |
358 | /* Note: original code is ata_sff_check_status */ |
359 | static u8 ep93xx_pata_check_status(struct ata_port *ap) |
360 | { |
361 | struct ep93xx_pata_data *drv_data = ap->host->private_data; |
362 | |
363 | return ep93xx_pata_read_reg(drv_data, addr: IDECTRL_ADDR_STATUS); |
364 | } |
365 | |
366 | static u8 ep93xx_pata_check_altstatus(struct ata_port *ap) |
367 | { |
368 | struct ep93xx_pata_data *drv_data = ap->host->private_data; |
369 | |
370 | return ep93xx_pata_read_reg(drv_data, addr: IDECTRL_ADDR_ALTSTATUS); |
371 | } |
372 | |
/* Note: original code is ata_sff_tf_load */
/*
 * Write a taskfile to the device registers: control first (if changed),
 * then the HOB bytes for LBA48, then the current bytes, then device.
 */
static void ep93xx_pata_tf_load(struct ata_port *ap,
				const struct ata_taskfile *tf)
{
	struct ep93xx_pata_data *drv_data = ap->host->private_data;
	unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;

	/* update the Device Control register only when it changed */
	if (tf->ctl != ap->last_ctl) {
		ep93xx_pata_write_reg(drv_data, value: tf->ctl, addr: IDECTRL_ADDR_CTL);
		ap->last_ctl = tf->ctl;
		ata_wait_idle(ap);
	}

	/* high-order bytes first, so they land in the HOB latches */
	if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
		ep93xx_pata_write_reg(drv_data, value: tf->hob_feature,
			addr: IDECTRL_ADDR_FEATURE);
		ep93xx_pata_write_reg(drv_data, value: tf->hob_nsect,
			addr: IDECTRL_ADDR_NSECT);
		ep93xx_pata_write_reg(drv_data, value: tf->hob_lbal,
			addr: IDECTRL_ADDR_LBAL);
		ep93xx_pata_write_reg(drv_data, value: tf->hob_lbam,
			addr: IDECTRL_ADDR_LBAM);
		ep93xx_pata_write_reg(drv_data, value: tf->hob_lbah,
			addr: IDECTRL_ADDR_LBAH);
	}

	if (is_addr) {
		ep93xx_pata_write_reg(drv_data, value: tf->feature,
			addr: IDECTRL_ADDR_FEATURE);
		ep93xx_pata_write_reg(drv_data, value: tf->nsect, addr: IDECTRL_ADDR_NSECT);
		ep93xx_pata_write_reg(drv_data, value: tf->lbal, addr: IDECTRL_ADDR_LBAL);
		ep93xx_pata_write_reg(drv_data, value: tf->lbam, addr: IDECTRL_ADDR_LBAM);
		ep93xx_pata_write_reg(drv_data, value: tf->lbah, addr: IDECTRL_ADDR_LBAH);
	}

	if (tf->flags & ATA_TFLAG_DEVICE)
		ep93xx_pata_write_reg(drv_data, value: tf->device,
			addr: IDECTRL_ADDR_DEVICE);

	ata_wait_idle(ap);
}
414 | |
/* Note: original code is ata_sff_tf_read */
/*
 * Read the taskfile back from the device.  For LBA48, the HOB bytes are
 * fetched by setting ATA_HOB in the control register, reading the shadow
 * registers again, then restoring the control register.
 */
static void ep93xx_pata_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
{
	struct ep93xx_pata_data *drv_data = ap->host->private_data;

	tf->status = ep93xx_pata_check_status(ap);
	tf->error = ep93xx_pata_read_reg(drv_data, addr: IDECTRL_ADDR_FEATURE);
	tf->nsect = ep93xx_pata_read_reg(drv_data, addr: IDECTRL_ADDR_NSECT);
	tf->lbal = ep93xx_pata_read_reg(drv_data, addr: IDECTRL_ADDR_LBAL);
	tf->lbam = ep93xx_pata_read_reg(drv_data, addr: IDECTRL_ADDR_LBAM);
	tf->lbah = ep93xx_pata_read_reg(drv_data, addr: IDECTRL_ADDR_LBAH);
	tf->device = ep93xx_pata_read_reg(drv_data, addr: IDECTRL_ADDR_DEVICE);

	if (tf->flags & ATA_TFLAG_LBA48) {
		/* switch the shadow registers to expose the HOB values */
		ep93xx_pata_write_reg(drv_data, value: tf->ctl | ATA_HOB,
			addr: IDECTRL_ADDR_CTL);
		tf->hob_feature = ep93xx_pata_read_reg(drv_data,
			addr: IDECTRL_ADDR_FEATURE);
		tf->hob_nsect = ep93xx_pata_read_reg(drv_data,
			addr: IDECTRL_ADDR_NSECT);
		tf->hob_lbal = ep93xx_pata_read_reg(drv_data,
			addr: IDECTRL_ADDR_LBAL);
		tf->hob_lbam = ep93xx_pata_read_reg(drv_data,
			addr: IDECTRL_ADDR_LBAM);
		tf->hob_lbah = ep93xx_pata_read_reg(drv_data,
			addr: IDECTRL_ADDR_LBAH);
		/* restore the original control value */
		ep93xx_pata_write_reg(drv_data, value: tf->ctl, addr: IDECTRL_ADDR_CTL);
		ap->last_ctl = tf->ctl;
	}
}
445 | |
446 | /* Note: original code is ata_sff_exec_command */ |
447 | static void ep93xx_pata_exec_command(struct ata_port *ap, |
448 | const struct ata_taskfile *tf) |
449 | { |
450 | struct ep93xx_pata_data *drv_data = ap->host->private_data; |
451 | |
452 | ep93xx_pata_write_reg(drv_data, value: tf->command, |
453 | addr: IDECTRL_ADDR_COMMAND); |
454 | ata_sff_pause(ap); |
455 | } |
456 | |
457 | /* Note: original code is ata_sff_dev_select */ |
458 | static void ep93xx_pata_dev_select(struct ata_port *ap, unsigned int device) |
459 | { |
460 | struct ep93xx_pata_data *drv_data = ap->host->private_data; |
461 | u8 tmp = ATA_DEVICE_OBS; |
462 | |
463 | if (device != 0) |
464 | tmp |= ATA_DEV1; |
465 | |
466 | ep93xx_pata_write_reg(drv_data, value: tmp, addr: IDECTRL_ADDR_DEVICE); |
467 | ata_sff_pause(ap); /* needed; also flushes, for mmio */ |
468 | } |
469 | |
470 | /* Note: original code is ata_sff_set_devctl */ |
471 | static void ep93xx_pata_set_devctl(struct ata_port *ap, u8 ctl) |
472 | { |
473 | struct ep93xx_pata_data *drv_data = ap->host->private_data; |
474 | |
475 | ep93xx_pata_write_reg(drv_data, value: ctl, addr: IDECTRL_ADDR_CTL); |
476 | } |
477 | |
/* Note: original code is ata_sff_data_xfer */
/*
 * PIO data transfer in 16-bit words through the bit-banged data register.
 * Returns the number of bytes consumed (rounded up for an odd tail byte).
 */
static unsigned int ep93xx_pata_data_xfer(struct ata_queued_cmd *qc,
					  unsigned char *buf,
					  unsigned int buflen, int rw)
{
	struct ata_port *ap = qc->dev->link->ap;
	struct ep93xx_pata_data *drv_data = ap->host->private_data;
	u16 *data = (u16 *)buf;
	unsigned int words = buflen >> 1;

	/* Transfer multiple of 2 bytes */
	while (words--)
		if (rw == READ)
			*data++ = cpu_to_le16(
				ep93xx_pata_read_data(
					drv_data, IDECTRL_ADDR_DATA));
		else
			ep93xx_pata_write_data(drv_data, le16_to_cpu(*data++),
				addr: IDECTRL_ADDR_DATA);

	/* Transfer trailing 1 byte, if any. */
	if (unlikely(buflen & 0x01)) {
		unsigned char pad[2] = { };

		buf += buflen - 1;

		if (rw == READ) {
			/*
			 * NOTE(review): '*pad' is pad[0] (a char), so the
			 * 16-bit value is truncated to its low byte here;
			 * only pad[0] is ever used - presumably LE-only by
			 * design, confirm against upstream/ata_sff_data_xfer.
			 */
			*pad = cpu_to_le16(
				ep93xx_pata_read_data(
					drv_data, IDECTRL_ADDR_DATA));
			*buf = pad[0];
		} else {
			pad[0] = *buf;
			ep93xx_pata_write_data(drv_data, le16_to_cpu(*pad),
				addr: IDECTRL_ADDR_DATA);
		}
		words++;
	}

	return words << 1;
}
519 | |
520 | /* Note: original code is ata_devchk */ |
521 | static bool ep93xx_pata_device_is_present(struct ata_port *ap, |
522 | unsigned int device) |
523 | { |
524 | struct ep93xx_pata_data *drv_data = ap->host->private_data; |
525 | u8 nsect, lbal; |
526 | |
527 | ap->ops->sff_dev_select(ap, device); |
528 | |
529 | ep93xx_pata_write_reg(drv_data, value: 0x55, addr: IDECTRL_ADDR_NSECT); |
530 | ep93xx_pata_write_reg(drv_data, value: 0xaa, addr: IDECTRL_ADDR_LBAL); |
531 | |
532 | ep93xx_pata_write_reg(drv_data, value: 0xaa, addr: IDECTRL_ADDR_NSECT); |
533 | ep93xx_pata_write_reg(drv_data, value: 0x55, addr: IDECTRL_ADDR_LBAL); |
534 | |
535 | ep93xx_pata_write_reg(drv_data, value: 0x55, addr: IDECTRL_ADDR_NSECT); |
536 | ep93xx_pata_write_reg(drv_data, value: 0xaa, addr: IDECTRL_ADDR_LBAL); |
537 | |
538 | nsect = ep93xx_pata_read_reg(drv_data, addr: IDECTRL_ADDR_NSECT); |
539 | lbal = ep93xx_pata_read_reg(drv_data, addr: IDECTRL_ADDR_LBAL); |
540 | |
541 | if ((nsect == 0x55) && (lbal == 0xaa)) |
542 | return true; |
543 | |
544 | return false; |
545 | } |
546 | |
/* Note: original code is ata_sff_wait_after_reset */
/*
 * Wait for devices on @link to become ready after a bus reset.
 * Returns 0 on success, -ENODEV if only the slave is missing, or another
 * negative errno on hard failure.
 */
static int ep93xx_pata_wait_after_reset(struct ata_link *link,
					unsigned int devmask,
					unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	struct ep93xx_pata_data *drv_data = ap->host->private_data;
	unsigned int dev0 = devmask & (1 << 0);
	unsigned int dev1 = devmask & (1 << 1);
	int rc, ret = 0;

	ata_msleep(ap, msecs: ATA_WAIT_AFTER_RESET);

	/* always check readiness of the master device */
	rc = ata_sff_wait_ready(link, deadline);
	/*
	 * -ENODEV means the odd clown forgot the D7 pulldown resistor
	 * and TF status is 0xff, bail out on it too.
	 */
	if (rc)
		return rc;

	/*
	 * if device 1 was found in ata_devchk, wait for register
	 * access briefly, then wait for BSY to clear.
	 */
	if (dev1) {
		int i;

		ap->ops->sff_dev_select(ap, 1);

		/*
		 * Wait for register access. Some ATAPI devices fail
		 * to set nsect/lbal after reset, so don't waste too
		 * much time on it. We're gonna wait for !BSY anyway.
		 */
		for (i = 0; i < 2; i++) {
			u8 nsect, lbal;

			nsect = ep93xx_pata_read_reg(drv_data,
				addr: IDECTRL_ADDR_NSECT);
			lbal = ep93xx_pata_read_reg(drv_data,
				addr: IDECTRL_ADDR_LBAL);
			/* post-reset signature: nsect == lbal == 1 */
			if (nsect == 1 && lbal == 1)
				break;
			msleep(msecs: 50); /* give drive a breather */
		}

		rc = ata_sff_wait_ready(link, deadline);
		if (rc) {
			/* a missing slave is tolerated, remember it */
			if (rc != -ENODEV)
				return rc;
			ret = rc;
		}
	}
	/* is all this really necessary? */
	ap->ops->sff_dev_select(ap, 0);
	if (dev1)
		ap->ops->sff_dev_select(ap, 1);
	if (dev0)
		ap->ops->sff_dev_select(ap, 0);

	return ret;
}
611 | |
/* Note: original code is ata_bus_softreset */
/*
 * Pulse SRST in the Device Control register (set, then clear) and wait
 * for the devices in @devmask to come back.
 */
static int ep93xx_pata_bus_softreset(struct ata_port *ap, unsigned int devmask,
				     unsigned long deadline)
{
	struct ep93xx_pata_data *drv_data = ap->host->private_data;

	ep93xx_pata_write_reg(drv_data, value: ap->ctl, addr: IDECTRL_ADDR_CTL);
	udelay(20);	/* FIXME: flush */
	ep93xx_pata_write_reg(drv_data, value: ap->ctl | ATA_SRST, addr: IDECTRL_ADDR_CTL);
	udelay(20);	/* FIXME: flush */
	ep93xx_pata_write_reg(drv_data, value: ap->ctl, addr: IDECTRL_ADDR_CTL);
	ap->last_ctl = ap->ctl;

	return ep93xx_pata_wait_after_reset(link: &ap->link, devmask, deadline);
}
627 | |
628 | static void ep93xx_pata_release_dma(struct ep93xx_pata_data *drv_data) |
629 | { |
630 | if (drv_data->dma_rx_channel) { |
631 | dma_release_channel(chan: drv_data->dma_rx_channel); |
632 | drv_data->dma_rx_channel = NULL; |
633 | } |
634 | if (drv_data->dma_tx_channel) { |
635 | dma_release_channel(chan: drv_data->dma_tx_channel); |
636 | drv_data->dma_tx_channel = NULL; |
637 | } |
638 | } |
639 | |
640 | static bool ep93xx_pata_dma_filter(struct dma_chan *chan, void *filter_param) |
641 | { |
642 | if (ep93xx_dma_chan_is_m2p(chan)) |
643 | return false; |
644 | |
645 | chan->private = filter_param; |
646 | return true; |
647 | } |
648 | |
/*
 * Acquire and configure the rx/tx slave DMA channels for UDMA transfers.
 * On any failure the channels are released and the driver silently falls
 * back to PIO (this function returns void).
 */
static void ep93xx_pata_dma_init(struct ep93xx_pata_data *drv_data)
{
	const struct platform_device *pdev = drv_data->pdev;
	dma_cap_mask_t mask;
	struct dma_slave_config conf;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/*
	 * Request two channels for IDE. Another possibility would be
	 * to request only one channel, and reprogram it's direction at
	 * start of new transfer.
	 */
	drv_data->dma_rx_data.port = EP93XX_DMA_IDE;
	drv_data->dma_rx_data.direction = DMA_DEV_TO_MEM;
	drv_data->dma_rx_data.name = "ep93xx-pata-rx";
	drv_data->dma_rx_channel = dma_request_channel(mask,
		ep93xx_pata_dma_filter, &drv_data->dma_rx_data);
	if (!drv_data->dma_rx_channel)
		return;

	drv_data->dma_tx_data.port = EP93XX_DMA_IDE;
	drv_data->dma_tx_data.direction = DMA_MEM_TO_DEV;
	drv_data->dma_tx_data.name = "ep93xx-pata-tx";
	drv_data->dma_tx_channel = dma_request_channel(mask,
		ep93xx_pata_dma_filter, &drv_data->dma_tx_data);
	if (!drv_data->dma_tx_channel) {
		dma_release_channel(chan: drv_data->dma_rx_channel);
		return;
	}

	/* Configure receive channel direction and source address */
	memset(&conf, 0, sizeof(conf));
	conf.direction = DMA_DEV_TO_MEM;
	conf.src_addr = drv_data->udma_in_phys;
	conf.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	if (dmaengine_slave_config(chan: drv_data->dma_rx_channel, config: &conf)) {
		dev_err(&pdev->dev, "failed to configure rx dma channel\n");
		ep93xx_pata_release_dma(drv_data);
		return;
	}

	/* Configure transmit channel direction and destination address */
	memset(&conf, 0, sizeof(conf));
	conf.direction = DMA_MEM_TO_DEV;
	conf.dst_addr = drv_data->udma_out_phys;
	conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	if (dmaengine_slave_config(chan: drv_data->dma_tx_channel, config: &conf)) {
		dev_err(&pdev->dev, "failed to configure tx dma channel\n");
		ep93xx_pata_release_dma(drv_data);
	}
}
702 | |
/*
 * libata ->bmdma_start hook: kick off the dmaengine transfer for @qc and
 * switch the controller into UDMA mode for the device's xfer mode.
 */
static void ep93xx_pata_dma_start(struct ata_queued_cmd *qc)
{
	struct dma_async_tx_descriptor *txd;
	struct ep93xx_pata_data *drv_data = qc->ap->host->private_data;
	void __iomem *base = drv_data->ide_base;
	struct ata_device *adev = qc->dev;
	/* RWOP set = write (memory to device), clear = read */
	u32 v = qc->dma_dir == DMA_TO_DEVICE ? IDEUDMAOP_RWOP : 0;
	struct dma_chan *channel = qc->dma_dir == DMA_TO_DEVICE
		? drv_data->dma_tx_channel : drv_data->dma_rx_channel;

	txd = dmaengine_prep_slave_sg(chan: channel, sgl: qc->sg, sg_len: qc->n_elem, dir: qc->dma_dir,
		flags: DMA_CTRL_ACK);
	if (!txd) {
		dev_err(qc->ap->dev, "failed to prepare slave for sg dma\n");
		return;
	}
	/* completion is detected by polling ->bmdma_status, no callback */
	txd->callback = NULL;
	txd->callback_param = NULL;

	if (dmaengine_submit(desc: txd) < 0) {
		dev_err(qc->ap->dev, "failed to submit dma transfer\n");
		return;
	}
	dma_async_issue_pending(chan: channel);

	/*
	 * When enabling UDMA operation, IDEUDMAOP register needs to be
	 * programmed in three step sequence:
	 * 1) set or clear the RWOP bit,
	 * 2) perform dummy read of the register,
	 * 3) set the UEN bit.
	 */
	writel(val: v, addr: base + IDEUDMAOP);
	readl(addr: base + IDEUDMAOP);
	writel(val: v | IDEUDMAOP_UEN, addr: base + IDEUDMAOP);

	/* switch the interface from PIO to UDMA in the requested mode */
	writel(val: IDECFG_IDEEN | IDECFG_UDMA |
		((adev->xfer_mode - XFER_UDMA_0) << IDECFG_MODE_SHIFT),
		addr: base + IDECFG);
}
743 | |
/*
 * libata ->bmdma_stop hook: abort any in-flight DMA, reset the UDMA
 * engine and return the controller to PIO mode.
 */
static void ep93xx_pata_dma_stop(struct ata_queued_cmd *qc)
{
	struct ep93xx_pata_data *drv_data = qc->ap->host->private_data;
	void __iomem *base = drv_data->ide_base;

	/* terminate all dma transfers, if not yet finished */
	dmaengine_terminate_all(chan: drv_data->dma_rx_channel);
	dmaengine_terminate_all(chan: drv_data->dma_tx_channel);

	/*
	 * To properly stop IDE-DMA, IDEUDMAOP register must to be cleared
	 * and IDECTRL register must be set to default value.
	 */
	writel(val: 0, addr: base + IDEUDMAOP);
	writel(readl(addr: base + IDECTRL) | IDECTRL_DIOWN | IDECTRL_DIORN |
		IDECTRL_CS0N | IDECTRL_CS1N, addr: base + IDECTRL);

	/* fall back to the device's configured PIO mode */
	ep93xx_pata_enable_pio(base: drv_data->ide_base,
		pio_mode: qc->dev->pio_mode - XFER_PIO_0);

	ata_sff_dma_pause(ap: qc->ap);
}
766 | |
767 | static void ep93xx_pata_dma_setup(struct ata_queued_cmd *qc) |
768 | { |
769 | qc->ap->ops->sff_exec_command(qc->ap, &qc->tf); |
770 | } |
771 | |
772 | static u8 ep93xx_pata_dma_status(struct ata_port *ap) |
773 | { |
774 | struct ep93xx_pata_data *drv_data = ap->host->private_data; |
775 | u32 val = readl(addr: drv_data->ide_base + IDEUDMASTS); |
776 | |
777 | /* |
778 | * UDMA Status Register bits: |
779 | * |
780 | * DMAIDE - DMA request signal from UDMA state machine, |
781 | * INTIDE - INT line generated by UDMA because of errors in the |
782 | * state machine, |
783 | * SBUSY - UDMA state machine busy, not in idle state, |
784 | * NDO - error for data-out not completed, |
785 | * NDI - error for data-in not completed, |
786 | * N4X - error for data transferred not multiplies of four |
787 | * 32-bit words. |
788 | * (EP93xx UG p27-17) |
789 | */ |
790 | if (val & IDEUDMASTS_NDO || val & IDEUDMASTS_NDI || |
791 | val & IDEUDMASTS_N4X || val & IDEUDMASTS_INTIDE) |
792 | return ATA_DMA_ERR; |
793 | |
794 | /* read INTRQ (INT[3]) pin input state */ |
795 | if (readl(addr: drv_data->ide_base + IDECTRL) & IDECTRL_INTRQ) |
796 | return ATA_DMA_INTR; |
797 | |
798 | if (val & IDEUDMASTS_SBUSY || val & IDEUDMASTS_DMAIDE) |
799 | return ATA_DMA_ACTIVE; |
800 | |
801 | return 0; |
802 | } |
803 | |
/* Note: original code is ata_sff_softreset */
/*
 * libata ->softreset hook: detect present devices, pulse SRST, then
 * classify each device by its reset signature.  Returns 0 on success.
 */
static int ep93xx_pata_softreset(struct ata_link *al, unsigned int *classes,
				 unsigned long deadline)
{
	struct ata_port *ap = al->ap;
	unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
	unsigned int devmask = 0;
	int rc;
	u8 err;

	/* determine if device 0/1 are present */
	if (ep93xx_pata_device_is_present(ap, device: 0))
		devmask |= (1 << 0);
	if (slave_possible && ep93xx_pata_device_is_present(ap, device: 1))
		devmask |= (1 << 1);

	/* select device 0 again */
	ap->ops->sff_dev_select(al->ap, 0);

	/* issue bus reset */
	rc = ep93xx_pata_bus_softreset(ap, devmask, deadline);
	/* if link is ocuppied, -ENODEV too is an error */
	if (rc && (rc != -ENODEV || sata_scr_valid(link: al))) {
		ata_link_err(al, "SRST failed (errno=%d)\n", rc);
		return rc;
	}

	/* determine by signature whether we have ATA or ATAPI devices */
	classes[0] = ata_sff_dev_classify(dev: &al->device[0], present: devmask & (1 << 0),
		r_err: &err);
	/* err == 0x81 means device 0 reported a diagnostic failure for dev 1 */
	if (slave_possible && err != 0x81)
		classes[1] = ata_sff_dev_classify(dev: &al->device[1],
			present: devmask & (1 << 1), r_err: &err);

	return 0;
}
840 | |
841 | /* Note: original code is ata_sff_drain_fifo */ |
842 | static void ep93xx_pata_drain_fifo(struct ata_queued_cmd *qc) |
843 | { |
844 | int count; |
845 | struct ata_port *ap; |
846 | struct ep93xx_pata_data *drv_data; |
847 | |
848 | /* We only need to flush incoming data when a command was running */ |
849 | if (qc == NULL || qc->dma_dir == DMA_TO_DEVICE) |
850 | return; |
851 | |
852 | ap = qc->ap; |
853 | drv_data = ap->host->private_data; |
854 | /* Drain up to 64K of data before we give up this recovery method */ |
855 | for (count = 0; (ap->ops->sff_check_status(ap) & ATA_DRQ) |
856 | && count < 65536; count += 2) |
857 | ep93xx_pata_read_reg(drv_data, addr: IDECTRL_ADDR_DATA); |
858 | |
859 | if (count) |
860 | ata_port_dbg(ap, "drained %d bytes to clear DRQ.\n" , count); |
861 | |
862 | } |
863 | |
864 | static int ep93xx_pata_port_start(struct ata_port *ap) |
865 | { |
866 | struct ep93xx_pata_data *drv_data = ap->host->private_data; |
867 | |
868 | /* |
869 | * Set timings to safe values at startup (= number of ns from ATA |
870 | * specification), we'll switch to properly calculated values later. |
871 | */ |
872 | drv_data->t = *ata_timing_find_mode(xfer_mode: XFER_PIO_0); |
873 | return 0; |
874 | } |
875 | |
/* SCSI host template: libata defaults plus the ep93xx DMA engine's limits */
static const struct scsi_host_template ep93xx_pata_sht = {
	ATA_BASE_SHT(DRV_NAME),
	/* ep93xx dma implementation limit */
	.sg_tablesize		= 32,
	/* ep93xx dma can't transfer 65536 bytes at once */
	.dma_boundary		= 0x7fff,
};
883 | |
/*
 * Port operations: inherit the generic BMDMA ops and override every SFF
 * register accessor, because the ep93xx IDE registers are reached through
 * the custom IDECTRL/IDEDATA interface rather than plain MMIO/PIO.
 */
static struct ata_port_operations ep93xx_pata_port_ops = {
	.inherits		= &ata_bmdma_port_ops,

	/* no BMDMA PRD table to build; the dmaengine path handles mapping */
	.qc_prep		= ata_noop_qc_prep,

	.softreset		= ep93xx_pata_softreset,
	.hardreset		= ATA_OP_NULL,

	.sff_dev_select		= ep93xx_pata_dev_select,
	.sff_set_devctl		= ep93xx_pata_set_devctl,
	.sff_check_status	= ep93xx_pata_check_status,
	.sff_check_altstatus	= ep93xx_pata_check_altstatus,
	.sff_tf_load		= ep93xx_pata_tf_load,
	.sff_tf_read		= ep93xx_pata_tf_read,
	.sff_exec_command	= ep93xx_pata_exec_command,
	.sff_data_xfer		= ep93xx_pata_data_xfer,
	.sff_drain_fifo		= ep93xx_pata_drain_fifo,
	.sff_irq_clear		= ATA_OP_NULL,

	.set_piomode		= ep93xx_pata_set_piomode,

	.bmdma_setup		= ep93xx_pata_dma_setup,
	.bmdma_start		= ep93xx_pata_dma_start,
	.bmdma_stop		= ep93xx_pata_dma_stop,
	.bmdma_status		= ep93xx_pata_dma_status,

	/* cable type is not detectable on this board wiring */
	.cable_detect		= ata_cable_unknown,
	.port_start		= ep93xx_pata_port_start,
};
913 | |
/* Map SoC silicon revision to the maximum supported UDMA mode (see probe) */
static const struct soc_device_attribute ep93xx_soc_table[] = {
	{ .revision = "E1" , .data = (void *)ATA_UDMA3 },
	{ .revision = "E2" , .data = (void *)ATA_UDMA4 },
	{ /* sentinel */ }
};
919 | |
920 | static int ep93xx_pata_probe(struct platform_device *pdev) |
921 | { |
922 | struct ep93xx_pata_data *drv_data; |
923 | struct ata_host *host; |
924 | struct ata_port *ap; |
925 | int irq; |
926 | struct resource *mem_res; |
927 | void __iomem *ide_base; |
928 | int err; |
929 | |
930 | err = ep93xx_ide_acquire_gpio(pdev); |
931 | if (err) |
932 | return err; |
933 | |
934 | /* INT[3] (IRQ_EP93XX_EXT3) line connected as pull down */ |
935 | irq = platform_get_irq(pdev, 0); |
936 | if (irq < 0) { |
937 | err = irq; |
938 | goto err_rel_gpio; |
939 | } |
940 | |
941 | ide_base = devm_platform_get_and_ioremap_resource(pdev, index: 0, res: &mem_res); |
942 | if (IS_ERR(ptr: ide_base)) { |
943 | err = PTR_ERR(ptr: ide_base); |
944 | goto err_rel_gpio; |
945 | } |
946 | |
947 | drv_data = devm_kzalloc(dev: &pdev->dev, size: sizeof(*drv_data), GFP_KERNEL); |
948 | if (!drv_data) { |
949 | err = -ENOMEM; |
950 | goto err_rel_gpio; |
951 | } |
952 | |
953 | drv_data->pdev = pdev; |
954 | drv_data->ide_base = ide_base; |
955 | drv_data->udma_in_phys = mem_res->start + IDEUDMADATAIN; |
956 | drv_data->udma_out_phys = mem_res->start + IDEUDMADATAOUT; |
957 | ep93xx_pata_dma_init(drv_data); |
958 | |
959 | /* allocate host */ |
960 | host = ata_host_alloc(dev: &pdev->dev, max_ports: 1); |
961 | if (!host) { |
962 | err = -ENOMEM; |
963 | goto err_rel_dma; |
964 | } |
965 | |
966 | ep93xx_pata_clear_regs(base: ide_base); |
967 | |
968 | host->private_data = drv_data; |
969 | |
970 | ap = host->ports[0]; |
971 | ap->dev = &pdev->dev; |
972 | ap->ops = &ep93xx_pata_port_ops; |
973 | ap->flags |= ATA_FLAG_SLAVE_POSS; |
974 | ap->pio_mask = ATA_PIO4; |
975 | |
976 | /* |
977 | * Maximum UDMA modes: |
978 | * EP931x rev.E0 - UDMA2 |
979 | * EP931x rev.E1 - UDMA3 |
980 | * EP931x rev.E2 - UDMA4 |
981 | * |
982 | * MWDMA support was removed from EP931x rev.E2, |
983 | * so this driver supports only UDMA modes. |
984 | */ |
985 | if (drv_data->dma_rx_channel && drv_data->dma_tx_channel) { |
986 | const struct soc_device_attribute *match; |
987 | |
988 | match = soc_device_match(matches: ep93xx_soc_table); |
989 | if (match) |
990 | ap->udma_mask = (unsigned int) match->data; |
991 | else |
992 | ap->udma_mask = ATA_UDMA2; |
993 | } |
994 | |
995 | /* defaults, pio 0 */ |
996 | ep93xx_pata_enable_pio(base: ide_base, pio_mode: 0); |
997 | |
998 | dev_info(&pdev->dev, "version " DRV_VERSION "\n" ); |
999 | |
1000 | /* activate host */ |
1001 | err = ata_host_activate(host, irq, irq_handler: ata_bmdma_interrupt, irq_flags: 0, |
1002 | sht: &ep93xx_pata_sht); |
1003 | if (err == 0) |
1004 | return 0; |
1005 | |
1006 | err_rel_dma: |
1007 | ep93xx_pata_release_dma(drv_data); |
1008 | err_rel_gpio: |
1009 | ep93xx_ide_release_gpio(pdev); |
1010 | return err; |
1011 | } |
1012 | |
1013 | static void ep93xx_pata_remove(struct platform_device *pdev) |
1014 | { |
1015 | struct ata_host *host = platform_get_drvdata(pdev); |
1016 | struct ep93xx_pata_data *drv_data = host->private_data; |
1017 | |
1018 | ata_host_detach(host); |
1019 | ep93xx_pata_release_dma(drv_data); |
1020 | ep93xx_pata_clear_regs(base: drv_data->ide_base); |
1021 | ep93xx_ide_release_gpio(pdev); |
1022 | } |
1023 | |
/* Platform driver glue; matched by name against the ep93xx IDE device */
static struct platform_driver ep93xx_pata_platform_driver = {
	.driver = {
		.name = DRV_NAME,
	},
	.probe = ep93xx_pata_probe,
	.remove_new = ep93xx_pata_remove,
};
1031 | |
/* Boilerplate module registration and metadata */
module_platform_driver(ep93xx_pata_platform_driver);

MODULE_AUTHOR("Alessandro Zummo, Lennert Buytenhek, Joao Ramos, "
		"Bartlomiej Zolnierkiewicz, Rafal Prylowski" );
MODULE_DESCRIPTION("low-level driver for cirrus ep93xx IDE controller" );
MODULE_LICENSE("GPL" );
MODULE_VERSION(DRV_VERSION);
MODULE_ALIAS("platform:pata_ep93xx" );
1040 | |