// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * libata-sff.c - helper library for PCI IDE BMDMA
 *
 * Copyright 2003-2006 Red Hat, Inc.  All rights reserved.
 * Copyright 2003-2006 Jeff Garzik
 *
 * libata documentation is available via 'make {ps|pdf}docs',
 * as Documentation/driver-api/libata.rst
 *
 * Hardware documentation available from http://www.t13.org/ and
 * http://www.sata-io.org/
 */

#include <linux/kernel.h>
#include <linux/gfp.h>
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/libata.h>
#include <linux/highmem.h>
#include <trace/events/libata.h>
#include "libata.h"

static struct workqueue_struct *ata_sff_wq;

const struct ata_port_operations ata_sff_port_ops = {
	.inherits		= &ata_base_port_ops,

	.qc_prep		= ata_noop_qc_prep,
	.qc_issue		= ata_sff_qc_issue,
	.qc_fill_rtf		= ata_sff_qc_fill_rtf,

	.freeze			= ata_sff_freeze,
	.thaw			= ata_sff_thaw,
	.prereset		= ata_sff_prereset,
	.softreset		= ata_sff_softreset,
	.hardreset		= sata_sff_hardreset,
	.postreset		= ata_sff_postreset,
	.error_handler		= ata_sff_error_handler,

	.sff_dev_select		= ata_sff_dev_select,
	.sff_check_status	= ata_sff_check_status,
	.sff_tf_load		= ata_sff_tf_load,
	.sff_tf_read		= ata_sff_tf_read,
	.sff_exec_command	= ata_sff_exec_command,
	.sff_data_xfer		= ata_sff_data_xfer,
	.sff_drain_fifo		= ata_sff_drain_fifo,

	.lost_interrupt		= ata_sff_lost_interrupt,
};
EXPORT_SYMBOL_GPL(ata_sff_port_ops);
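
/*
 * Example (illustrative sketch, not part of this file): a minimal SFF
 * driver normally inherits ata_sff_port_ops and overrides only what its
 * hardware needs; this library reaches the overrides through ap->ops.
 * The "my_*" names below are hypothetical.
 */
#if 0	/* illustration only */
static struct ata_port_operations my_port_ops = {
	.inherits	= &ata_sff_port_ops,
	.sff_data_xfer	= ata_sff_data_xfer32,	/* controller handles 32-bit PIO */
	.set_piomode	= my_set_piomode,	/* hypothetical timing hook */
};
#endif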

/**
 * ata_sff_check_status - Read device status reg & clear interrupt
 * @ap: port where the device is
 *
 * Reads ATA taskfile status register for currently-selected device
 * and returns its value. This also clears pending interrupts
 * from this device.
 *
 * LOCKING:
 * Inherited from caller.
 */
u8 ata_sff_check_status(struct ata_port *ap)
{
	return ioread8(ap->ioaddr.status_addr);
}
EXPORT_SYMBOL_GPL(ata_sff_check_status);

/**
 * ata_sff_altstatus - Read device alternate status reg
 * @ap: port where the device is
 * @status: pointer to a status value
 *
 * Reads ATA alternate status register for currently-selected device
 * and returns its value.
 *
 * RETURN:
 * true if the register exists, false if not.
 *
 * LOCKING:
 * Inherited from caller.
 */
static bool ata_sff_altstatus(struct ata_port *ap, u8 *status)
{
	u8 tmp;

	if (ap->ops->sff_check_altstatus) {
		tmp = ap->ops->sff_check_altstatus(ap);
		goto read;
	}
	if (ap->ioaddr.altstatus_addr) {
		tmp = ioread8(ap->ioaddr.altstatus_addr);
		goto read;
	}
	return false;

read:
	if (status)
		*status = tmp;
	return true;
}
/**
 * ata_sff_irq_status - Check if the device is busy
 * @ap: port where the device is
 *
 * Determine if the port is currently busy. Uses altstatus
 * if available in order to avoid clearing shared IRQ status
 * when finding an IRQ source. Fortunately for us, devices
 * without a ctl block don't share interrupt lines.
 *
 * LOCKING:
 * Inherited from caller.
 */
static u8 ata_sff_irq_status(struct ata_port *ap)
{
	u8 status;

	/* Not us: We are busy */
	if (ata_sff_altstatus(ap, &status) && (status & ATA_BUSY))
		return status;
	/* Clear INTRQ latch */
	status = ap->ops->sff_check_status(ap);
	return status;
}

/**
 * ata_sff_sync - Flush writes
 * @ap: Port to wait for.
 *
 * CAUTION:
 * If we have an mmio device with no ctl and no altstatus
 * method this will fail. No such devices are known to exist.
 *
 * LOCKING:
 * Inherited from caller.
 */

static void ata_sff_sync(struct ata_port *ap)
{
	ata_sff_altstatus(ap, NULL);
}

/**
 * ata_sff_pause - Flush writes and wait 400ns
 * @ap: Port to pause for.
 *
 * CAUTION:
 * If we have an mmio device with no ctl and no altstatus
 * method this will fail. No such devices are known to exist.
 *
 * LOCKING:
 * Inherited from caller.
 */

void ata_sff_pause(struct ata_port *ap)
{
	ata_sff_sync(ap);
	ndelay(400);
}
EXPORT_SYMBOL_GPL(ata_sff_pause);

/**
 * ata_sff_dma_pause - Pause before commencing DMA
 * @ap: Port to pause for.
 *
 * Perform I/O fencing and ensure sufficient cycle delays occur
 * for the HDMA1:0 transition.
 */

void ata_sff_dma_pause(struct ata_port *ap)
{
	/*
	 * An altstatus read will cause the needed delay without
	 * messing up the IRQ status
	 */
	if (ata_sff_altstatus(ap, NULL))
		return;
	/* There are no DMA controllers without ctl. BUG here to ensure
	   we never violate the HDMA1:0 transition timing and risk
	   corruption. */
	BUG();
}
EXPORT_SYMBOL_GPL(ata_sff_dma_pause);

static int ata_sff_check_ready(struct ata_link *link)
{
	u8 status = link->ap->ops->sff_check_status(link->ap);

	return ata_check_ready(status);
}

/**
 * ata_sff_wait_ready - sleep until BSY clears, or timeout
 * @link: SFF link to wait ready status for
 * @deadline: deadline jiffies for the operation
 *
 * Sleep until ATA Status register bit BSY clears, or timeout
 * occurs.
 *
 * LOCKING:
 * Kernel thread context (may sleep).
 *
 * RETURNS:
 * 0 on success, -errno otherwise.
 */
int ata_sff_wait_ready(struct ata_link *link, unsigned long deadline)
{
	return ata_wait_ready(link, deadline, ata_sff_check_ready);
}
EXPORT_SYMBOL_GPL(ata_sff_wait_ready);
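
/*
 * Example (illustrative sketch, not part of this file): a driver's
 * custom reset method typically ends by waiting for BSY to clear
 * within the EH deadline.  my_toggle_srst() is a hypothetical helper;
 * only ata_sff_wait_ready() is real.
 */
#if 0	/* illustration only */
static int my_softreset(struct ata_link *link, unsigned int *classes,
			unsigned long deadline)
{
	my_toggle_srst(link->ap);	/* hypothetical SRST pulse */
	return ata_sff_wait_ready(link, deadline);
}
#endif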

/**
 * ata_sff_set_devctl - Write device control reg
 * @ap: port where the device is
 * @ctl: value to write
 *
 * Writes ATA device control register.
 *
 * RETURN:
 * true if the register exists, false if not.
 *
 * LOCKING:
 * Inherited from caller.
 */
static bool ata_sff_set_devctl(struct ata_port *ap, u8 ctl)
{
	if (ap->ops->sff_set_devctl) {
		ap->ops->sff_set_devctl(ap, ctl);
		return true;
	}
	if (ap->ioaddr.ctl_addr) {
		iowrite8(ctl, ap->ioaddr.ctl_addr);
		return true;
	}

	return false;
}

/**
 * ata_sff_dev_select - Select device 0/1 on ATA bus
 * @ap: ATA channel to manipulate
 * @device: ATA device (numbered from zero) to select
 *
 * Use the method defined in the ATA specification to
 * make either device 0, or device 1, active on the
 * ATA channel. Works with both PIO and MMIO.
 *
 * May be used as the dev_select() entry in ata_port_operations.
 *
 * LOCKING:
 * caller.
 */
void ata_sff_dev_select(struct ata_port *ap, unsigned int device)
{
	u8 tmp;

	if (device == 0)
		tmp = ATA_DEVICE_OBS;
	else
		tmp = ATA_DEVICE_OBS | ATA_DEV1;

	iowrite8(tmp, ap->ioaddr.device_addr);
	ata_sff_pause(ap);	/* needed; also flushes, for mmio */
}
EXPORT_SYMBOL_GPL(ata_sff_dev_select);
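
/*
 * Example (illustrative sketch, not part of this file): a controller
 * that needs extra work around device selection can wrap the default
 * and install the wrapper as ->sff_dev_select.  my_switch_bank() is a
 * hypothetical quirk helper.
 */
#if 0	/* illustration only */
static void my_dev_select(struct ata_port *ap, unsigned int device)
{
	my_switch_bank(ap, device);	/* hypothetical hardware quirk */
	ata_sff_dev_select(ap, device);
}
#endif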

/**
 * ata_dev_select - Select device 0/1 on ATA bus
 * @ap: ATA channel to manipulate
 * @device: ATA device (numbered from zero) to select
 * @wait: non-zero to wait for Status register BSY bit to clear
 * @can_sleep: non-zero if context allows sleeping
 *
 * Use the method defined in the ATA specification to
 * make either device 0, or device 1, active on the
 * ATA channel.
 *
 * This is a high-level version of ata_sff_dev_select(), which
 * additionally provides the services of inserting the proper
 * pauses and status polling, where needed.
 *
 * LOCKING:
 * caller.
 */
static void ata_dev_select(struct ata_port *ap, unsigned int device,
			   unsigned int wait, unsigned int can_sleep)
{
	if (wait)
		ata_wait_idle(ap);

	ap->ops->sff_dev_select(ap, device);

	if (wait) {
		if (can_sleep && ap->link.device[device].class == ATA_DEV_ATAPI)
			ata_msleep(ap, 150);
		ata_wait_idle(ap);
	}
}

/**
 * ata_sff_irq_on - Enable interrupts on a port.
 * @ap: Port on which interrupts are enabled.
 *
 * Enable interrupts on a legacy IDE device using MMIO or PIO,
 * wait for idle, clear any pending interrupts.
 *
 * Note: may NOT be used as the sff_irq_on() entry in
 * ata_port_operations.
 *
 * LOCKING:
 * Inherited from caller.
 */
void ata_sff_irq_on(struct ata_port *ap)
{
	if (ap->ops->sff_irq_on) {
		ap->ops->sff_irq_on(ap);
		return;
	}

	ap->ctl &= ~ATA_NIEN;
	ap->last_ctl = ap->ctl;

	ata_sff_set_devctl(ap, ap->ctl);
	ata_wait_idle(ap);

	if (ap->ops->sff_irq_clear)
		ap->ops->sff_irq_clear(ap);
}
EXPORT_SYMBOL_GPL(ata_sff_irq_on);

/**
 * ata_sff_tf_load - send taskfile registers to host controller
 * @ap: Port to which output is sent
 * @tf: ATA taskfile register set
 *
 * Outputs ATA taskfile to standard ATA host controller.
 *
 * LOCKING:
 * Inherited from caller.
 */
void ata_sff_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;

	if (tf->ctl != ap->last_ctl) {
		if (ioaddr->ctl_addr)
			iowrite8(tf->ctl, ioaddr->ctl_addr);
		ap->last_ctl = tf->ctl;
		ata_wait_idle(ap);
	}

	if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
		WARN_ON_ONCE(!ioaddr->ctl_addr);
		iowrite8(tf->hob_feature, ioaddr->feature_addr);
		iowrite8(tf->hob_nsect, ioaddr->nsect_addr);
		iowrite8(tf->hob_lbal, ioaddr->lbal_addr);
		iowrite8(tf->hob_lbam, ioaddr->lbam_addr);
		iowrite8(tf->hob_lbah, ioaddr->lbah_addr);
	}

	if (is_addr) {
		iowrite8(tf->feature, ioaddr->feature_addr);
		iowrite8(tf->nsect, ioaddr->nsect_addr);
		iowrite8(tf->lbal, ioaddr->lbal_addr);
		iowrite8(tf->lbam, ioaddr->lbam_addr);
		iowrite8(tf->lbah, ioaddr->lbah_addr);
	}

	if (tf->flags & ATA_TFLAG_DEVICE)
		iowrite8(tf->device, ioaddr->device_addr);

	ata_wait_idle(ap);
}
EXPORT_SYMBOL_GPL(ata_sff_tf_load);

/**
 * ata_sff_tf_read - input device's ATA taskfile shadow registers
 * @ap: Port from which input is read
 * @tf: ATA taskfile register set for storing input
 *
 * Reads ATA taskfile registers for currently-selected device
 * into @tf. Assumes the device has a fully SFF compliant task file
 * layout and behaviour. If your device does not (e.g. has a different
 * status method) then you will need to provide a replacement tf_read.
 *
 * LOCKING:
 * Inherited from caller.
 */
void ata_sff_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;

	tf->status = ata_sff_check_status(ap);
	tf->error = ioread8(ioaddr->error_addr);
	tf->nsect = ioread8(ioaddr->nsect_addr);
	tf->lbal = ioread8(ioaddr->lbal_addr);
	tf->lbam = ioread8(ioaddr->lbam_addr);
	tf->lbah = ioread8(ioaddr->lbah_addr);
	tf->device = ioread8(ioaddr->device_addr);

	if (tf->flags & ATA_TFLAG_LBA48) {
		if (likely(ioaddr->ctl_addr)) {
			iowrite8(tf->ctl | ATA_HOB, ioaddr->ctl_addr);
			tf->hob_feature = ioread8(ioaddr->error_addr);
			tf->hob_nsect = ioread8(ioaddr->nsect_addr);
			tf->hob_lbal = ioread8(ioaddr->lbal_addr);
			tf->hob_lbam = ioread8(ioaddr->lbam_addr);
			tf->hob_lbah = ioread8(ioaddr->lbah_addr);
			iowrite8(tf->ctl, ioaddr->ctl_addr);
			ap->last_ctl = tf->ctl;
		} else
			WARN_ON_ONCE(1);
	}
}
EXPORT_SYMBOL_GPL(ata_sff_tf_read);

/**
 * ata_sff_exec_command - issue ATA command to host controller
 * @ap: port to which command is being issued
 * @tf: ATA taskfile register set
 *
 * Issues ATA command, with proper synchronization with interrupt
 * handler / other threads.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 */
void ata_sff_exec_command(struct ata_port *ap, const struct ata_taskfile *tf)
{
	iowrite8(tf->command, ap->ioaddr.command_addr);
	ata_sff_pause(ap);
}
EXPORT_SYMBOL_GPL(ata_sff_exec_command);

/**
 * ata_tf_to_host - issue ATA taskfile to host controller
 * @ap: port to which command is being issued
 * @tf: ATA taskfile register set
 * @tag: tag of the associated command
 *
 * Issues ATA taskfile register set to ATA host controller,
 * with proper synchronization with interrupt handler and
 * other threads.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 */
static inline void ata_tf_to_host(struct ata_port *ap,
				  const struct ata_taskfile *tf,
				  unsigned int tag)
{
	trace_ata_tf_load(ap, tf);
	ap->ops->sff_tf_load(ap, tf);
	trace_ata_exec_command(ap, tf, tag);
	ap->ops->sff_exec_command(ap, tf);
}

/**
 * ata_sff_data_xfer - Transfer data by PIO
 * @qc: queued command
 * @buf: data buffer
 * @buflen: buffer length
 * @rw: read/write
 *
 * Transfer data from/to the device data register by PIO.
 *
 * LOCKING:
 * Inherited from caller.
 *
 * RETURNS:
 * Bytes consumed.
 */
unsigned int ata_sff_data_xfer(struct ata_queued_cmd *qc, unsigned char *buf,
			       unsigned int buflen, int rw)
{
	struct ata_port *ap = qc->dev->link->ap;
	void __iomem *data_addr = ap->ioaddr.data_addr;
	unsigned int words = buflen >> 1;

	/* Transfer multiple of 2 bytes */
	if (rw == READ)
		ioread16_rep(data_addr, buf, words);
	else
		iowrite16_rep(data_addr, buf, words);

	/* Transfer trailing byte, if any. */
	if (unlikely(buflen & 0x01)) {
		unsigned char pad[2] = { };

		/* Point buf to the tail of buffer */
		buf += buflen - 1;

		/*
		 * Use io*16_rep() accessors here as well to avoid pointlessly
		 * swapping bytes to and from on the big endian machines...
		 */
		if (rw == READ) {
			ioread16_rep(data_addr, pad, 1);
			*buf = pad[0];
		} else {
			pad[0] = *buf;
			iowrite16_rep(data_addr, pad, 1);
		}
		words++;
	}

	return words << 1;
}
EXPORT_SYMBOL_GPL(ata_sff_data_xfer);

/**
 * ata_sff_data_xfer32 - Transfer data by PIO
 * @qc: queued command
 * @buf: data buffer
 * @buflen: buffer length
 * @rw: read/write
 *
 * Transfer data from/to the device data register by PIO using 32bit
 * I/O operations.
 *
 * LOCKING:
 * Inherited from caller.
 *
 * RETURNS:
 * Bytes consumed.
 */

unsigned int ata_sff_data_xfer32(struct ata_queued_cmd *qc, unsigned char *buf,
				 unsigned int buflen, int rw)
{
	struct ata_device *dev = qc->dev;
	struct ata_port *ap = dev->link->ap;
	void __iomem *data_addr = ap->ioaddr.data_addr;
	unsigned int words = buflen >> 2;
	int slop = buflen & 3;

	if (!(ap->pflags & ATA_PFLAG_PIO32))
		return ata_sff_data_xfer(qc, buf, buflen, rw);

	/* Transfer multiple of 4 bytes */
	if (rw == READ)
		ioread32_rep(data_addr, buf, words);
	else
		iowrite32_rep(data_addr, buf, words);

	/* Transfer trailing bytes, if any */
	if (unlikely(slop)) {
		unsigned char pad[4] = { };

		/* Point buf to the tail of buffer */
		buf += buflen - slop;

		/*
		 * Use io*_rep() accessors here as well to avoid pointlessly
		 * swapping bytes to and from on the big endian machines...
		 */
		if (rw == READ) {
			if (slop < 3)
				ioread16_rep(data_addr, pad, 1);
			else
				ioread32_rep(data_addr, pad, 1);
			memcpy(buf, pad, slop);
		} else {
			memcpy(pad, buf, slop);
			if (slop < 3)
				iowrite16_rep(data_addr, pad, 1);
			else
				iowrite32_rep(data_addr, pad, 1);
		}
	}
	return (buflen + 1) & ~1;
}
EXPORT_SYMBOL_GPL(ata_sff_data_xfer32);
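
/*
 * Example (illustrative sketch, not part of this file): a driver opts
 * in to 32-bit PIO by pointing ->sff_data_xfer at this helper and
 * setting ATA_PFLAG_PIO32 on the port, e.g. from its port_start()
 * hook; without the pflag the helper falls back to 16-bit transfers
 * as shown above.
 */
#if 0	/* illustration only */
static int my_port_start(struct ata_port *ap)
{
	ap->pflags |= ATA_PFLAG_PIO32 | ATA_PFLAG_PIO32CHANGE;
	return 0;
}
#endif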

static void ata_pio_xfer(struct ata_queued_cmd *qc, struct page *page,
		unsigned int offset, size_t xfer_size)
{
	bool do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
	unsigned char *buf;

	buf = kmap_atomic(page);
	qc->ap->ops->sff_data_xfer(qc, buf + offset, xfer_size, do_write);
	kunmap_atomic(buf);

	if (!do_write && !PageSlab(page))
		flush_dcache_page(page);
}

/**
 * ata_pio_sector - Transfer a sector of data.
 * @qc: Command on going
 *
 * Transfer qc->sect_size bytes of data from/to the ATA device.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void ata_pio_sector(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct page *page;
	unsigned int offset;

	if (!qc->cursg) {
		qc->curbytes = qc->nbytes;
		return;
	}
	if (qc->curbytes == qc->nbytes - qc->sect_size)
		ap->hsm_task_state = HSM_ST_LAST;

	page = sg_page(qc->cursg);
	offset = qc->cursg->offset + qc->cursg_ofs;

	/* get the current page and offset */
	page = nth_page(page, (offset >> PAGE_SHIFT));
	offset %= PAGE_SIZE;

	trace_ata_sff_pio_transfer_data(qc, offset, qc->sect_size);

	/*
	 * Split the transfer when it splits a page boundary. Note that the
	 * split still has to be dword aligned like all ATA data transfers.
	 */
	WARN_ON_ONCE(offset % 4);
	if (offset + qc->sect_size > PAGE_SIZE) {
		unsigned int split_len = PAGE_SIZE - offset;

		ata_pio_xfer(qc, page, offset, split_len);
		ata_pio_xfer(qc, nth_page(page, 1), 0,
			     qc->sect_size - split_len);
	} else {
		ata_pio_xfer(qc, page, offset, qc->sect_size);
	}

	qc->curbytes += qc->sect_size;
	qc->cursg_ofs += qc->sect_size;

	if (qc->cursg_ofs == qc->cursg->length) {
		qc->cursg = sg_next(qc->cursg);
		if (!qc->cursg)
			ap->hsm_task_state = HSM_ST_LAST;
		qc->cursg_ofs = 0;
	}
}

/**
 * ata_pio_sectors - Transfer one or many sectors.
 * @qc: Command on going
 *
 * Transfer one or many sectors of data from/to the
 * ATA device for the DRQ request.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void ata_pio_sectors(struct ata_queued_cmd *qc)
{
	if (is_multi_taskfile(&qc->tf)) {
		/* READ/WRITE MULTIPLE */
		unsigned int nsect;

		WARN_ON_ONCE(qc->dev->multi_count == 0);

		nsect = min((qc->nbytes - qc->curbytes) / qc->sect_size,
			    qc->dev->multi_count);
		while (nsect--)
			ata_pio_sector(qc);
	} else
		ata_pio_sector(qc);

	ata_sff_sync(qc->ap); /* flush */
}

/**
 * atapi_send_cdb - Write CDB bytes to hardware
 * @ap: Port to which ATAPI device is attached.
 * @qc: Taskfile currently active
 *
 * When device has indicated its readiness to accept
 * a CDB, this function is called. Send the CDB.
 *
 * LOCKING:
 * caller.
 */
static void atapi_send_cdb(struct ata_port *ap, struct ata_queued_cmd *qc)
{
	/* send SCSI cdb */
	trace_atapi_send_cdb(qc, 0, qc->dev->cdb_len);
	WARN_ON_ONCE(qc->dev->cdb_len < 12);

	ap->ops->sff_data_xfer(qc, qc->cdb, qc->dev->cdb_len, 1);
	ata_sff_sync(ap);
	/* FIXME: If the CDB is for DMA do we need to do the transition delay
	   or is bmdma_start guaranteed to do it ? */
	switch (qc->tf.protocol) {
	case ATAPI_PROT_PIO:
		ap->hsm_task_state = HSM_ST;
		break;
	case ATAPI_PROT_NODATA:
		ap->hsm_task_state = HSM_ST_LAST;
		break;
#ifdef CONFIG_ATA_BMDMA
	case ATAPI_PROT_DMA:
		ap->hsm_task_state = HSM_ST_LAST;
		/* initiate bmdma */
		trace_ata_bmdma_start(ap, &qc->tf, qc->tag);
		ap->ops->bmdma_start(qc);
		break;
#endif /* CONFIG_ATA_BMDMA */
	default:
		BUG();
	}
}

/**
 * __atapi_pio_bytes - Transfer data from/to the ATAPI device.
 * @qc: Command on going
 * @bytes: number of bytes
 *
 * Transfer data from/to the ATAPI device.
 *
 * LOCKING:
 * Inherited from caller.
 */
static int __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
{
	int rw = (qc->tf.flags & ATA_TFLAG_WRITE) ? WRITE : READ;
	struct ata_port *ap = qc->ap;
	struct ata_device *dev = qc->dev;
	struct ata_eh_info *ehi = &dev->link->eh_info;
	struct scatterlist *sg;
	struct page *page;
	unsigned char *buf;
	unsigned int offset, count, consumed;

next_sg:
	sg = qc->cursg;
	if (unlikely(!sg)) {
		ata_ehi_push_desc(ehi, "unexpected or too much trailing data "
				  "buf=%u cur=%u bytes=%u",
				  qc->nbytes, qc->curbytes, bytes);
		return -1;
	}

	page = sg_page(sg);
	offset = sg->offset + qc->cursg_ofs;

	/* get the current page and offset */
	page = nth_page(page, (offset >> PAGE_SHIFT));
	offset %= PAGE_SIZE;

	/* don't overrun current sg */
	count = min(sg->length - qc->cursg_ofs, bytes);

	/* don't cross page boundaries */
	count = min(count, (unsigned int)PAGE_SIZE - offset);

	trace_atapi_pio_transfer_data(qc, offset, count);

	/* do the actual data transfer */
	buf = kmap_atomic(page);
	consumed = ap->ops->sff_data_xfer(qc, buf + offset, count, rw);
	kunmap_atomic(buf);

	bytes -= min(bytes, consumed);
	qc->curbytes += count;
	qc->cursg_ofs += count;

	if (qc->cursg_ofs == sg->length) {
		qc->cursg = sg_next(qc->cursg);
		qc->cursg_ofs = 0;
	}

	/*
	 * There used to be a WARN_ON_ONCE(qc->cursg && count != consumed);
	 * Unfortunately __atapi_pio_bytes doesn't know enough to do the WARN
	 * check correctly as it doesn't know if it is the last request being
	 * made. Somebody should implement a proper sanity check.
	 */
	if (bytes)
		goto next_sg;
	return 0;
}

/**
 * atapi_pio_bytes - Transfer data from/to the ATAPI device.
 * @qc: Command on going
 *
 * Transfer data from/to the ATAPI device.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void atapi_pio_bytes(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct ata_device *dev = qc->dev;
	struct ata_eh_info *ehi = &dev->link->eh_info;
	unsigned int ireason, bc_lo, bc_hi, bytes;
	int i_write, do_write = (qc->tf.flags & ATA_TFLAG_WRITE) ? 1 : 0;

	/* Abuse qc->result_tf for temp storage of intermediate TF
	 * here to save some kernel stack usage.
	 * For normal completion, qc->result_tf is not relevant. For
	 * error, qc->result_tf is later overwritten by ata_qc_complete().
	 * So, the correctness of qc->result_tf is not affected.
	 */
	ap->ops->sff_tf_read(ap, &qc->result_tf);
	ireason = qc->result_tf.nsect;
	bc_lo = qc->result_tf.lbam;
	bc_hi = qc->result_tf.lbah;
	bytes = (bc_hi << 8) | bc_lo;

	/* shall be cleared to zero, indicating xfer of data */
	if (unlikely(ireason & ATAPI_COD))
		goto atapi_check;

	/* make sure transfer direction matches expected */
	i_write = ((ireason & ATAPI_IO) == 0) ? 1 : 0;
	if (unlikely(do_write != i_write))
		goto atapi_check;

	if (unlikely(!bytes))
		goto atapi_check;

	if (unlikely(__atapi_pio_bytes(qc, bytes)))
		goto err_out;
	ata_sff_sync(ap); /* flush */

	return;

atapi_check:
	ata_ehi_push_desc(ehi, "ATAPI check failed (ireason=0x%x bytes=%u)",
			  ireason, bytes);
err_out:
	qc->err_mask |= AC_ERR_HSM;
	ap->hsm_task_state = HSM_ST_ERR;
}

/**
 * ata_hsm_ok_in_wq - Check if the qc can be handled in the workqueue.
 * @ap: the target ata_port
 * @qc: qc on going
 *
 * RETURNS:
 * 1 if ok in workqueue, 0 otherwise.
 */
static inline int ata_hsm_ok_in_wq(struct ata_port *ap,
				   struct ata_queued_cmd *qc)
{
	if (qc->tf.flags & ATA_TFLAG_POLLING)
		return 1;

	if (ap->hsm_task_state == HSM_ST_FIRST) {
		if (qc->tf.protocol == ATA_PROT_PIO &&
		    (qc->tf.flags & ATA_TFLAG_WRITE))
			return 1;

		if (ata_is_atapi(qc->tf.protocol) &&
		    !(qc->dev->flags & ATA_DFLAG_CDB_INTR))
			return 1;
	}

	return 0;
}

/**
 * ata_hsm_qc_complete - finish a qc running on standard HSM
 * @qc: Command to complete
 * @in_wq: 1 if called from workqueue, 0 otherwise
 *
 * Finish @qc which is running on standard HSM.
 *
 * LOCKING:
 * If @in_wq is zero, spin_lock_irqsave(host lock).
 * Otherwise, none on entry and grabs host lock.
 */
static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
{
	struct ata_port *ap = qc->ap;

	if (in_wq) {
		/* EH might have kicked in while host lock is released. */
		qc = ata_qc_from_tag(ap, qc->tag);
		if (qc) {
			if (likely(!(qc->err_mask & AC_ERR_HSM))) {
				ata_sff_irq_on(ap);
				ata_qc_complete(qc);
			} else
				ata_port_freeze(ap);
		}
	} else {
		if (likely(!(qc->err_mask & AC_ERR_HSM)))
			ata_qc_complete(qc);
		else
			ata_port_freeze(ap);
	}
}

/**
 * ata_sff_hsm_move - move the HSM to the next state.
 * @ap: the target ata_port
 * @qc: qc on going
 * @status: current device status
 * @in_wq: 1 if called from workqueue, 0 otherwise
 *
 * RETURNS:
 * 1 when poll next status needed, 0 otherwise.
 */
int ata_sff_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
		     u8 status, int in_wq)
{
	struct ata_link *link = qc->dev->link;
	struct ata_eh_info *ehi = &link->eh_info;
	int poll_next;

	lockdep_assert_held(ap->lock);

	WARN_ON_ONCE((qc->flags & ATA_QCFLAG_ACTIVE) == 0);

	/* Make sure ata_sff_qc_issue() does not throw things
	 * like DMA polling into the workqueue. Notice that
	 * in_wq is not equivalent to (qc->tf.flags & ATA_TFLAG_POLLING).
	 */
	WARN_ON_ONCE(in_wq != ata_hsm_ok_in_wq(ap, qc));

fsm_start:
	trace_ata_sff_hsm_state(qc, status);

	switch (ap->hsm_task_state) {
	case HSM_ST_FIRST:
		/* Send first data block or PACKET CDB */

		/* If polling, we will stay in the work queue after
		 * sending the data. Otherwise, interrupt handler
		 * takes over after sending the data.
		 */
		poll_next = (qc->tf.flags & ATA_TFLAG_POLLING);

		/* check device status */
		if (unlikely((status & ATA_DRQ) == 0)) {
			/* handle BSY=0, DRQ=0 as error */
			if (likely(status & (ATA_ERR | ATA_DF)))
				/* device stops HSM for abort/error */
				qc->err_mask |= AC_ERR_DEV;
			else {
				/* HSM violation. Let EH handle this */
				ata_ehi_push_desc(ehi,
					"ST_FIRST: !(DRQ|ERR|DF)");
				qc->err_mask |= AC_ERR_HSM;
			}

			ap->hsm_task_state = HSM_ST_ERR;
			goto fsm_start;
		}

		/* Device should not ask for data transfer (DRQ=1)
		 * when it finds something wrong.
		 * We ignore DRQ here and stop the HSM by
		 * changing hsm_task_state to HSM_ST_ERR and
		 * let the EH abort the command or reset the device.
		 */
		if (unlikely(status & (ATA_ERR | ATA_DF))) {
			/* Some ATAPI tape drives forget to clear the ERR bit
			 * when doing the next command (mostly request sense).
			 * We ignore ERR here to workaround and proceed sending
			 * the CDB.
			 */
			if (!(qc->dev->horkage & ATA_HORKAGE_STUCK_ERR)) {
				ata_ehi_push_desc(ehi, "ST_FIRST: "
					"DRQ=1 with device error, "
					"dev_stat 0x%X", status);
				qc->err_mask |= AC_ERR_HSM;
				ap->hsm_task_state = HSM_ST_ERR;
				goto fsm_start;
			}
		}

		if (qc->tf.protocol == ATA_PROT_PIO) {
			/* PIO data out protocol.
			 * send first data block.
			 */

			/* ata_pio_sectors() might change the state
			 * to HSM_ST_LAST. so, the state is changed here
			 * before ata_pio_sectors().
			 */
			ap->hsm_task_state = HSM_ST;
			ata_pio_sectors(qc);
		} else
			/* send CDB */
			atapi_send_cdb(ap, qc);

		/* if polling, ata_sff_pio_task() handles the rest.
		 * otherwise, interrupt handler takes over from here.
		 */
		break;

	case HSM_ST:
		/* complete command or read/write the data register */
		if (qc->tf.protocol == ATAPI_PROT_PIO) {
			/* ATAPI PIO protocol */
			if ((status & ATA_DRQ) == 0) {
				/* No more data to transfer or device error.
				 * Device error will be tagged in HSM_ST_LAST.
				 */
				ap->hsm_task_state = HSM_ST_LAST;
				goto fsm_start;
			}

			/* Device should not ask for data transfer (DRQ=1)
			 * when it finds something wrong.
			 * We ignore DRQ here and stop the HSM by
			 * changing hsm_task_state to HSM_ST_ERR and
			 * let the EH abort the command or reset the device.
			 */
			if (unlikely(status & (ATA_ERR | ATA_DF))) {
				ata_ehi_push_desc(ehi, "ST-ATAPI: "
					"DRQ=1 with device error, "
					"dev_stat 0x%X", status);
				qc->err_mask |= AC_ERR_HSM;
				ap->hsm_task_state = HSM_ST_ERR;
				goto fsm_start;
			}

			atapi_pio_bytes(qc);

			if (unlikely(ap->hsm_task_state == HSM_ST_ERR))
				/* bad ireason reported by device */
				goto fsm_start;

		} else {
			/* ATA PIO protocol */
			if (unlikely((status & ATA_DRQ) == 0)) {
				/* handle BSY=0, DRQ=0 as error */
				if (likely(status & (ATA_ERR | ATA_DF))) {
					/* device stops HSM for abort/error */
					qc->err_mask |= AC_ERR_DEV;

					/* If diagnostic failed and this is
					 * IDENTIFY, it's likely a phantom
					 * device. Mark hint.
					 */
					if (qc->dev->horkage &
					    ATA_HORKAGE_DIAGNOSTIC)
						qc->err_mask |=
							AC_ERR_NODEV_HINT;
				} else {
					/* HSM violation. Let EH handle this.
					 * Phantom devices also trigger this
					 * condition. Mark hint.
					 */
					ata_ehi_push_desc(ehi, "ST-ATA: "
						"DRQ=0 without device error, "
						"dev_stat 0x%X", status);
					qc->err_mask |= AC_ERR_HSM |
							AC_ERR_NODEV_HINT;
				}

				ap->hsm_task_state = HSM_ST_ERR;
				goto fsm_start;
			}

			/* For PIO reads, some devices may ask for
			 * data transfer (DRQ=1) along with ERR=1.
			 * We respect DRQ here and transfer one
			 * block of junk data before changing the
			 * hsm_task_state to HSM_ST_ERR.
			 *
			 * For PIO writes, ERR=1 DRQ=1 doesn't make
			 * sense since the data block has been
			 * transferred to the device.
			 */
			if (unlikely(status & (ATA_ERR | ATA_DF))) {
				/* data might be corrupted */
				qc->err_mask |= AC_ERR_DEV;

				if (!(qc->tf.flags & ATA_TFLAG_WRITE)) {
					ata_pio_sectors(qc);
					status = ata_wait_idle(ap);
				}

				if (status & (ATA_BUSY | ATA_DRQ)) {
					ata_ehi_push_desc(ehi, "ST-ATA: "
						"BUSY|DRQ persists on ERR|DF, "
						"dev_stat 0x%X", status);
					qc->err_mask |= AC_ERR_HSM;
				}

				/* There are oddball controllers with
				 * status register stuck at 0x7f and
				 * lbal/m/h at zero which makes it
				 * pass all other presence detection
				 * mechanisms we have. Set NODEV_HINT
				 * for it. Kernel bz#7241.
				 */
				if (status == 0x7f)
					qc->err_mask |= AC_ERR_NODEV_HINT;

				/* ata_pio_sectors() might change the
				 * state to HSM_ST_LAST. so, the state
				 * is changed after ata_pio_sectors().
				 */
				ap->hsm_task_state = HSM_ST_ERR;
				goto fsm_start;
			}

			ata_pio_sectors(qc);

			if (ap->hsm_task_state == HSM_ST_LAST &&
			    (!(qc->tf.flags & ATA_TFLAG_WRITE))) {
				/* all data read */
				status = ata_wait_idle(ap);
				goto fsm_start;
			}
		}

		poll_next = 1;
		break;

	case HSM_ST_LAST:
		if (unlikely(!ata_ok(status))) {
			qc->err_mask |= __ac_err_mask(status);
			ap->hsm_task_state = HSM_ST_ERR;
			goto fsm_start;
		}

		/* no more data to transfer */
		trace_ata_sff_hsm_command_complete(qc, status);

		WARN_ON_ONCE(qc->err_mask & (AC_ERR_DEV | AC_ERR_HSM));

		ap->hsm_task_state = HSM_ST_IDLE;

		/* complete taskfile transaction */
		ata_hsm_qc_complete(qc, in_wq);

		poll_next = 0;
		break;

	case HSM_ST_ERR:
		ap->hsm_task_state = HSM_ST_IDLE;

		/* complete taskfile transaction */
		ata_hsm_qc_complete(qc, in_wq);

		poll_next = 0;
		break;
	default:
		poll_next = 0;
		WARN(true, "ata%d: SFF host state machine in invalid state %d",
		     ap->print_id, ap->hsm_task_state);
	}

	return poll_next;
}
EXPORT_SYMBOL_GPL(ata_sff_hsm_move);

void ata_sff_queue_work(struct work_struct *work)
{
	queue_work(ata_sff_wq, work);
}
EXPORT_SYMBOL_GPL(ata_sff_queue_work);

void ata_sff_queue_delayed_work(struct delayed_work *dwork, unsigned long delay)
{
	queue_delayed_work(ata_sff_wq, dwork, delay);
}
EXPORT_SYMBOL_GPL(ata_sff_queue_delayed_work);

void ata_sff_queue_pio_task(struct ata_link *link, unsigned long delay)
{
	struct ata_port *ap = link->ap;

	WARN_ON((ap->sff_pio_task_link != NULL) &&
		(ap->sff_pio_task_link != link));
	ap->sff_pio_task_link = link;

	/* may fail if ata_sff_flush_pio_task() in progress */
	ata_sff_queue_delayed_work(&ap->sff_pio_task, msecs_to_jiffies(delay));
}
EXPORT_SYMBOL_GPL(ata_sff_queue_pio_task);

void ata_sff_flush_pio_task(struct ata_port *ap)
{
	trace_ata_sff_flush_pio_task(ap);

	cancel_delayed_work_sync(&ap->sff_pio_task);

	/*
	 * We wanna reset the HSM state to IDLE. If we do so without
	 * grabbing the port lock, critical sections protected by it which
	 * expect the HSM state to stay stable may get surprised. For
	 * example, we may set IDLE in between the time
	 * __ata_sff_port_intr() checks for HSM_ST_IDLE and before it calls
	 * ata_sff_hsm_move() causing ata_sff_hsm_move() to BUG().
	 */
	spin_lock_irq(ap->lock);
	ap->hsm_task_state = HSM_ST_IDLE;
	spin_unlock_irq(ap->lock);

	ap->sff_pio_task_link = NULL;
}

static void ata_sff_pio_task(struct work_struct *work)
{
	struct ata_port *ap =
		container_of(work, struct ata_port, sff_pio_task.work);
	struct ata_link *link = ap->sff_pio_task_link;
	struct ata_queued_cmd *qc;
	u8 status;
	int poll_next;

	spin_lock_irq(ap->lock);

	BUG_ON(ap->sff_pio_task_link == NULL);
	/* qc can be NULL if timeout occurred */
	qc = ata_qc_from_tag(ap, link->active_tag);
	if (!qc) {
		ap->sff_pio_task_link = NULL;
		goto out_unlock;
	}

fsm_start:
	WARN_ON_ONCE(ap->hsm_task_state == HSM_ST_IDLE);

	/*
	 * This is purely heuristic. This is a fast path.
	 * Sometimes when we enter, BSY will be cleared in
	 * a chk-status or two. If not, the drive is probably seeking
	 * or something. Snooze for a couple msecs, then
	 * chk-status again. If still busy, queue delayed work.
	 */
	status = ata_sff_busy_wait(ap, ATA_BUSY, 5);
	if (status & ATA_BUSY) {
		spin_unlock_irq(ap->lock);
		ata_msleep(ap, 2);
		spin_lock_irq(ap->lock);

		status = ata_sff_busy_wait(ap, ATA_BUSY, 10);
		if (status & ATA_BUSY) {
			ata_sff_queue_pio_task(link, ATA_SHORT_PAUSE);
			goto out_unlock;
		}
	}

	/*
	 * hsm_move() may trigger another command to be processed.
	 * clean the link beforehand.
	 */
	ap->sff_pio_task_link = NULL;
	/* move the HSM */
	poll_next = ata_sff_hsm_move(ap, qc, status, 1);

	/* another command or interrupt handler
	 * may be running at this point.
	 */
	if (poll_next)
		goto fsm_start;
out_unlock:
	spin_unlock_irq(ap->lock);
}

/**
 * ata_sff_qc_issue - issue taskfile to a SFF controller
 * @qc: command to issue to device
 *
 * This function issues a PIO or NODATA command to a SFF
 * controller.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 *
 * RETURNS:
 * Zero on success, AC_ERR_* mask on failure
 */
unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct ata_link *link = qc->dev->link;

	/* Use polling pio if the LLD doesn't handle
	 * interrupt driven pio and atapi CDB interrupt.
	 */
	if (ap->flags & ATA_FLAG_PIO_POLLING)
		qc->tf.flags |= ATA_TFLAG_POLLING;

	/* select the device */
	ata_dev_select(ap, qc->dev->devno, 1, 0);

	/* start the command */
	switch (qc->tf.protocol) {
	case ATA_PROT_NODATA:
		if (qc->tf.flags & ATA_TFLAG_POLLING)
			ata_qc_set_polling(qc);

		ata_tf_to_host(ap, &qc->tf, qc->tag);
		ap->hsm_task_state = HSM_ST_LAST;

		if (qc->tf.flags & ATA_TFLAG_POLLING)
			ata_sff_queue_pio_task(link, 0);

		break;

	case ATA_PROT_PIO:
		if (qc->tf.flags & ATA_TFLAG_POLLING)
			ata_qc_set_polling(qc);

		ata_tf_to_host(ap, &qc->tf, qc->tag);

		if (qc->tf.flags & ATA_TFLAG_WRITE) {
			/* PIO data out protocol */
			ap->hsm_task_state = HSM_ST_FIRST;
			ata_sff_queue_pio_task(link, 0);

			/* always send first data block using the
			 * ata_sff_pio_task() codepath.
			 */
		} else {
			/* PIO data in protocol */
			ap->hsm_task_state = HSM_ST;

			if (qc->tf.flags & ATA_TFLAG_POLLING)
				ata_sff_queue_pio_task(link, 0);

			/* if polling, ata_sff_pio_task() handles the
			 * rest. otherwise, interrupt handler takes
			 * over from here.
			 */
		}

		break;

	case ATAPI_PROT_PIO:
	case ATAPI_PROT_NODATA:
		if (qc->tf.flags & ATA_TFLAG_POLLING)
			ata_qc_set_polling(qc);

		ata_tf_to_host(ap, &qc->tf, qc->tag);

		ap->hsm_task_state = HSM_ST_FIRST;

		/* send cdb by polling if no cdb interrupt */
		if ((!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) ||
		    (qc->tf.flags & ATA_TFLAG_POLLING))
			ata_sff_queue_pio_task(link, 0);
		break;

	default:
		return AC_ERR_SYSTEM;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(ata_sff_qc_issue);

/**
 * ata_sff_qc_fill_rtf - fill result TF using ->sff_tf_read
 * @qc: qc to fill result TF for
 *
 * @qc is finished and result TF needs to be filled. Fill it
 * using ->sff_tf_read.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 */
void ata_sff_qc_fill_rtf(struct ata_queued_cmd *qc)
{
	qc->ap->ops->sff_tf_read(qc->ap, &qc->result_tf);
}
EXPORT_SYMBOL_GPL(ata_sff_qc_fill_rtf);

static unsigned int ata_sff_idle_irq(struct ata_port *ap)
{
	ap->stats.idle_irq++;

#ifdef ATA_IRQ_TRAP
	if ((ap->stats.idle_irq % 1000) == 0) {
		ap->ops->sff_check_status(ap);
		if (ap->ops->sff_irq_clear)
			ap->ops->sff_irq_clear(ap);
		ata_port_warn(ap, "irq trap\n");
		return 1;
	}
#endif
	return 0;	/* irq not handled */
}

static unsigned int __ata_sff_port_intr(struct ata_port *ap,
					struct ata_queued_cmd *qc,
					bool hsmv_on_idle)
{
	u8 status;

	trace_ata_sff_port_intr(qc, hsmv_on_idle);

	/* Check whether we are expecting interrupt in this state */
	switch (ap->hsm_task_state) {
	case HSM_ST_FIRST:
		/* Some pre-ATAPI-4 devices assert INTRQ
		 * at this state when ready to receive CDB.
		 */

		/* Check the ATA_DFLAG_CDB_INTR flag is enough here.
		 * The flag was turned on only for atapi devices. No
		 * need to check ata_is_atapi(qc->tf.protocol) again.
		 */
		if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
			return ata_sff_idle_irq(ap);
		break;
	case HSM_ST_IDLE:
		return ata_sff_idle_irq(ap);
	default:
		break;
	}

	/* check main status, clearing INTRQ if needed */
	status = ata_sff_irq_status(ap);
	if (status & ATA_BUSY) {
		if (hsmv_on_idle) {
			/* BMDMA engine is already stopped, we're screwed */
			qc->err_mask |= AC_ERR_HSM;
			ap->hsm_task_state = HSM_ST_ERR;
		} else
			return ata_sff_idle_irq(ap);
	}

	/* clear irq events */
	if (ap->ops->sff_irq_clear)
		ap->ops->sff_irq_clear(ap);

	ata_sff_hsm_move(ap, qc, status, 0);

	return 1;	/* irq handled */
}

/**
 * ata_sff_port_intr - Handle SFF port interrupt
 * @ap: Port on which interrupt arrived (possibly...)
 * @qc: Taskfile currently active in engine
 *
 * Handle port interrupt for given queued command.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 *
 * RETURNS:
 * One if interrupt was handled, zero if not (shared irq).
 */
unsigned int ata_sff_port_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
{
	return __ata_sff_port_intr(ap, qc, false);
}
EXPORT_SYMBOL_GPL(ata_sff_port_intr);

static inline irqreturn_t __ata_sff_interrupt(int irq, void *dev_instance,
	unsigned int (*port_intr)(struct ata_port *, struct ata_queued_cmd *))
{
	struct ata_host *host = dev_instance;
	bool retried = false;
	unsigned int i;
	unsigned int handled, idle, polling;
	unsigned long flags;

	/* TODO: make _irqsave conditional on x86 PCI IDE legacy mode */
	spin_lock_irqsave(&host->lock, flags);

retry:
	handled = idle = polling = 0;
	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		struct ata_queued_cmd *qc;

		qc = ata_qc_from_tag(ap, ap->link.active_tag);
		if (qc) {
			if (!(qc->tf.flags & ATA_TFLAG_POLLING))
				handled |= port_intr(ap, qc);
			else
				polling |= 1 << i;
		} else
			idle |= 1 << i;
	}

	/*
	 * If no port was expecting IRQ but the controller is actually
	 * asserting IRQ line, nobody cared will ensue. Check IRQ
	 * pending status if available and clear spurious IRQ.
	 */
	if (!handled && !retried) {
		bool retry = false;

		for (i = 0; i < host->n_ports; i++) {
			struct ata_port *ap = host->ports[i];

			if (polling & (1 << i))
				continue;

			if (!ap->ops->sff_irq_check ||
			    !ap->ops->sff_irq_check(ap))
				continue;

			if (idle & (1 << i)) {
				ap->ops->sff_check_status(ap);
				if (ap->ops->sff_irq_clear)
					ap->ops->sff_irq_clear(ap);
			} else {
				/* clear INTRQ and check if BUSY cleared */
				if (!(ap->ops->sff_check_status(ap) & ATA_BUSY))
					retry |= true;
				/*
				 * With command in flight, we can't do
				 * sff_irq_clear() w/o racing with completion.
				 */
			}
		}

		if (retry) {
			retried = true;
			goto retry;
		}
	}

	spin_unlock_irqrestore(&host->lock, flags);

	return IRQ_RETVAL(handled);
}

/**
 * ata_sff_interrupt - Default SFF ATA host interrupt handler
 * @irq: irq line (unused)
 * @dev_instance: pointer to our ata_host information structure
 *
 * Default interrupt handler for PCI IDE devices. Calls
 * ata_sff_port_intr() for each port that is not disabled.
 *
 * LOCKING:
 * Obtains host lock during operation.
 *
 * RETURNS:
 * IRQ_NONE or IRQ_HANDLED.
 */
irqreturn_t ata_sff_interrupt(int irq, void *dev_instance)
{
	return __ata_sff_interrupt(irq, dev_instance, ata_sff_port_intr);
}
EXPORT_SYMBOL_GPL(ata_sff_interrupt);
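
/*
 * Example (illustrative sketch, not part of this file): a native SFF
 * driver usually hands this handler straight to ata_host_activate()
 * during probe.  IRQF_SHARED matters because legacy IDE interrupt
 * lines are frequently shared; "my_sht" is a hypothetical
 * scsi_host_template.
 */
#if 0	/* illustration only */
	rc = ata_host_activate(host, pdev->irq, ata_sff_interrupt,
			       IRQF_SHARED, &my_sht);
#endif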

/**
 * ata_sff_lost_interrupt - Check for an apparent lost interrupt
 * @ap: port that appears to have timed out
 *
 * Called from the libata error handlers when the core code suspects
 * an interrupt has been lost. If it has, complete anything we can and
 * then return. Interface must support altstatus for this faster
 * recovery to occur.
 *
 * Locking:
 * Caller holds host lock
 */

void ata_sff_lost_interrupt(struct ata_port *ap)
{
	u8 status = 0;
	struct ata_queued_cmd *qc;

	/* Only one outstanding command per SFF channel */
	qc = ata_qc_from_tag(ap, ap->link.active_tag);
	/* We cannot lose an interrupt on a non-existent or polled command */
	if (!qc || qc->tf.flags & ATA_TFLAG_POLLING)
		return;
	/* See if the controller thinks it is still busy - if so the command
	   isn't a lost IRQ but is still in progress */
	if (WARN_ON_ONCE(!ata_sff_altstatus(ap, &status)))
		return;
	if (status & ATA_BUSY)
		return;

	/* There was a command running, we are no longer busy and we have
	   no interrupt. */
	ata_port_warn(ap, "lost interrupt (Status 0x%x)\n", status);
	/* Run the host interrupt logic as if the interrupt had not been
	   lost */
	ata_sff_port_intr(ap, qc);
}
EXPORT_SYMBOL_GPL(ata_sff_lost_interrupt);

/**
 * ata_sff_freeze - Freeze SFF controller port
 * @ap: port to freeze
 *
 * Freeze SFF controller port.
 *
 * LOCKING:
 * Inherited from caller.
 */
void ata_sff_freeze(struct ata_port *ap)
{
	ap->ctl |= ATA_NIEN;
	ap->last_ctl = ap->ctl;

	ata_sff_set_devctl(ap, ap->ctl);

	/* Under certain circumstances, some controllers raise IRQ on
	 * ATA_NIEN manipulation. Also, many controllers fail to mask
	 * previously pending IRQ on ATA_NIEN assertion. Clear it.
	 */
	ap->ops->sff_check_status(ap);

	if (ap->ops->sff_irq_clear)
		ap->ops->sff_irq_clear(ap);
}
EXPORT_SYMBOL_GPL(ata_sff_freeze);

/**
 * ata_sff_thaw - Thaw SFF controller port
 * @ap: port to thaw
 *
 * Thaw SFF controller port.
 *
 * LOCKING:
 * Inherited from caller.
 */
void ata_sff_thaw(struct ata_port *ap)
{
	/* clear & re-enable interrupts */
	ap->ops->sff_check_status(ap);
	if (ap->ops->sff_irq_clear)
		ap->ops->sff_irq_clear(ap);
	ata_sff_irq_on(ap);
}
EXPORT_SYMBOL_GPL(ata_sff_thaw);

/**
 * ata_sff_prereset - prepare SFF link for reset
 * @link: SFF link to be reset
 * @deadline: deadline jiffies for the operation
 *
 * SFF link @link is about to be reset. Initialize it. It first
 * calls ata_std_prereset() and then waits for !BSY if the port is
 * being softreset.
 *
 * LOCKING:
 * Kernel thread context (may sleep)
 *
 * RETURNS:
 * Always 0.
 */
int ata_sff_prereset(struct ata_link *link, unsigned long deadline)
{
	struct ata_eh_context *ehc = &link->eh_context;
	int rc;

	/* The standard prereset is best-effort and always returns 0 */
	ata_std_prereset(link, deadline);

	/* if we're about to do hardreset, nothing more to do */
	if (ehc->i.action & ATA_EH_HARDRESET)
		return 0;

	/* wait for !BSY if we don't know that no device is attached */
	if (!ata_link_offline(link)) {
		rc = ata_sff_wait_ready(link, deadline);
		if (rc && rc != -ENODEV) {
			ata_link_warn(link,
				      "device not ready (errno=%d), forcing hardreset\n",
				      rc);
			ehc->i.action |= ATA_EH_HARDRESET;
		}
	}

	return 0;
}
EXPORT_SYMBOL_GPL(ata_sff_prereset);

/**
 * ata_devchk - PATA device presence detection
 * @ap: ATA channel to examine
 * @device: Device to examine (starting at zero)
 *
 * This technique was originally described in
 * Hale Landis's ATADRVR (www.ata-atapi.com), and
 * later found its way into the ATA/ATAPI spec.
 *
 * Write a pattern to the ATA shadow registers,
 * and if a device is present, it will respond by
 * correctly storing and echoing back the
 * ATA shadow register contents.
 *
 * RETURN:
 * true if device is present, false if not.
 *
 * LOCKING:
 * caller.
 */
static bool ata_devchk(struct ata_port *ap, unsigned int device)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	u8 nsect, lbal;

	ap->ops->sff_dev_select(ap, device);

	iowrite8(0x55, ioaddr->nsect_addr);
	iowrite8(0xaa, ioaddr->lbal_addr);

	iowrite8(0xaa, ioaddr->nsect_addr);
	iowrite8(0x55, ioaddr->lbal_addr);

	iowrite8(0x55, ioaddr->nsect_addr);
	iowrite8(0xaa, ioaddr->lbal_addr);

	nsect = ioread8(ioaddr->nsect_addr);
	lbal = ioread8(ioaddr->lbal_addr);

	if ((nsect == 0x55) && (lbal == 0xaa))
		return true;	/* we found a device */

	return false;		/* nothing found */
}
1723 | |
1724 | /** |
1725 | * ata_sff_dev_classify - Parse returned ATA device signature |
1726 | * @dev: ATA device to classify (starting at zero) |
1727 | * @present: device seems present |
1728 | * @r_err: Value of error register on completion |
1729 | * |
1730 | * After an event -- SRST, E.D.D., or SATA COMRESET -- occurs, |
1731 | * an ATA/ATAPI-defined set of values is placed in the ATA |
1732 | * shadow registers, indicating the results of device detection |
1733 | * and diagnostics. |
1734 | * |
1735 | * Select the ATA device, and read the values from the ATA shadow |
1736 | * registers. Then parse according to the Error register value, |
1737 | * and the spec-defined values examined by ata_dev_classify(). |
1738 | * |
1739 | * LOCKING: |
1740 | * caller. |
1741 | * |
1742 | * RETURNS: |
1743 | * Device type - %ATA_DEV_ATA, %ATA_DEV_ATAPI or %ATA_DEV_NONE. |
1744 | */ |
1745 | unsigned int ata_sff_dev_classify(struct ata_device *dev, int present, |
1746 | u8 *r_err) |
1747 | { |
1748 | struct ata_port *ap = dev->link->ap; |
1749 | struct ata_taskfile tf; |
1750 | unsigned int class; |
1751 | u8 err; |
1752 | |
1753 | ap->ops->sff_dev_select(ap, dev->devno); |
1754 | |
1755 | memset(&tf, 0, sizeof(tf)); |
1756 | |
1757 | ap->ops->sff_tf_read(ap, &tf); |
1758 | err = tf.error; |
1759 | if (r_err) |
1760 | *r_err = err; |
1761 | |
1762 | /* see if device passed diags: continue and warn later */ |
1763 | if (err == 0) |
1764 | /* diagnostic fail : do nothing _YET_ */ |
1765 | dev->horkage |= ATA_HORKAGE_DIAGNOSTIC; |
1766 | else if (err == 1) |
1767 | /* do nothing */ ; |
1768 | else if ((dev->devno == 0) && (err == 0x81)) |
1769 | /* do nothing */ ; |
1770 | else |
1771 | return ATA_DEV_NONE; |
1772 | |
1773 | /* determine if device is ATA or ATAPI */ |
	class = ata_port_classify(ap, &tf);
1775 | switch (class) { |
1776 | case ATA_DEV_UNKNOWN: |
1777 | /* |
1778 | * If the device failed diagnostic, it's likely to |
1779 | * have reported incorrect device signature too. |
1780 | * Assume ATA device if the device seems present but |
1781 | * device signature is invalid with diagnostic |
1782 | * failure. |
1783 | */ |
1784 | if (present && (dev->horkage & ATA_HORKAGE_DIAGNOSTIC)) |
1785 | class = ATA_DEV_ATA; |
1786 | else |
1787 | class = ATA_DEV_NONE; |
1788 | break; |
1789 | case ATA_DEV_ATA: |
1790 | if (ap->ops->sff_check_status(ap) == 0) |
1791 | class = ATA_DEV_NONE; |
1792 | break; |
1793 | } |
1794 | return class; |
1795 | } |
1796 | EXPORT_SYMBOL_GPL(ata_sff_dev_classify); |
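
/*
 * For quick reference, the post-reset signature values that
 * ata_port_classify() matches on (per the ATA/ATAPI spec) look like
 * this in the shadow registers:
 *
 *	nsect	lbal	lbam	lbah	device type
 *	0x01	0x01	0x00	0x00	ATA
 *	0x01	0x01	0x14	0xeb	ATAPI
 *
 * (SATA additionally defines PMP and SEMB signatures in lbam/lbah.)
 * Anything else comes back as ATA_DEV_UNKNOWN and is resolved by the
 * ATA_HORKAGE_DIAGNOSTIC heuristic above.
 */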
1797 | |
1798 | /** |
1799 | * ata_sff_wait_after_reset - wait for devices to become ready after reset |
1800 | * @link: SFF link which is just reset |
1801 | * @devmask: mask of present devices |
1802 | * @deadline: deadline jiffies for the operation |
1803 | * |
 *	Wait for devices attached to SFF @link to become ready after
 *	reset.  A preceding 150ms wait avoids accessing the TF status
 *	register too early.
1807 | * |
1808 | * LOCKING: |
1809 | * Kernel thread context (may sleep). |
1810 | * |
1811 | * RETURNS: |
1812 | * 0 on success, -ENODEV if some or all of devices in @devmask |
1813 | * don't seem to exist. -errno on other errors. |
1814 | */ |
1815 | int ata_sff_wait_after_reset(struct ata_link *link, unsigned int devmask, |
1816 | unsigned long deadline) |
1817 | { |
1818 | struct ata_port *ap = link->ap; |
1819 | struct ata_ioports *ioaddr = &ap->ioaddr; |
1820 | unsigned int dev0 = devmask & (1 << 0); |
1821 | unsigned int dev1 = devmask & (1 << 1); |
1822 | int rc, ret = 0; |
1823 | |
	ata_msleep(ap, ATA_WAIT_AFTER_RESET);
1825 | |
1826 | /* always check readiness of the master device */ |
1827 | rc = ata_sff_wait_ready(link, deadline); |
1828 | /* -ENODEV means the odd clown forgot the D7 pulldown resistor |
1829 | * and TF status is 0xff, bail out on it too. |
1830 | */ |
1831 | if (rc) |
1832 | return rc; |
1833 | |
1834 | /* if device 1 was found in ata_devchk, wait for register |
1835 | * access briefly, then wait for BSY to clear. |
1836 | */ |
1837 | if (dev1) { |
1838 | int i; |
1839 | |
1840 | ap->ops->sff_dev_select(ap, 1); |
1841 | |
1842 | /* Wait for register access. Some ATAPI devices fail |
1843 | * to set nsect/lbal after reset, so don't waste too |
1844 | * much time on it. We're gonna wait for !BSY anyway. |
1845 | */ |
1846 | for (i = 0; i < 2; i++) { |
1847 | u8 nsect, lbal; |
1848 | |
1849 | nsect = ioread8(ioaddr->nsect_addr); |
1850 | lbal = ioread8(ioaddr->lbal_addr); |
1851 | if ((nsect == 1) && (lbal == 1)) |
1852 | break; |
			ata_msleep(ap, 50);	/* give drive a breather */
1854 | } |
1855 | |
1856 | rc = ata_sff_wait_ready(link, deadline); |
1857 | if (rc) { |
1858 | if (rc != -ENODEV) |
1859 | return rc; |
1860 | ret = rc; |
1861 | } |
1862 | } |
1863 | |
1864 | /* is all this really necessary? */ |
1865 | ap->ops->sff_dev_select(ap, 0); |
1866 | if (dev1) |
1867 | ap->ops->sff_dev_select(ap, 1); |
1868 | if (dev0) |
1869 | ap->ops->sff_dev_select(ap, 0); |
1870 | |
1871 | return ret; |
1872 | } |
1873 | EXPORT_SYMBOL_GPL(ata_sff_wait_after_reset); |
1874 | |
1875 | static int ata_bus_softreset(struct ata_port *ap, unsigned int devmask, |
1876 | unsigned long deadline) |
1877 | { |
1878 | struct ata_ioports *ioaddr = &ap->ioaddr; |
1879 | |
1880 | if (ap->ioaddr.ctl_addr) { |
1881 | /* software reset. causes dev0 to be selected */ |
1882 | iowrite8(ap->ctl, ioaddr->ctl_addr); |
1883 | udelay(20); /* FIXME: flush */ |
1884 | iowrite8(ap->ctl | ATA_SRST, ioaddr->ctl_addr); |
1885 | udelay(20); /* FIXME: flush */ |
1886 | iowrite8(ap->ctl, ioaddr->ctl_addr); |
1887 | ap->last_ctl = ap->ctl; |
1888 | } |
1889 | |
	/* wait for the port to become ready */
1891 | return ata_sff_wait_after_reset(&ap->link, devmask, deadline); |
1892 | } |
1893 | |
1894 | /** |
1895 | * ata_sff_softreset - reset host port via ATA SRST |
1896 | * @link: ATA link to reset |
1897 | * @classes: resulting classes of attached devices |
1898 | * @deadline: deadline jiffies for the operation |
1899 | * |
1900 | * Reset host port using ATA SRST. |
1901 | * |
1902 | * LOCKING: |
1903 | * Kernel thread context (may sleep) |
1904 | * |
1905 | * RETURNS: |
1906 | * 0 on success, -errno otherwise. |
1907 | */ |
1908 | int ata_sff_softreset(struct ata_link *link, unsigned int *classes, |
1909 | unsigned long deadline) |
1910 | { |
1911 | struct ata_port *ap = link->ap; |
1912 | unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS; |
1913 | unsigned int devmask = 0; |
1914 | int rc; |
1915 | u8 err; |
1916 | |
1917 | /* determine if device 0/1 are present */ |
	if (ata_devchk(ap, 0))
		devmask |= (1 << 0);
	if (slave_possible && ata_devchk(ap, 1))
		devmask |= (1 << 1);
1922 | |
1923 | /* select device 0 again */ |
1924 | ap->ops->sff_dev_select(ap, 0); |
1925 | |
1926 | /* issue bus reset */ |
1927 | rc = ata_bus_softreset(ap, devmask, deadline); |
1928 | /* if link is occupied, -ENODEV too is an error */ |
1929 | if (rc && (rc != -ENODEV || sata_scr_valid(link))) { |
1930 | ata_link_err(link, "SRST failed (errno=%d)\n" , rc); |
1931 | return rc; |
1932 | } |
1933 | |
1934 | /* determine by signature whether we have ATA or ATAPI devices */ |
1935 | classes[0] = ata_sff_dev_classify(&link->device[0], |
1936 | devmask & (1 << 0), &err); |
1937 | if (slave_possible && err != 0x81) |
1938 | classes[1] = ata_sff_dev_classify(&link->device[1], |
1939 | devmask & (1 << 1), &err); |
1940 | |
1941 | return 0; |
1942 | } |
1943 | EXPORT_SYMBOL_GPL(ata_sff_softreset); |
1944 | |
1945 | /** |
1946 | * sata_sff_hardreset - reset host port via SATA phy reset |
1947 | * @link: link to reset |
1948 | * @class: resulting class of attached device |
1949 | * @deadline: deadline jiffies for the operation |
1950 | * |
1951 | * SATA phy-reset host port using DET bits of SControl register, |
1952 | * wait for !BSY and classify the attached device. |
1953 | * |
1954 | * LOCKING: |
1955 | * Kernel thread context (may sleep) |
1956 | * |
1957 | * RETURNS: |
1958 | * 0 on success, -errno otherwise. |
1959 | */ |
1960 | int sata_sff_hardreset(struct ata_link *link, unsigned int *class, |
1961 | unsigned long deadline) |
1962 | { |
1963 | struct ata_eh_context *ehc = &link->eh_context; |
1964 | const unsigned int *timing = sata_ehc_deb_timing(ehc); |
1965 | bool online; |
1966 | int rc; |
1967 | |
	rc = sata_link_hardreset(link, timing, deadline, &online,
				 ata_sff_check_ready);
1970 | if (online) |
1971 | *class = ata_sff_dev_classify(link->device, 1, NULL); |
1972 | |
1973 | return rc; |
1974 | } |
1975 | EXPORT_SYMBOL_GPL(sata_sff_hardreset); |
1976 | |
1977 | /** |
1978 | * ata_sff_postreset - SFF postreset callback |
1979 | * @link: the target SFF ata_link |
1980 | * @classes: classes of attached devices |
1981 | * |
1982 | * This function is invoked after a successful reset. It first |
1983 | * calls ata_std_postreset() and performs SFF specific postreset |
1984 | * processing. |
1985 | * |
1986 | * LOCKING: |
1987 | * Kernel thread context (may sleep) |
1988 | */ |
1989 | void ata_sff_postreset(struct ata_link *link, unsigned int *classes) |
1990 | { |
1991 | struct ata_port *ap = link->ap; |
1992 | |
1993 | ata_std_postreset(link, classes); |
1994 | |
1995 | /* is double-select really necessary? */ |
1996 | if (classes[0] != ATA_DEV_NONE) |
1997 | ap->ops->sff_dev_select(ap, 1); |
1998 | if (classes[1] != ATA_DEV_NONE) |
1999 | ap->ops->sff_dev_select(ap, 0); |
2000 | |
2001 | /* bail out if no device is present */ |
2002 | if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) |
2003 | return; |
2004 | |
2005 | /* set up device control */ |
	if (ata_sff_set_devctl(ap, ap->ctl))
2007 | ap->last_ctl = ap->ctl; |
2008 | } |
2009 | EXPORT_SYMBOL_GPL(ata_sff_postreset); |
2010 | |
2011 | /** |
2012 | * ata_sff_drain_fifo - Stock FIFO drain logic for SFF controllers |
2013 | * @qc: command |
2014 | * |
2015 | * Drain the FIFO and device of any stuck data following a command |
2016 | * failing to complete. In some cases this is necessary before a |
2017 | * reset will recover the device. |
2018 | * |
2019 | */ |
2020 | |
2021 | void ata_sff_drain_fifo(struct ata_queued_cmd *qc) |
2022 | { |
2023 | int count; |
2024 | struct ata_port *ap; |
2025 | |
2026 | /* We only need to flush incoming data when a command was running */ |
2027 | if (qc == NULL || qc->dma_dir == DMA_TO_DEVICE) |
2028 | return; |
2029 | |
2030 | ap = qc->ap; |
2031 | /* Drain up to 64K of data before we give up this recovery method */ |
2032 | for (count = 0; (ap->ops->sff_check_status(ap) & ATA_DRQ) |
2033 | && count < 65536; count += 2) |
2034 | ioread16(ap->ioaddr.data_addr); |
2035 | |
2036 | if (count) |
2037 | ata_port_dbg(ap, "drained %d bytes to clear DRQ\n" , count); |
2038 | |
2039 | } |
2040 | EXPORT_SYMBOL_GPL(ata_sff_drain_fifo); |
2041 | |
2042 | /** |
2043 | * ata_sff_error_handler - Stock error handler for SFF controller |
2044 | * @ap: port to handle error for |
2045 | * |
2046 | * Stock error handler for SFF controller. It can handle both |
2047 | * PATA and SATA controllers. Many controllers should be able to |
2048 | * use this EH as-is or with some added handling before and |
2049 | * after. |
2050 | * |
2051 | * LOCKING: |
2052 | * Kernel thread context (may sleep) |
2053 | */ |
2054 | void ata_sff_error_handler(struct ata_port *ap) |
2055 | { |
2056 | ata_reset_fn_t softreset = ap->ops->softreset; |
2057 | ata_reset_fn_t hardreset = ap->ops->hardreset; |
2058 | struct ata_queued_cmd *qc; |
2059 | unsigned long flags; |
2060 | |
	qc = __ata_qc_from_tag(ap, ap->link.active_tag);
2062 | if (qc && !(qc->flags & ATA_QCFLAG_EH)) |
2063 | qc = NULL; |
2064 | |
2065 | spin_lock_irqsave(ap->lock, flags); |
2066 | |
2067 | /* |
2068 | * We *MUST* do FIFO draining before we issue a reset as |
2069 | * several devices helpfully clear their internal state and |
2070 | * will lock solid if we touch the data port post reset. Pass |
2071 | * qc in case anyone wants to do different PIO/DMA recovery or |
2072 | * has per command fixups |
2073 | */ |
2074 | if (ap->ops->sff_drain_fifo) |
2075 | ap->ops->sff_drain_fifo(qc); |
2076 | |
	spin_unlock_irqrestore(ap->lock, flags);
2078 | |
2079 | /* ignore built-in hardresets if SCR access is not available */ |
2080 | if ((hardreset == sata_std_hardreset || |
	     hardreset == sata_sff_hardreset) && !sata_scr_valid(&ap->link))
2082 | hardreset = NULL; |
2083 | |
	ata_do_eh(ap, ap->ops->prereset, softreset, hardreset,
		  ap->ops->postreset);
2086 | } |
2087 | EXPORT_SYMBOL_GPL(ata_sff_error_handler); |
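
/*
 * Example (illustrative sketch only; the my_* names are hypothetical,
 * not part of libata): a driver that needs chip-specific handling
 * around the stock EH can simply wrap it:
 *
 *	static void my_error_handler(struct ata_port *ap)
 *	{
 *		my_chip_quiesce(ap);		// hypothetical helper
 *		ata_sff_error_handler(ap);
 *		my_chip_resume(ap);		// hypothetical helper
 *	}
 *
 * and point .error_handler at it in its ata_port_operations.
 */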
2088 | |
2089 | /** |
2090 | * ata_sff_std_ports - initialize ioaddr with standard port offsets. |
2091 | * @ioaddr: IO address structure to be initialized |
2092 | * |
2093 | * Utility function which initializes data_addr, error_addr, |
2094 | * feature_addr, nsect_addr, lbal_addr, lbam_addr, lbah_addr, |
2095 | * device_addr, status_addr, and command_addr to standard offsets |
2096 | * relative to cmd_addr. |
2097 | * |
2098 | * Does not set ctl_addr, altstatus_addr, bmdma_addr, or scr_addr. |
2099 | */ |
2100 | void ata_sff_std_ports(struct ata_ioports *ioaddr) |
2101 | { |
2102 | ioaddr->data_addr = ioaddr->cmd_addr + ATA_REG_DATA; |
2103 | ioaddr->error_addr = ioaddr->cmd_addr + ATA_REG_ERR; |
2104 | ioaddr->feature_addr = ioaddr->cmd_addr + ATA_REG_FEATURE; |
2105 | ioaddr->nsect_addr = ioaddr->cmd_addr + ATA_REG_NSECT; |
2106 | ioaddr->lbal_addr = ioaddr->cmd_addr + ATA_REG_LBAL; |
2107 | ioaddr->lbam_addr = ioaddr->cmd_addr + ATA_REG_LBAM; |
2108 | ioaddr->lbah_addr = ioaddr->cmd_addr + ATA_REG_LBAH; |
2109 | ioaddr->device_addr = ioaddr->cmd_addr + ATA_REG_DEVICE; |
2110 | ioaddr->status_addr = ioaddr->cmd_addr + ATA_REG_STATUS; |
2111 | ioaddr->command_addr = ioaddr->cmd_addr + ATA_REG_CMD; |
2112 | } |
2113 | EXPORT_SYMBOL_GPL(ata_sff_std_ports); |
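
/*
 * Example (illustrative sketch; "mmio" and the MY_*_OFS offsets are
 * assumptions, not libata symbols): a driver usually fills in cmd_addr
 * and the control block itself, then lets ata_sff_std_ports() derive
 * the taskfile registers:
 *
 *	struct ata_ioports *ioaddr = &ap->ioaddr;
 *
 *	ioaddr->cmd_addr = mmio + MY_CMD_OFS;
 *	ioaddr->ctl_addr = mmio + MY_CTL_OFS;
 *	ioaddr->altstatus_addr = ioaddr->ctl_addr;
 *	ata_sff_std_ports(ioaddr);
 */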
2114 | |
2115 | #ifdef CONFIG_PCI |
2116 | |
2117 | static bool ata_resources_present(struct pci_dev *pdev, int port) |
2118 | { |
2119 | int i; |
2120 | |
2121 | /* Check the PCI resources for this channel are enabled */ |
2122 | port *= 2; |
2123 | for (i = 0; i < 2; i++) { |
2124 | if (pci_resource_start(pdev, port + i) == 0 || |
2125 | pci_resource_len(pdev, port + i) == 0) |
2126 | return false; |
2127 | } |
2128 | return true; |
2129 | } |
2130 | |
2131 | /** |
2132 | * ata_pci_sff_init_host - acquire native PCI ATA resources and init host |
2133 | * @host: target ATA host |
2134 | * |
2135 | * Acquire native PCI ATA resources for @host and initialize the |
2136 | * first two ports of @host accordingly. Ports marked dummy are |
2137 | * skipped and allocation failure makes the port dummy. |
2138 | * |
2139 | * Note that native PCI resources are valid even for legacy hosts |
2140 | * as we fix up pdev resources array early in boot, so this |
2141 | * function can be used for both native and legacy SFF hosts. |
2142 | * |
2143 | * LOCKING: |
2144 | * Inherited from calling layer (may sleep). |
2145 | * |
2146 | * RETURNS: |
2147 | * 0 if at least one port is initialized, -ENODEV if no port is |
2148 | * available. |
2149 | */ |
2150 | int ata_pci_sff_init_host(struct ata_host *host) |
2151 | { |
2152 | struct device *gdev = host->dev; |
2153 | struct pci_dev *pdev = to_pci_dev(gdev); |
2154 | unsigned int mask = 0; |
2155 | int i, rc; |
2156 | |
2157 | /* request, iomap BARs and init port addresses accordingly */ |
2158 | for (i = 0; i < 2; i++) { |
2159 | struct ata_port *ap = host->ports[i]; |
2160 | int base = i * 2; |
2161 | void __iomem * const *iomap; |
2162 | |
2163 | if (ata_port_is_dummy(ap)) |
2164 | continue; |
2165 | |
2166 | /* Discard disabled ports. Some controllers show |
2167 | * their unused channels this way. Disabled ports are |
2168 | * made dummy. |
2169 | */ |
		if (!ata_resources_present(pdev, i)) {
2171 | ap->ops = &ata_dummy_port_ops; |
2172 | continue; |
2173 | } |
2174 | |
		rc = pcim_iomap_regions(pdev, 0x3 << base,
					dev_driver_string(gdev));
2177 | if (rc) { |
2178 | dev_warn(gdev, |
2179 | "failed to request/iomap BARs for port %d (errno=%d)\n" , |
2180 | i, rc); |
2181 | if (rc == -EBUSY) |
2182 | pcim_pin_device(pdev); |
2183 | ap->ops = &ata_dummy_port_ops; |
2184 | continue; |
2185 | } |
2186 | host->iomap = iomap = pcim_iomap_table(pdev); |
2187 | |
2188 | ap->ioaddr.cmd_addr = iomap[base]; |
2189 | ap->ioaddr.altstatus_addr = |
2190 | ap->ioaddr.ctl_addr = (void __iomem *) |
2191 | ((unsigned long)iomap[base + 1] | ATA_PCI_CTL_OFS); |
2192 | ata_sff_std_ports(&ap->ioaddr); |
2193 | |
2194 | ata_port_desc(ap, fmt: "cmd 0x%llx ctl 0x%llx" , |
2195 | (unsigned long long)pci_resource_start(pdev, base), |
2196 | (unsigned long long)pci_resource_start(pdev, base + 1)); |
2197 | |
2198 | mask |= 1 << i; |
2199 | } |
2200 | |
2201 | if (!mask) { |
2202 | dev_err(gdev, "no available native port\n" ); |
2203 | return -ENODEV; |
2204 | } |
2205 | |
2206 | return 0; |
2207 | } |
2208 | EXPORT_SYMBOL_GPL(ata_pci_sff_init_host); |
2209 | |
2210 | /** |
2211 | * ata_pci_sff_prepare_host - helper to prepare PCI PIO-only SFF ATA host |
2212 | * @pdev: target PCI device |
2213 | * @ppi: array of port_info, must be enough for two ports |
2214 | * @r_host: out argument for the initialized ATA host |
2215 | * |
2216 | * Helper to allocate PIO-only SFF ATA host for @pdev, acquire |
2217 | * all PCI resources and initialize it accordingly in one go. |
2218 | * |
2219 | * LOCKING: |
2220 | * Inherited from calling layer (may sleep). |
2221 | * |
2222 | * RETURNS: |
2223 | * 0 on success, -errno otherwise. |
2224 | */ |
2225 | int ata_pci_sff_prepare_host(struct pci_dev *pdev, |
2226 | const struct ata_port_info * const *ppi, |
2227 | struct ata_host **r_host) |
2228 | { |
2229 | struct ata_host *host; |
2230 | int rc; |
2231 | |
	if (!devres_open_group(&pdev->dev, NULL, GFP_KERNEL))
2233 | return -ENOMEM; |
2234 | |
	host = ata_host_alloc_pinfo(&pdev->dev, ppi, 2);
	if (!host) {
		dev_err(&pdev->dev, "failed to allocate ATA host\n");
2238 | rc = -ENOMEM; |
2239 | goto err_out; |
2240 | } |
2241 | |
2242 | rc = ata_pci_sff_init_host(host); |
2243 | if (rc) |
2244 | goto err_out; |
2245 | |
	devres_remove_group(&pdev->dev, NULL);
2247 | *r_host = host; |
2248 | return 0; |
2249 | |
2250 | err_out: |
	devres_release_group(&pdev->dev, NULL);
2252 | return rc; |
2253 | } |
2254 | EXPORT_SYMBOL_GPL(ata_pci_sff_prepare_host); |
2255 | |
2256 | /** |
2257 | * ata_pci_sff_activate_host - start SFF host, request IRQ and register it |
2258 | * @host: target SFF ATA host |
2259 | * @irq_handler: irq_handler used when requesting IRQ(s) |
2260 | * @sht: scsi_host_template to use when registering the host |
2261 | * |
2262 | * This is the counterpart of ata_host_activate() for SFF ATA |
2263 | * hosts. This separate helper is necessary because SFF hosts |
2264 | * use two separate interrupts in legacy mode. |
2265 | * |
2266 | * LOCKING: |
2267 | * Inherited from calling layer (may sleep). |
2268 | * |
2269 | * RETURNS: |
2270 | * 0 on success, -errno otherwise. |
2271 | */ |
2272 | int ata_pci_sff_activate_host(struct ata_host *host, |
2273 | irq_handler_t irq_handler, |
2274 | const struct scsi_host_template *sht) |
2275 | { |
2276 | struct device *dev = host->dev; |
2277 | struct pci_dev *pdev = to_pci_dev(dev); |
	const char *drv_name = dev_driver_string(host->dev);
2279 | int legacy_mode = 0, rc; |
2280 | |
2281 | rc = ata_host_start(host); |
2282 | if (rc) |
2283 | return rc; |
2284 | |
2285 | if ((pdev->class >> 8) == PCI_CLASS_STORAGE_IDE) { |
2286 | u8 tmp8, mask = 0; |
2287 | |
2288 | /* |
2289 | * ATA spec says we should use legacy mode when one |
2290 | * port is in legacy mode, but disabled ports on some |
		 * PCI hosts appear as fixed legacy ports, e.g. SB600/700,
		 * on which the secondary port is not wired, so
		 * ignore ports that are marked as 'dummy' during
		 * this check.
2295 | */ |
		pci_read_config_byte(pdev, PCI_CLASS_PROG, &tmp8);
		if (!ata_port_is_dummy(host->ports[0]))
			mask |= (1 << 0);
		if (!ata_port_is_dummy(host->ports[1]))
2300 | mask |= (1 << 2); |
2301 | if ((tmp8 & mask) != mask) |
2302 | legacy_mode = 1; |
2303 | } |
2304 | |
2305 | if (!devres_open_group(dev, NULL, GFP_KERNEL)) |
2306 | return -ENOMEM; |
2307 | |
2308 | if (!legacy_mode && pdev->irq) { |
2309 | int i; |
2310 | |
		rc = devm_request_irq(dev, pdev->irq, irq_handler,
				      IRQF_SHARED, drv_name, host);
		if (rc)
			goto out;

		for (i = 0; i < 2; i++) {
			if (ata_port_is_dummy(host->ports[i]))
				continue;
			ata_port_desc_misc(host->ports[i], pdev->irq);
		}
	} else if (legacy_mode) {
		if (!ata_port_is_dummy(host->ports[0])) {
			rc = devm_request_irq(dev, ATA_PRIMARY_IRQ(pdev),
					      irq_handler, IRQF_SHARED,
					      drv_name, host);
			if (rc)
				goto out;

			ata_port_desc_misc(host->ports[0],
					   ATA_PRIMARY_IRQ(pdev));
		}

		if (!ata_port_is_dummy(host->ports[1])) {
			rc = devm_request_irq(dev, ATA_SECONDARY_IRQ(pdev),
					      irq_handler, IRQF_SHARED,
					      drv_name, host);
			if (rc)
				goto out;

			ata_port_desc_misc(host->ports[1],
					   ATA_SECONDARY_IRQ(pdev));
2342 | } |
2343 | } |
2344 | |
2345 | rc = ata_host_register(host, sht); |
2346 | out: |
2347 | if (rc == 0) |
2348 | devres_remove_group(dev, NULL); |
2349 | else |
2350 | devres_release_group(dev, NULL); |
2351 | |
2352 | return rc; |
2353 | } |
2354 | EXPORT_SYMBOL_GPL(ata_pci_sff_activate_host); |
2355 | |
2356 | static const struct ata_port_info *ata_sff_find_valid_pi( |
2357 | const struct ata_port_info * const *ppi) |
2358 | { |
2359 | int i; |
2360 | |
2361 | /* look up the first valid port_info */ |
2362 | for (i = 0; i < 2 && ppi[i]; i++) |
2363 | if (ppi[i]->port_ops != &ata_dummy_port_ops) |
2364 | return ppi[i]; |
2365 | |
2366 | return NULL; |
2367 | } |
2368 | |
2369 | static int ata_pci_init_one(struct pci_dev *pdev, |
2370 | const struct ata_port_info * const *ppi, |
2371 | const struct scsi_host_template *sht, void *host_priv, |
2372 | int hflags, bool bmdma) |
2373 | { |
2374 | struct device *dev = &pdev->dev; |
2375 | const struct ata_port_info *pi; |
2376 | struct ata_host *host = NULL; |
2377 | int rc; |
2378 | |
2379 | pi = ata_sff_find_valid_pi(ppi); |
2380 | if (!pi) { |
2381 | dev_err(&pdev->dev, "no valid port_info specified\n" ); |
2382 | return -EINVAL; |
2383 | } |
2384 | |
2385 | if (!devres_open_group(dev, NULL, GFP_KERNEL)) |
2386 | return -ENOMEM; |
2387 | |
2388 | rc = pcim_enable_device(pdev); |
2389 | if (rc) |
2390 | goto out; |
2391 | |
2392 | #ifdef CONFIG_ATA_BMDMA |
2393 | if (bmdma) |
2394 | /* prepare and activate BMDMA host */ |
		rc = ata_pci_bmdma_prepare_host(pdev, ppi, &host);
2396 | else |
2397 | #endif |
2398 | /* prepare and activate SFF host */ |
2399 | rc = ata_pci_sff_prepare_host(pdev, ppi, &host); |
2400 | if (rc) |
2401 | goto out; |
2402 | host->private_data = host_priv; |
2403 | host->flags |= hflags; |
2404 | |
2405 | #ifdef CONFIG_ATA_BMDMA |
2406 | if (bmdma) { |
		pci_set_master(pdev);
2408 | rc = ata_pci_sff_activate_host(host, ata_bmdma_interrupt, sht); |
2409 | } else |
2410 | #endif |
2411 | rc = ata_pci_sff_activate_host(host, ata_sff_interrupt, sht); |
2412 | out: |
2413 | if (rc == 0) |
		devres_remove_group(&pdev->dev, NULL);
	else
		devres_release_group(&pdev->dev, NULL);
2417 | |
2418 | return rc; |
2419 | } |
2420 | |
2421 | /** |
2422 | * ata_pci_sff_init_one - Initialize/register PIO-only PCI IDE controller |
2423 | * @pdev: Controller to be initialized |
2424 | * @ppi: array of port_info, must be enough for two ports |
2425 | * @sht: scsi_host_template to use when registering the host |
2426 | * @host_priv: host private_data |
2427 | * @hflag: host flags |
2428 | * |
2429 | * This is a helper function which can be called from a driver's |
2430 | * xxx_init_one() probe function if the hardware uses traditional |
2431 | * IDE taskfile registers and is PIO only. |
2432 | * |
2433 | * ASSUMPTION: |
2434 | * Nobody makes a single channel controller that appears solely as |
2435 | * the secondary legacy port on PCI. |
2436 | * |
2437 | * LOCKING: |
2438 | * Inherited from PCI layer (may sleep). |
2439 | * |
2440 | * RETURNS: |
 * Zero on success, negative errno-based value on error.
2442 | */ |
2443 | int ata_pci_sff_init_one(struct pci_dev *pdev, |
2444 | const struct ata_port_info * const *ppi, |
2445 | const struct scsi_host_template *sht, void *host_priv, int hflag) |
2446 | { |
	return ata_pci_init_one(pdev, ppi, sht, host_priv, hflag, false);
2448 | } |
2449 | EXPORT_SYMBOL_GPL(ata_pci_sff_init_one); |
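
/*
 * Example (minimal sketch of a hypothetical PIO-only driver's probe;
 * my_sht and the surrounding PCI boilerplate are assumptions):
 *
 *	static int my_init_one(struct pci_dev *pdev,
 *			       const struct pci_device_id *id)
 *	{
 *		static const struct ata_port_info info = {
 *			.flags		= ATA_FLAG_SLAVE_POSS,
 *			.pio_mask	= ATA_PIO4,
 *			.port_ops	= &ata_sff_port_ops,
 *		};
 *		const struct ata_port_info *ppi[] = { &info, NULL };
 *
 *		return ata_pci_sff_init_one(pdev, ppi, &my_sht, NULL, 0);
 *	}
 */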
2450 | |
2451 | #endif /* CONFIG_PCI */ |
2452 | |
2453 | /* |
2454 | * BMDMA support |
2455 | */ |
2456 | |
2457 | #ifdef CONFIG_ATA_BMDMA |
2458 | |
2459 | const struct ata_port_operations ata_bmdma_port_ops = { |
2460 | .inherits = &ata_sff_port_ops, |
2461 | |
2462 | .error_handler = ata_bmdma_error_handler, |
2463 | .post_internal_cmd = ata_bmdma_post_internal_cmd, |
2464 | |
2465 | .qc_prep = ata_bmdma_qc_prep, |
2466 | .qc_issue = ata_bmdma_qc_issue, |
2467 | |
2468 | .sff_irq_clear = ata_bmdma_irq_clear, |
2469 | .bmdma_setup = ata_bmdma_setup, |
2470 | .bmdma_start = ata_bmdma_start, |
2471 | .bmdma_stop = ata_bmdma_stop, |
2472 | .bmdma_status = ata_bmdma_status, |
2473 | |
2474 | .port_start = ata_bmdma_port_start, |
2475 | }; |
2476 | EXPORT_SYMBOL_GPL(ata_bmdma_port_ops); |
2477 | |
2478 | const struct ata_port_operations ata_bmdma32_port_ops = { |
2479 | .inherits = &ata_bmdma_port_ops, |
2480 | |
2481 | .sff_data_xfer = ata_sff_data_xfer32, |
2482 | .port_start = ata_bmdma_port_start32, |
2483 | }; |
2484 | EXPORT_SYMBOL_GPL(ata_bmdma32_port_ops); |
2485 | |
2486 | /** |
2487 | * ata_bmdma_fill_sg - Fill PCI IDE PRD table |
2488 | * @qc: Metadata associated with taskfile to be transferred |
2489 | * |
2490 | * Fill PCI IDE PRD (scatter-gather) table with segments |
2491 | * associated with the current disk command. |
2492 | * |
2493 | * LOCKING: |
2494 | * spin_lock_irqsave(host lock) |
2495 | * |
2496 | */ |
2497 | static void ata_bmdma_fill_sg(struct ata_queued_cmd *qc) |
2498 | { |
2499 | struct ata_port *ap = qc->ap; |
2500 | struct ata_bmdma_prd *prd = ap->bmdma_prd; |
2501 | struct scatterlist *sg; |
2502 | unsigned int si, pi; |
2503 | |
2504 | pi = 0; |
2505 | for_each_sg(qc->sg, sg, qc->n_elem, si) { |
2506 | u32 addr, offset; |
2507 | u32 sg_len, len; |
2508 | |
2509 | /* determine if physical DMA addr spans 64K boundary. |
2510 | * Note h/w doesn't support 64-bit, so we unconditionally |
2511 | * truncate dma_addr_t to u32. |
2512 | */ |
2513 | addr = (u32) sg_dma_address(sg); |
2514 | sg_len = sg_dma_len(sg); |
2515 | |
2516 | while (sg_len) { |
2517 | offset = addr & 0xffff; |
2518 | len = sg_len; |
2519 | if ((offset + sg_len) > 0x10000) |
2520 | len = 0x10000 - offset; |
2521 | |
2522 | prd[pi].addr = cpu_to_le32(addr); |
2523 | prd[pi].flags_len = cpu_to_le32(len & 0xffff); |
2524 | |
2525 | pi++; |
2526 | sg_len -= len; |
2527 | addr += len; |
2528 | } |
2529 | } |
2530 | |
2531 | prd[pi - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT); |
2532 | } |
2533 | |
2534 | /** |
2535 | * ata_bmdma_fill_sg_dumb - Fill PCI IDE PRD table |
2536 | * @qc: Metadata associated with taskfile to be transferred |
2537 | * |
2538 | * Fill PCI IDE PRD (scatter-gather) table with segments |
2539 | * associated with the current disk command. Perform the fill |
 *	so that we avoid writing any 64K-length (zero-encoded) records for
2541 | * controllers that don't follow the spec. |
2542 | * |
2543 | * LOCKING: |
2544 | * spin_lock_irqsave(host lock) |
2545 | * |
2546 | */ |
2547 | static void ata_bmdma_fill_sg_dumb(struct ata_queued_cmd *qc) |
2548 | { |
2549 | struct ata_port *ap = qc->ap; |
2550 | struct ata_bmdma_prd *prd = ap->bmdma_prd; |
2551 | struct scatterlist *sg; |
2552 | unsigned int si, pi; |
2553 | |
2554 | pi = 0; |
2555 | for_each_sg(qc->sg, sg, qc->n_elem, si) { |
2556 | u32 addr, offset; |
2557 | u32 sg_len, len, blen; |
2558 | |
2559 | /* determine if physical DMA addr spans 64K boundary. |
2560 | * Note h/w doesn't support 64-bit, so we unconditionally |
2561 | * truncate dma_addr_t to u32. |
2562 | */ |
2563 | addr = (u32) sg_dma_address(sg); |
2564 | sg_len = sg_dma_len(sg); |
2565 | |
2566 | while (sg_len) { |
2567 | offset = addr & 0xffff; |
2568 | len = sg_len; |
2569 | if ((offset + sg_len) > 0x10000) |
2570 | len = 0x10000 - offset; |
2571 | |
2572 | blen = len & 0xffff; |
2573 | prd[pi].addr = cpu_to_le32(addr); |
2574 | if (blen == 0) { |
				/*
				 * Some PATA chipsets like the CS5530 can't
				 * cope with 0x0000 meaning 64K as the spec
				 * says.
				 */
2578 | prd[pi].flags_len = cpu_to_le32(0x8000); |
2579 | blen = 0x8000; |
2580 | prd[++pi].addr = cpu_to_le32(addr + 0x8000); |
2581 | } |
2582 | prd[pi].flags_len = cpu_to_le32(blen); |
2583 | |
2584 | pi++; |
2585 | sg_len -= len; |
2586 | addr += len; |
2587 | } |
2588 | } |
2589 | |
2590 | prd[pi - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT); |
2591 | } |
2592 | |
2593 | /** |
2594 | * ata_bmdma_qc_prep - Prepare taskfile for submission |
2595 | * @qc: Metadata associated with taskfile to be prepared |
2596 | * |
2597 | * Prepare ATA taskfile for submission. |
2598 | * |
2599 | * LOCKING: |
2600 | * spin_lock_irqsave(host lock) |
2601 | */ |
2602 | enum ata_completion_errors ata_bmdma_qc_prep(struct ata_queued_cmd *qc) |
2603 | { |
2604 | if (!(qc->flags & ATA_QCFLAG_DMAMAP)) |
2605 | return AC_ERR_OK; |
2606 | |
2607 | ata_bmdma_fill_sg(qc); |
2608 | |
2609 | return AC_ERR_OK; |
2610 | } |
2611 | EXPORT_SYMBOL_GPL(ata_bmdma_qc_prep); |
2612 | |
2613 | /** |
2614 | * ata_bmdma_dumb_qc_prep - Prepare taskfile for submission |
2615 | * @qc: Metadata associated with taskfile to be prepared |
2616 | * |
2617 | * Prepare ATA taskfile for submission. |
2618 | * |
2619 | * LOCKING: |
2620 | * spin_lock_irqsave(host lock) |
2621 | */ |
2622 | enum ata_completion_errors ata_bmdma_dumb_qc_prep(struct ata_queued_cmd *qc) |
2623 | { |
2624 | if (!(qc->flags & ATA_QCFLAG_DMAMAP)) |
2625 | return AC_ERR_OK; |
2626 | |
2627 | ata_bmdma_fill_sg_dumb(qc); |
2628 | |
2629 | return AC_ERR_OK; |
2630 | } |
2631 | EXPORT_SYMBOL_GPL(ata_bmdma_dumb_qc_prep); |
2632 | |
2633 | /** |
2634 | * ata_bmdma_qc_issue - issue taskfile to a BMDMA controller |
2635 | * @qc: command to issue to device |
2636 | * |
2637 | * This function issues a PIO, NODATA or DMA command to a |
2638 | * SFF/BMDMA controller. PIO and NODATA are handled by |
2639 | * ata_sff_qc_issue(). |
2640 | * |
2641 | * LOCKING: |
2642 | * spin_lock_irqsave(host lock) |
2643 | * |
2644 | * RETURNS: |
2645 | * Zero on success, AC_ERR_* mask on failure |
2646 | */ |
2647 | unsigned int ata_bmdma_qc_issue(struct ata_queued_cmd *qc) |
2648 | { |
2649 | struct ata_port *ap = qc->ap; |
2650 | struct ata_link *link = qc->dev->link; |
2651 | |
2652 | /* defer PIO handling to sff_qc_issue */ |
	if (!ata_is_dma(qc->tf.protocol))
2654 | return ata_sff_qc_issue(qc); |
2655 | |
2656 | /* select the device */ |
	ata_dev_select(ap, qc->dev->devno, 1, 0);
2658 | |
2659 | /* start the command */ |
2660 | switch (qc->tf.protocol) { |
2661 | case ATA_PROT_DMA: |
2662 | WARN_ON_ONCE(qc->tf.flags & ATA_TFLAG_POLLING); |
2663 | |
		trace_ata_tf_load(ap, &qc->tf);
		ap->ops->sff_tf_load(ap, &qc->tf);  /* load tf registers */
		trace_ata_bmdma_setup(ap, &qc->tf, qc->tag);
		ap->ops->bmdma_setup(qc);	    /* set up bmdma */
		trace_ata_bmdma_start(ap, &qc->tf, qc->tag);
2669 | ap->ops->bmdma_start(qc); /* initiate bmdma */ |
2670 | ap->hsm_task_state = HSM_ST_LAST; |
2671 | break; |
2672 | |
2673 | case ATAPI_PROT_DMA: |
2674 | WARN_ON_ONCE(qc->tf.flags & ATA_TFLAG_POLLING); |
2675 | |
		trace_ata_tf_load(ap, &qc->tf);
		ap->ops->sff_tf_load(ap, &qc->tf);  /* load tf registers */
		trace_ata_bmdma_setup(ap, &qc->tf, qc->tag);
2679 | ap->ops->bmdma_setup(qc); /* set up bmdma */ |
2680 | ap->hsm_task_state = HSM_ST_FIRST; |
2681 | |
2682 | /* send cdb by polling if no cdb interrupt */ |
2683 | if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) |
2684 | ata_sff_queue_pio_task(link, 0); |
2685 | break; |
2686 | |
2687 | default: |
2688 | WARN_ON(1); |
2689 | return AC_ERR_SYSTEM; |
2690 | } |
2691 | |
2692 | return 0; |
2693 | } |
2694 | EXPORT_SYMBOL_GPL(ata_bmdma_qc_issue); |
2695 | |
2696 | /** |
2697 | * ata_bmdma_port_intr - Handle BMDMA port interrupt |
2698 | * @ap: Port on which interrupt arrived (possibly...) |
2699 | * @qc: Taskfile currently active in engine |
2700 | * |
2701 | * Handle port interrupt for given queued command. |
2702 | * |
2703 | * LOCKING: |
2704 | * spin_lock_irqsave(host lock) |
2705 | * |
2706 | * RETURNS: |
2707 | * One if interrupt was handled, zero if not (shared irq). |
2708 | */ |
2709 | unsigned int ata_bmdma_port_intr(struct ata_port *ap, struct ata_queued_cmd *qc) |
2710 | { |
2711 | struct ata_eh_info *ehi = &ap->link.eh_info; |
2712 | u8 host_stat = 0; |
2713 | bool bmdma_stopped = false; |
2714 | unsigned int handled; |
2715 | |
	if (ap->hsm_task_state == HSM_ST_LAST && ata_is_dma(qc->tf.protocol)) {
2717 | /* check status of DMA engine */ |
2718 | host_stat = ap->ops->bmdma_status(ap); |
2719 | trace_ata_bmdma_status(ap, host_stat); |
2720 | |
2721 | /* if it's not our irq... */ |
2722 | if (!(host_stat & ATA_DMA_INTR)) |
2723 | return ata_sff_idle_irq(ap); |
2724 | |
2725 | /* before we do anything else, clear DMA-Start bit */ |
		trace_ata_bmdma_stop(ap, &qc->tf, qc->tag);
2727 | ap->ops->bmdma_stop(qc); |
2728 | bmdma_stopped = true; |
2729 | |
2730 | if (unlikely(host_stat & ATA_DMA_ERR)) { |
2731 | /* error when transferring data to/from memory */ |
2732 | qc->err_mask |= AC_ERR_HOST_BUS; |
2733 | ap->hsm_task_state = HSM_ST_ERR; |
2734 | } |
2735 | } |
2736 | |
	handled = __ata_sff_port_intr(ap, qc, bmdma_stopped);

	if (unlikely(qc->err_mask) && ata_is_dma(qc->tf.protocol))
		ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat);
2741 | |
2742 | return handled; |
2743 | } |
2744 | EXPORT_SYMBOL_GPL(ata_bmdma_port_intr); |
2745 | |
2746 | /** |
2747 | * ata_bmdma_interrupt - Default BMDMA ATA host interrupt handler |
2748 | * @irq: irq line (unused) |
2749 | * @dev_instance: pointer to our ata_host information structure |
2750 | * |
2751 | * Default interrupt handler for PCI IDE devices. Calls |
2752 | * ata_bmdma_port_intr() for each port that is not disabled. |
2753 | * |
2754 | * LOCKING: |
2755 | * Obtains host lock during operation. |
2756 | * |
2757 | * RETURNS: |
2758 | * IRQ_NONE or IRQ_HANDLED. |
2759 | */ |
2760 | irqreturn_t ata_bmdma_interrupt(int irq, void *dev_instance) |
2761 | { |
	return __ata_sff_interrupt(irq, dev_instance, ata_bmdma_port_intr);
2763 | } |
2764 | EXPORT_SYMBOL_GPL(ata_bmdma_interrupt); |
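
/*
 * Example (sketch; assumes the host was prepared with
 * ata_pci_bmdma_prepare_host() and that my_sht is the driver's
 * scsi_host_template):
 *
 *	rc = ata_pci_sff_activate_host(host, ata_bmdma_interrupt,
 *				       &my_sht);
 *
 * ata_pci_init_one() above performs exactly this wiring when its
 * @bmdma argument is true.
 */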
2765 | |
2766 | /** |
2767 | * ata_bmdma_error_handler - Stock error handler for BMDMA controller |
2768 | * @ap: port to handle error for |
2769 | * |
2770 | * Stock error handler for BMDMA controller. It can handle both |
2771 | * PATA and SATA controllers. Most BMDMA controllers should be |
2772 | * able to use this EH as-is or with some added handling before |
2773 | * and after. |
2774 | * |
2775 | * LOCKING: |
2776 | * Kernel thread context (may sleep) |
2777 | */ |
2778 | void ata_bmdma_error_handler(struct ata_port *ap) |
2779 | { |
2780 | struct ata_queued_cmd *qc; |
2781 | unsigned long flags; |
2782 | bool thaw = false; |
2783 | |
	qc = __ata_qc_from_tag(ap, ap->link.active_tag);
2785 | if (qc && !(qc->flags & ATA_QCFLAG_EH)) |
2786 | qc = NULL; |
2787 | |
2788 | /* reset PIO HSM and stop DMA engine */ |
2789 | spin_lock_irqsave(ap->lock, flags); |
2790 | |
	if (qc && ata_is_dma(qc->tf.protocol)) {
2792 | u8 host_stat; |
2793 | |
2794 | host_stat = ap->ops->bmdma_status(ap); |
2795 | trace_ata_bmdma_status(ap, host_stat); |
2796 | |
2797 | /* BMDMA controllers indicate host bus error by |
2798 | * setting DMA_ERR bit and timing out. As it wasn't |
2799 | * really a timeout event, adjust error mask and |
2800 | * cancel frozen state. |
2801 | */ |
2802 | if (qc->err_mask == AC_ERR_TIMEOUT && (host_stat & ATA_DMA_ERR)) { |
2803 | qc->err_mask = AC_ERR_HOST_BUS; |
2804 | thaw = true; |
2805 | } |
2806 | |
		trace_ata_bmdma_stop(ap, &qc->tf, qc->tag);
2808 | ap->ops->bmdma_stop(qc); |
2809 | |
2810 | /* if we're gonna thaw, make sure IRQ is clear */ |
2811 | if (thaw) { |
2812 | ap->ops->sff_check_status(ap); |
2813 | if (ap->ops->sff_irq_clear) |
2814 | ap->ops->sff_irq_clear(ap); |
2815 | } |
2816 | } |
2817 | |
	spin_unlock_irqrestore(ap->lock, flags);
2819 | |
2820 | if (thaw) |
2821 | ata_eh_thaw_port(ap); |
2822 | |
2823 | ata_sff_error_handler(ap); |
2824 | } |
2825 | EXPORT_SYMBOL_GPL(ata_bmdma_error_handler); |
2826 | |
2827 | /** |
2828 | * ata_bmdma_post_internal_cmd - Stock post_internal_cmd for BMDMA |
2829 | * @qc: internal command to clean up |
2830 | * |
2831 | * LOCKING: |
2832 | * Kernel thread context (may sleep) |
2833 | */ |
2834 | void ata_bmdma_post_internal_cmd(struct ata_queued_cmd *qc) |
2835 | { |
2836 | struct ata_port *ap = qc->ap; |
2837 | unsigned long flags; |
2838 | |
	if (ata_is_dma(qc->tf.protocol)) {
		spin_lock_irqsave(ap->lock, flags);
		trace_ata_bmdma_stop(ap, &qc->tf, qc->tag);
		ap->ops->bmdma_stop(qc);
		spin_unlock_irqrestore(ap->lock, flags);
2844 | } |
2845 | } |
2846 | EXPORT_SYMBOL_GPL(ata_bmdma_post_internal_cmd); |
2847 | |
2848 | /** |
2849 | * ata_bmdma_irq_clear - Clear PCI IDE BMDMA interrupt. |
2850 | * @ap: Port associated with this ATA transaction. |
2851 | * |
2852 | * Clear interrupt and error flags in DMA status register. |
2853 | * |
2854 | * May be used as the irq_clear() entry in ata_port_operations. |
2855 | * |
2856 | * LOCKING: |
2857 | * spin_lock_irqsave(host lock) |
2858 | */ |
2859 | void ata_bmdma_irq_clear(struct ata_port *ap) |
2860 | { |
2861 | void __iomem *mmio = ap->ioaddr.bmdma_addr; |
2862 | |
2863 | if (!mmio) |
2864 | return; |
2865 | |
2866 | iowrite8(ioread8(mmio + ATA_DMA_STATUS), mmio + ATA_DMA_STATUS); |
2867 | } |
2868 | EXPORT_SYMBOL_GPL(ata_bmdma_irq_clear); |
2869 | |
2870 | /** |
2871 | * ata_bmdma_setup - Set up PCI IDE BMDMA transaction |
2872 | * @qc: Info associated with this ATA transaction. |
2873 | * |
2874 | * LOCKING: |
2875 | * spin_lock_irqsave(host lock) |
2876 | */ |
2877 | void ata_bmdma_setup(struct ata_queued_cmd *qc) |
2878 | { |
2879 | struct ata_port *ap = qc->ap; |
2880 | unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE); |
2881 | u8 dmactl; |
2882 | |
2883 | /* load PRD table addr. */ |
2884 | mb(); /* make sure PRD table writes are visible to controller */ |
2885 | iowrite32(ap->bmdma_prd_dma, ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS); |
2886 | |
2887 | /* specify data direction, triple-check start bit is clear */ |
2888 | dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD); |
2889 | dmactl &= ~(ATA_DMA_WR | ATA_DMA_START); |
2890 | if (!rw) |
2891 | dmactl |= ATA_DMA_WR; |
2892 | iowrite8(dmactl, ap->ioaddr.bmdma_addr + ATA_DMA_CMD); |
2893 | |
2894 | /* issue r/w command */ |
2895 | ap->ops->sff_exec_command(ap, &qc->tf); |
2896 | } |
2897 | EXPORT_SYMBOL_GPL(ata_bmdma_setup); |
2898 | |
2899 | /** |
2900 | * ata_bmdma_start - Start a PCI IDE BMDMA transaction |
2901 | * @qc: Info associated with this ATA transaction. |
2902 | * |
2903 | * LOCKING: |
2904 | * spin_lock_irqsave(host lock) |
2905 | */ |
2906 | void ata_bmdma_start(struct ata_queued_cmd *qc) |
2907 | { |
2908 | struct ata_port *ap = qc->ap; |
2909 | u8 dmactl; |
2910 | |
2911 | /* start host DMA transaction */ |
2912 | dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD); |
2913 | iowrite8(dmactl | ATA_DMA_START, ap->ioaddr.bmdma_addr + ATA_DMA_CMD); |
2914 | |
2915 | /* Strictly, one may wish to issue an ioread8() here, to |
2916 | * flush the mmio write. However, control also passes |
2917 | * to the hardware at this point, and it will interrupt |
2918 | * us when we are to resume control. So, in effect, |
2919 | * we don't care when the mmio write flushes. |
2920 | * Further, a read of the DMA status register _immediately_ |
	 * following the write may not be what certain flaky hardware
	 * is expecting, so I think it is best to not add a readb()
	 * without first testing all the MMIO ATA cards/mobos.
2924 | * Or maybe I'm just being paranoid. |
2925 | * |
2926 | * FIXME: The posting of this write means I/O starts are |
2927 | * unnecessarily delayed for MMIO |
2928 | */ |
2929 | } |
2930 | EXPORT_SYMBOL_GPL(ata_bmdma_start); |
2931 | |
2932 | /** |
2933 | * ata_bmdma_stop - Stop PCI IDE BMDMA transfer |
2934 | * @qc: Command we are ending DMA for |
2935 | * |
2936 | * Clears the ATA_DMA_START flag in the dma control register |
2937 | * |
2938 | * May be used as the bmdma_stop() entry in ata_port_operations. |
2939 | * |
2940 | * LOCKING: |
2941 | * spin_lock_irqsave(host lock) |
2942 | */ |
2943 | void ata_bmdma_stop(struct ata_queued_cmd *qc) |
2944 | { |
2945 | struct ata_port *ap = qc->ap; |
2946 | void __iomem *mmio = ap->ioaddr.bmdma_addr; |
2947 | |
2948 | /* clear start/stop bit */ |
2949 | iowrite8(ioread8(mmio + ATA_DMA_CMD) & ~ATA_DMA_START, |
2950 | mmio + ATA_DMA_CMD); |
2951 | |
2952 | /* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */ |
2953 | ata_sff_dma_pause(ap); |
2954 | } |
2955 | EXPORT_SYMBOL_GPL(ata_bmdma_stop); |
2956 | |
2957 | /** |
2958 | * ata_bmdma_status - Read PCI IDE BMDMA status |
2959 | * @ap: Port associated with this ATA transaction. |
2960 | * |
2961 | * Read and return BMDMA status register. |
2962 | * |
2963 | * May be used as the bmdma_status() entry in ata_port_operations. |
2964 | * |
2965 | * LOCKING: |
2966 | * spin_lock_irqsave(host lock) |
2967 | */ |
2968 | u8 ata_bmdma_status(struct ata_port *ap) |
2969 | { |
2970 | return ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS); |
2971 | } |
2972 | EXPORT_SYMBOL_GPL(ata_bmdma_status); |
2973 | |
2974 | |
2975 | /** |
2976 | * ata_bmdma_port_start - Set port up for bmdma. |
2977 | * @ap: Port to initialize |
2978 | * |
2979 | * Called just after data structures for each port are |
2980 | * initialized. Allocates space for PRD table. |
2981 | * |
2982 | * May be used as the port_start() entry in ata_port_operations. |
2983 | * |
2984 | * LOCKING: |
2985 | * Inherited from caller. |
2986 | */ |
2987 | int ata_bmdma_port_start(struct ata_port *ap) |
2988 | { |
2989 | if (ap->mwdma_mask || ap->udma_mask) { |
2990 | ap->bmdma_prd = |
			dmam_alloc_coherent(ap->host->dev, ATA_PRD_TBL_SZ,
					    &ap->bmdma_prd_dma, GFP_KERNEL);
2993 | if (!ap->bmdma_prd) |
2994 | return -ENOMEM; |
2995 | } |
2996 | |
2997 | return 0; |
2998 | } |
2999 | EXPORT_SYMBOL_GPL(ata_bmdma_port_start); |
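
/*
 * Example (sketch; my_chip_init_dma is a hypothetical helper): a driver
 * needing extra per-port setup can chain to the stock implementation:
 *
 *	static int my_port_start(struct ata_port *ap)
 *	{
 *		int rc = ata_bmdma_port_start(ap);
 *
 *		if (rc)
 *			return rc;
 *		return my_chip_init_dma(ap);	// hypothetical helper
 *	}
 */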
3000 | |
3001 | /** |
3002 | * ata_bmdma_port_start32 - Set port up for dma. |
3003 | * @ap: Port to initialize |
3004 | * |
3005 | * Called just after data structures for each port are |
3006 | * initialized. Enables 32bit PIO and allocates space for PRD |
3007 | * table. |
3008 | * |
3009 | * May be used as the port_start() entry in ata_port_operations for |
3010 | * devices that are capable of 32bit PIO. |
3011 | * |
3012 | * LOCKING: |
3013 | * Inherited from caller. |
3014 | */ |
3015 | int ata_bmdma_port_start32(struct ata_port *ap) |
3016 | { |
3017 | ap->pflags |= ATA_PFLAG_PIO32 | ATA_PFLAG_PIO32CHANGE; |
3018 | return ata_bmdma_port_start(ap); |
3019 | } |
3020 | EXPORT_SYMBOL_GPL(ata_bmdma_port_start32); |
3021 | |
3022 | #ifdef CONFIG_PCI |
3023 | |
3024 | /** |
3025 | * ata_pci_bmdma_clear_simplex - attempt to kick device out of simplex |
3026 | * @pdev: PCI device |
3027 | * |
3028 | * Some PCI ATA devices report simplex mode but in fact can be told to |
 * enter non-simplex mode. This implements the necessary logic to
3030 | * perform the task on such devices. Calling it on other devices will |
3031 | * have -undefined- behaviour. |
3032 | */ |
3033 | int ata_pci_bmdma_clear_simplex(struct pci_dev *pdev) |
3034 | { |
3035 | unsigned long bmdma = pci_resource_start(pdev, 4); |
3036 | u8 simplex; |
3037 | |
3038 | if (bmdma == 0) |
3039 | return -ENOENT; |
3040 | |
	simplex = inb(bmdma + 0x02);
	outb(simplex & 0x60, bmdma + 0x02);
	simplex = inb(bmdma + 0x02);
3044 | if (simplex & 0x80) |
3045 | return -EOPNOTSUPP; |
3046 | return 0; |
3047 | } |
3048 | EXPORT_SYMBOL_GPL(ata_pci_bmdma_clear_simplex); |
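
/*
 * Example (sketch; the warning text is illustrative): a driver for such
 * a device would typically call this from its probe path and fall back
 * gracefully:
 *
 *	if (ata_pci_bmdma_clear_simplex(pdev))
 *		dev_warn(&pdev->dev, "simplex DMA could not be cleared\n");
 *
 * Drivers for hardware that genuinely is simplex must not call this.
 */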
3049 | |
3050 | static void ata_bmdma_nodma(struct ata_host *host, const char *reason) |
3051 | { |
3052 | int i; |
3053 | |
3054 | dev_err(host->dev, "BMDMA: %s, falling back to PIO\n" , reason); |
3055 | |
3056 | for (i = 0; i < 2; i++) { |
3057 | host->ports[i]->mwdma_mask = 0; |
3058 | host->ports[i]->udma_mask = 0; |
3059 | } |
3060 | } |
3061 | |
3062 | /** |
3063 | * ata_pci_bmdma_init - acquire PCI BMDMA resources and init ATA host |
3064 | * @host: target ATA host |
3065 | * |
3066 | * Acquire PCI BMDMA resources and initialize @host accordingly. |
3067 | * |
3068 | * LOCKING: |
3069 | * Inherited from calling layer (may sleep). |
3070 | */ |
3071 | void ata_pci_bmdma_init(struct ata_host *host) |
3072 | { |
3073 | struct device *gdev = host->dev; |
3074 | struct pci_dev *pdev = to_pci_dev(gdev); |
3075 | int i, rc; |
3076 | |
3077 | /* No BAR4 allocation: No DMA */ |
3078 | if (pci_resource_start(pdev, 4) == 0) { |
3079 | ata_bmdma_nodma(host, reason: "BAR4 is zero" ); |
3080 | return; |
3081 | } |
3082 | |
3083 | /* |
3084 | * Some controllers require BMDMA region to be initialized |
3085 | * even if DMA is not in use to clear IRQ status via |
3086 | * ->sff_irq_clear method. Try to initialize bmdma_addr |
3087 | * regardless of dma masks. |
3088 | */ |
	rc = dma_set_mask_and_coherent(&pdev->dev, ATA_DMA_MASK);
	if (rc)
		ata_bmdma_nodma(host, "failed to set dma mask");
3092 | |
3093 | /* request and iomap DMA region */ |
	rc = pcim_iomap_regions(pdev, 1 << 4, dev_driver_string(gdev));
	if (rc) {
		ata_bmdma_nodma(host, "failed to request/iomap BAR4");
3097 | return; |
3098 | } |
3099 | host->iomap = pcim_iomap_table(pdev); |
3100 | |
3101 | for (i = 0; i < 2; i++) { |
3102 | struct ata_port *ap = host->ports[i]; |
3103 | void __iomem *bmdma = host->iomap[4] + 8 * i; |
3104 | |
3105 | if (ata_port_is_dummy(ap)) |
3106 | continue; |
3107 | |
3108 | ap->ioaddr.bmdma_addr = bmdma; |
3109 | if ((!(ap->flags & ATA_FLAG_IGN_SIMPLEX)) && |
3110 | (ioread8(bmdma + 2) & 0x80)) |
3111 | host->flags |= ATA_HOST_SIMPLEX; |
3112 | |
3113 | ata_port_desc(ap, fmt: "bmdma 0x%llx" , |
3114 | (unsigned long long)pci_resource_start(pdev, 4) + 8 * i); |
3115 | } |
3116 | } |
3117 | EXPORT_SYMBOL_GPL(ata_pci_bmdma_init); |
3118 | |
3119 | /** |
3120 | * ata_pci_bmdma_prepare_host - helper to prepare PCI BMDMA ATA host |
3121 | * @pdev: target PCI device |
3122 | * @ppi: array of port_info, must be enough for two ports |
3123 | * @r_host: out argument for the initialized ATA host |
3124 | * |
3125 | * Helper to allocate BMDMA ATA host for @pdev, acquire all PCI |
3126 | * resources and initialize it accordingly in one go. |
3127 | * |
3128 | * LOCKING: |
3129 | * Inherited from calling layer (may sleep). |
3130 | * |
3131 | * RETURNS: |
3132 | * 0 on success, -errno otherwise. |
3133 | */ |
3134 | int ata_pci_bmdma_prepare_host(struct pci_dev *pdev, |
3135 | const struct ata_port_info * const * ppi, |
3136 | struct ata_host **r_host) |
3137 | { |
3138 | int rc; |
3139 | |
3140 | rc = ata_pci_sff_prepare_host(pdev, ppi, r_host); |
3141 | if (rc) |
3142 | return rc; |
3143 | |
3144 | ata_pci_bmdma_init(*r_host); |
3145 | return 0; |
3146 | } |
3147 | EXPORT_SYMBOL_GPL(ata_pci_bmdma_prepare_host); |
3148 | |
3149 | /** |
3150 | * ata_pci_bmdma_init_one - Initialize/register BMDMA PCI IDE controller |
3151 | * @pdev: Controller to be initialized |
3152 | * @ppi: array of port_info, must be enough for two ports |
3153 | * @sht: scsi_host_template to use when registering the host |
3154 | * @host_priv: host private_data |
3155 | * @hflags: host flags |
3156 | * |
3157 | * This function is similar to ata_pci_sff_init_one() but also |
3158 | * takes care of BMDMA initialization. |
3159 | * |
3160 | * LOCKING: |
3161 | * Inherited from PCI layer (may sleep). |
3162 | * |
3163 | * RETURNS: |
 * Zero on success, negative errno-based value on error.
3165 | */ |
3166 | int ata_pci_bmdma_init_one(struct pci_dev *pdev, |
3167 | const struct ata_port_info * const * ppi, |
3168 | const struct scsi_host_template *sht, void *host_priv, |
3169 | int hflags) |
3170 | { |
	return ata_pci_init_one(pdev, ppi, sht, host_priv, hflags, true);
3172 | } |
3173 | EXPORT_SYMBOL_GPL(ata_pci_bmdma_init_one); |
3174 | |
3175 | #endif /* CONFIG_PCI */ |
3176 | #endif /* CONFIG_ATA_BMDMA */ |
3177 | |
3178 | /** |
3179 | * ata_sff_port_init - Initialize SFF/BMDMA ATA port |
3180 | * @ap: Port to initialize |
3181 | * |
3182 | * Called on port allocation to initialize SFF/BMDMA specific |
3183 | * fields. |
3184 | * |
3185 | * LOCKING: |
3186 | * None. |
3187 | */ |
3188 | void ata_sff_port_init(struct ata_port *ap) |
3189 | { |
3190 | INIT_DELAYED_WORK(&ap->sff_pio_task, ata_sff_pio_task); |
3191 | ap->ctl = ATA_DEVCTL_OBS; |
3192 | ap->last_ctl = 0xFF; |
3193 | } |
3194 | |
3195 | int __init ata_sff_init(void) |
3196 | { |
	ata_sff_wq = alloc_workqueue("ata_sff", WQ_MEM_RECLAIM, WQ_MAX_ACTIVE);
3198 | if (!ata_sff_wq) |
3199 | return -ENOMEM; |
3200 | |
3201 | return 0; |
3202 | } |
3203 | |
3204 | void ata_sff_exit(void) |
3205 | { |
	destroy_workqueue(ata_sff_wq);
3207 | } |
3208 | |