1 | // SPDX-License-Identifier: GPL-2.0-only |
2 | /* |
3 | * Copyright © 2009 - Maxim Levitsky |
4 | * driver for Ricoh xD readers |
5 | */ |
6 | |
7 | #define DRV_NAME "r852" |
8 | #define pr_fmt(fmt) DRV_NAME ": " fmt |
9 | |
10 | #include <linux/kernel.h> |
11 | #include <linux/module.h> |
12 | #include <linux/jiffies.h> |
13 | #include <linux/workqueue.h> |
14 | #include <linux/interrupt.h> |
15 | #include <linux/pci.h> |
16 | #include <linux/pci_ids.h> |
17 | #include <linux/delay.h> |
18 | #include <linux/slab.h> |
19 | #include <asm/byteorder.h> |
20 | #include <linux/sched.h> |
21 | #include "sm_common.h" |
22 | #include "r852.h" |
23 | |
24 | |
/* Allow DMA transfers (read-only after load); can be turned off at modprobe */
static bool r852_enable_dma = 1;
module_param(r852_enable_dma, bool, S_IRUGO);
MODULE_PARM_DESC(r852_enable_dma, "Enable usage of the DMA (default)" );

/* Verbosity for the driver's dbg()/dbg_verbose() macros, tunable at runtime */
static int debug;
module_param(debug, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Debug level (0-2)" );
32 | |
33 | /* read register */ |
34 | static inline uint8_t r852_read_reg(struct r852_device *dev, int address) |
35 | { |
36 | uint8_t reg = readb(addr: dev->mmio + address); |
37 | return reg; |
38 | } |
39 | |
40 | /* write register */ |
41 | static inline void r852_write_reg(struct r852_device *dev, |
42 | int address, uint8_t value) |
43 | { |
44 | writeb(val: value, addr: dev->mmio + address); |
45 | } |
46 | |
47 | |
/* read dword sized register */
static inline uint32_t r852_read_reg_dword(struct r852_device *dev, int address)
{
	/* NOTE(review): readl() already returns CPU-endian data; the extra
	 * le32_to_cpu() is a no-op on little-endian hosts but would
	 * double-swap on big-endian - confirm before running on BE hardware.
	 */
	uint32_t reg = le32_to_cpu(readl(dev->mmio + address));
	return reg;
}
54 | |
55 | /* write dword sized register */ |
56 | static inline void r852_write_reg_dword(struct r852_device *dev, |
57 | int address, uint32_t value) |
58 | { |
59 | writel(cpu_to_le32(value), addr: dev->mmio + address); |
60 | } |
61 | |
/* Map an mtd_info back to the driver's private r852_device structure */
static inline struct r852_device *r852_get_dev(struct mtd_info *mtd)
{
	return nand_get_controller_data(mtd_to_nand(mtd));
}
68 | |
69 | |
70 | /* check if controller supports dma */ |
71 | static void r852_dma_test(struct r852_device *dev) |
72 | { |
73 | dev->dma_usable = (r852_read_reg(dev, R852_DMA_CAP) & |
74 | (R852_DMA1 | R852_DMA2)) == (R852_DMA1 | R852_DMA2); |
75 | |
76 | if (!dev->dma_usable) |
77 | message("Non dma capable device detected, dma disabled" ); |
78 | |
79 | if (!r852_enable_dma) { |
80 | message("disabling dma on user request" ); |
81 | dev->dma_usable = 0; |
82 | } |
83 | } |
84 | |
85 | /* |
86 | * Enable dma. Enables ether first or second stage of the DMA, |
87 | * Expects dev->dma_dir and dev->dma_state be set |
88 | */ |
89 | static void r852_dma_enable(struct r852_device *dev) |
90 | { |
91 | uint8_t dma_reg, dma_irq_reg; |
92 | |
93 | /* Set up dma settings */ |
94 | dma_reg = r852_read_reg_dword(dev, R852_DMA_SETTINGS); |
95 | dma_reg &= ~(R852_DMA_READ | R852_DMA_INTERNAL | R852_DMA_MEMORY); |
96 | |
97 | if (dev->dma_dir) |
98 | dma_reg |= R852_DMA_READ; |
99 | |
100 | if (dev->dma_state == DMA_INTERNAL) { |
101 | dma_reg |= R852_DMA_INTERNAL; |
102 | /* Precaution to make sure HW doesn't write */ |
103 | /* to random kernel memory */ |
104 | r852_write_reg_dword(dev, R852_DMA_ADDR, |
105 | cpu_to_le32(dev->phys_bounce_buffer)); |
106 | } else { |
107 | dma_reg |= R852_DMA_MEMORY; |
108 | r852_write_reg_dword(dev, R852_DMA_ADDR, |
109 | cpu_to_le32(dev->phys_dma_addr)); |
110 | } |
111 | |
112 | /* Precaution: make sure write reached the device */ |
113 | r852_read_reg_dword(dev, R852_DMA_ADDR); |
114 | |
115 | r852_write_reg_dword(dev, R852_DMA_SETTINGS, value: dma_reg); |
116 | |
117 | /* Set dma irq */ |
118 | dma_irq_reg = r852_read_reg_dword(dev, R852_DMA_IRQ_ENABLE); |
119 | r852_write_reg_dword(dev, R852_DMA_IRQ_ENABLE, |
120 | value: dma_irq_reg | |
121 | R852_DMA_IRQ_INTERNAL | |
122 | R852_DMA_IRQ_ERROR | |
123 | R852_DMA_IRQ_MEMORY); |
124 | } |
125 | |
126 | /* |
127 | * Disable dma, called from the interrupt handler, which specifies |
128 | * success of the operation via 'error' argument |
129 | */ |
130 | static void r852_dma_done(struct r852_device *dev, int error) |
131 | { |
132 | WARN_ON(dev->dma_stage == 0); |
133 | |
134 | r852_write_reg_dword(dev, R852_DMA_IRQ_STA, |
135 | value: r852_read_reg_dword(dev, R852_DMA_IRQ_STA)); |
136 | |
137 | r852_write_reg_dword(dev, R852_DMA_SETTINGS, value: 0); |
138 | r852_write_reg_dword(dev, R852_DMA_IRQ_ENABLE, value: 0); |
139 | |
140 | /* Precaution to make sure HW doesn't write to random kernel memory */ |
141 | r852_write_reg_dword(dev, R852_DMA_ADDR, |
142 | cpu_to_le32(dev->phys_bounce_buffer)); |
143 | r852_read_reg_dword(dev, R852_DMA_ADDR); |
144 | |
145 | dev->dma_error = error; |
146 | dev->dma_stage = 0; |
147 | |
148 | if (dev->phys_dma_addr && dev->phys_dma_addr != dev->phys_bounce_buffer) |
149 | dma_unmap_single(&dev->pci_dev->dev, dev->phys_dma_addr, |
150 | R852_DMA_LEN, |
151 | dev->dma_dir ? DMA_FROM_DEVICE : DMA_TO_DEVICE); |
152 | } |
153 | |
154 | /* |
155 | * Wait, till dma is done, which includes both phases of it |
156 | */ |
157 | static int r852_dma_wait(struct r852_device *dev) |
158 | { |
159 | long timeout = wait_for_completion_timeout(x: &dev->dma_done, |
160 | timeout: msecs_to_jiffies(m: 1000)); |
161 | if (!timeout) { |
162 | dbg("timeout waiting for DMA interrupt" ); |
163 | return -ETIMEDOUT; |
164 | } |
165 | |
166 | return 0; |
167 | } |
168 | |
169 | /* |
170 | * Read/Write one page using dma. Only pages can be read (512 bytes) |
171 | */ |
172 | static void r852_do_dma(struct r852_device *dev, uint8_t *buf, int do_read) |
173 | { |
174 | int bounce = 0; |
175 | unsigned long flags; |
176 | int error; |
177 | |
178 | dev->dma_error = 0; |
179 | |
180 | /* Set dma direction */ |
181 | dev->dma_dir = do_read; |
182 | dev->dma_stage = 1; |
183 | reinit_completion(x: &dev->dma_done); |
184 | |
185 | dbg_verbose("doing dma %s " , do_read ? "read" : "write" ); |
186 | |
187 | /* Set initial dma state: for reading first fill on board buffer, |
188 | from device, for writes first fill the buffer from memory*/ |
189 | dev->dma_state = do_read ? DMA_INTERNAL : DMA_MEMORY; |
190 | |
191 | /* if incoming buffer is not page aligned, we should do bounce */ |
192 | if ((unsigned long)buf & (R852_DMA_LEN-1)) |
193 | bounce = 1; |
194 | |
195 | if (!bounce) { |
196 | dev->phys_dma_addr = dma_map_single(&dev->pci_dev->dev, buf, |
197 | R852_DMA_LEN, |
198 | do_read ? DMA_FROM_DEVICE : DMA_TO_DEVICE); |
199 | if (dma_mapping_error(dev: &dev->pci_dev->dev, dma_addr: dev->phys_dma_addr)) |
200 | bounce = 1; |
201 | } |
202 | |
203 | if (bounce) { |
204 | dbg_verbose("dma: using bounce buffer" ); |
205 | dev->phys_dma_addr = dev->phys_bounce_buffer; |
206 | if (!do_read) |
207 | memcpy(dev->bounce_buffer, buf, R852_DMA_LEN); |
208 | } |
209 | |
210 | /* Enable DMA */ |
211 | spin_lock_irqsave(&dev->irqlock, flags); |
212 | r852_dma_enable(dev); |
213 | spin_unlock_irqrestore(lock: &dev->irqlock, flags); |
214 | |
215 | /* Wait till complete */ |
216 | error = r852_dma_wait(dev); |
217 | |
218 | if (error) { |
219 | r852_dma_done(dev, error); |
220 | return; |
221 | } |
222 | |
223 | if (do_read && bounce) |
224 | memcpy((void *)buf, dev->bounce_buffer, R852_DMA_LEN); |
225 | } |
226 | |
227 | /* |
228 | * Program data lines of the nand chip to send data to it |
229 | */ |
230 | static void r852_write_buf(struct nand_chip *chip, const uint8_t *buf, int len) |
231 | { |
232 | struct r852_device *dev = r852_get_dev(mtd: nand_to_mtd(chip)); |
233 | uint32_t reg; |
234 | |
235 | /* Don't allow any access to hardware if we suspect card removal */ |
236 | if (dev->card_unstable) |
237 | return; |
238 | |
239 | /* Special case for whole sector read */ |
240 | if (len == R852_DMA_LEN && dev->dma_usable) { |
241 | r852_do_dma(dev, buf: (uint8_t *)buf, do_read: 0); |
242 | return; |
243 | } |
244 | |
245 | /* write DWORD chinks - faster */ |
246 | while (len >= 4) { |
247 | reg = buf[0] | buf[1] << 8 | buf[2] << 16 | buf[3] << 24; |
248 | r852_write_reg_dword(dev, R852_DATALINE, value: reg); |
249 | buf += 4; |
250 | len -= 4; |
251 | |
252 | } |
253 | |
254 | /* write rest */ |
255 | while (len > 0) { |
256 | r852_write_reg(dev, R852_DATALINE, value: *buf++); |
257 | len--; |
258 | } |
259 | } |
260 | |
261 | /* |
262 | * Read data lines of the nand chip to retrieve data |
263 | */ |
264 | static void r852_read_buf(struct nand_chip *chip, uint8_t *buf, int len) |
265 | { |
266 | struct r852_device *dev = r852_get_dev(mtd: nand_to_mtd(chip)); |
267 | uint32_t reg; |
268 | |
269 | if (dev->card_unstable) { |
270 | /* since we can't signal error here, at least, return |
271 | predictable buffer */ |
272 | memset(buf, 0, len); |
273 | return; |
274 | } |
275 | |
276 | /* special case for whole sector read */ |
277 | if (len == R852_DMA_LEN && dev->dma_usable) { |
278 | r852_do_dma(dev, buf, do_read: 1); |
279 | return; |
280 | } |
281 | |
282 | /* read in dword sized chunks */ |
283 | while (len >= 4) { |
284 | |
285 | reg = r852_read_reg_dword(dev, R852_DATALINE); |
286 | *buf++ = reg & 0xFF; |
287 | *buf++ = (reg >> 8) & 0xFF; |
288 | *buf++ = (reg >> 16) & 0xFF; |
289 | *buf++ = (reg >> 24) & 0xFF; |
290 | len -= 4; |
291 | } |
292 | |
293 | /* read the reset by bytes */ |
294 | while (len--) |
295 | *buf++ = r852_read_reg(dev, R852_DATALINE); |
296 | } |
297 | |
298 | /* |
299 | * Read one byte from nand chip |
300 | */ |
301 | static uint8_t r852_read_byte(struct nand_chip *chip) |
302 | { |
303 | struct r852_device *dev = r852_get_dev(mtd: nand_to_mtd(chip)); |
304 | |
305 | /* Same problem as in r852_read_buf.... */ |
306 | if (dev->card_unstable) |
307 | return 0; |
308 | |
309 | return r852_read_reg(dev, R852_DATALINE); |
310 | } |
311 | |
312 | /* |
313 | * Control several chip lines & send commands |
314 | */ |
315 | static void r852_cmdctl(struct nand_chip *chip, int dat, unsigned int ctrl) |
316 | { |
317 | struct r852_device *dev = r852_get_dev(mtd: nand_to_mtd(chip)); |
318 | |
319 | if (dev->card_unstable) |
320 | return; |
321 | |
322 | if (ctrl & NAND_CTRL_CHANGE) { |
323 | |
324 | dev->ctlreg &= ~(R852_CTL_DATA | R852_CTL_COMMAND | |
325 | R852_CTL_ON | R852_CTL_CARDENABLE); |
326 | |
327 | if (ctrl & NAND_ALE) |
328 | dev->ctlreg |= R852_CTL_DATA; |
329 | |
330 | if (ctrl & NAND_CLE) |
331 | dev->ctlreg |= R852_CTL_COMMAND; |
332 | |
333 | if (ctrl & NAND_NCE) |
334 | dev->ctlreg |= (R852_CTL_CARDENABLE | R852_CTL_ON); |
335 | else |
336 | dev->ctlreg &= ~R852_CTL_WRITE; |
337 | |
338 | /* when write is stareted, enable write access */ |
339 | if (dat == NAND_CMD_ERASE1) |
340 | dev->ctlreg |= R852_CTL_WRITE; |
341 | |
342 | r852_write_reg(dev, R852_CTL, value: dev->ctlreg); |
343 | } |
344 | |
345 | /* HACK: NAND_CMD_SEQIN is called without NAND_CTRL_CHANGE, but we need |
346 | to set write mode */ |
347 | if (dat == NAND_CMD_SEQIN && (dev->ctlreg & R852_CTL_COMMAND)) { |
348 | dev->ctlreg |= R852_CTL_WRITE; |
349 | r852_write_reg(dev, R852_CTL, value: dev->ctlreg); |
350 | } |
351 | |
352 | if (dat != NAND_CMD_NONE) |
353 | r852_write_reg(dev, R852_DATALINE, value: dat); |
354 | } |
355 | |
356 | /* |
357 | * Wait till card is ready. |
358 | * based on nand_wait, but returns errors on DMA error |
359 | */ |
360 | static int r852_wait(struct nand_chip *chip) |
361 | { |
362 | struct r852_device *dev = nand_get_controller_data(chip); |
363 | |
364 | unsigned long timeout; |
365 | u8 status; |
366 | |
367 | timeout = jiffies + msecs_to_jiffies(m: 400); |
368 | |
369 | while (time_before(jiffies, timeout)) |
370 | if (chip->legacy.dev_ready(chip)) |
371 | break; |
372 | |
373 | nand_status_op(chip, status: &status); |
374 | |
375 | /* Unfortunelly, no way to send detailed error status... */ |
376 | if (dev->dma_error) { |
377 | status |= NAND_STATUS_FAIL; |
378 | dev->dma_error = 0; |
379 | } |
380 | return status; |
381 | } |
382 | |
383 | /* |
384 | * Check if card is ready |
385 | */ |
386 | |
387 | static int r852_ready(struct nand_chip *chip) |
388 | { |
389 | struct r852_device *dev = r852_get_dev(mtd: nand_to_mtd(chip)); |
390 | return !(r852_read_reg(dev, R852_CARD_STA) & R852_CARD_STA_BUSY); |
391 | } |
392 | |
393 | |
394 | /* |
395 | * Set ECC engine mode |
396 | */ |
397 | |
398 | static void r852_ecc_hwctl(struct nand_chip *chip, int mode) |
399 | { |
400 | struct r852_device *dev = r852_get_dev(mtd: nand_to_mtd(chip)); |
401 | |
402 | if (dev->card_unstable) |
403 | return; |
404 | |
405 | switch (mode) { |
406 | case NAND_ECC_READ: |
407 | case NAND_ECC_WRITE: |
408 | /* enable ecc generation/check*/ |
409 | dev->ctlreg |= R852_CTL_ECC_ENABLE; |
410 | |
411 | /* flush ecc buffer */ |
412 | r852_write_reg(dev, R852_CTL, |
413 | value: dev->ctlreg | R852_CTL_ECC_ACCESS); |
414 | |
415 | r852_read_reg_dword(dev, R852_DATALINE); |
416 | r852_write_reg(dev, R852_CTL, value: dev->ctlreg); |
417 | return; |
418 | |
419 | case NAND_ECC_READSYN: |
420 | /* disable ecc generation */ |
421 | dev->ctlreg &= ~R852_CTL_ECC_ENABLE; |
422 | r852_write_reg(dev, R852_CTL, value: dev->ctlreg); |
423 | } |
424 | } |
425 | |
426 | /* |
427 | * Calculate ECC, only used for writes |
428 | */ |
429 | |
430 | static int r852_ecc_calculate(struct nand_chip *chip, const uint8_t *dat, |
431 | uint8_t *ecc_code) |
432 | { |
433 | struct r852_device *dev = r852_get_dev(mtd: nand_to_mtd(chip)); |
434 | struct sm_oob *oob = (struct sm_oob *)ecc_code; |
435 | uint32_t ecc1, ecc2; |
436 | |
437 | if (dev->card_unstable) |
438 | return 0; |
439 | |
440 | dev->ctlreg &= ~R852_CTL_ECC_ENABLE; |
441 | r852_write_reg(dev, R852_CTL, value: dev->ctlreg | R852_CTL_ECC_ACCESS); |
442 | |
443 | ecc1 = r852_read_reg_dword(dev, R852_DATALINE); |
444 | ecc2 = r852_read_reg_dword(dev, R852_DATALINE); |
445 | |
446 | oob->ecc1[0] = (ecc1) & 0xFF; |
447 | oob->ecc1[1] = (ecc1 >> 8) & 0xFF; |
448 | oob->ecc1[2] = (ecc1 >> 16) & 0xFF; |
449 | |
450 | oob->ecc2[0] = (ecc2) & 0xFF; |
451 | oob->ecc2[1] = (ecc2 >> 8) & 0xFF; |
452 | oob->ecc2[2] = (ecc2 >> 16) & 0xFF; |
453 | |
454 | r852_write_reg(dev, R852_CTL, value: dev->ctlreg); |
455 | return 0; |
456 | } |
457 | |
458 | /* |
459 | * Correct the data using ECC, hw did almost everything for us |
460 | */ |
461 | |
462 | static int r852_ecc_correct(struct nand_chip *chip, uint8_t *dat, |
463 | uint8_t *read_ecc, uint8_t *calc_ecc) |
464 | { |
465 | uint32_t ecc_reg; |
466 | uint8_t ecc_status, err_byte; |
467 | int i, error = 0; |
468 | |
469 | struct r852_device *dev = r852_get_dev(mtd: nand_to_mtd(chip)); |
470 | |
471 | if (dev->card_unstable) |
472 | return 0; |
473 | |
474 | if (dev->dma_error) { |
475 | dev->dma_error = 0; |
476 | return -EIO; |
477 | } |
478 | |
479 | r852_write_reg(dev, R852_CTL, value: dev->ctlreg | R852_CTL_ECC_ACCESS); |
480 | ecc_reg = r852_read_reg_dword(dev, R852_DATALINE); |
481 | r852_write_reg(dev, R852_CTL, value: dev->ctlreg); |
482 | |
483 | for (i = 0 ; i <= 1 ; i++) { |
484 | |
485 | ecc_status = (ecc_reg >> 8) & 0xFF; |
486 | |
487 | /* ecc uncorrectable error */ |
488 | if (ecc_status & R852_ECC_FAIL) { |
489 | dbg("ecc: unrecoverable error, in half %d" , i); |
490 | error = -EBADMSG; |
491 | goto exit; |
492 | } |
493 | |
494 | /* correctable error */ |
495 | if (ecc_status & R852_ECC_CORRECTABLE) { |
496 | |
497 | err_byte = ecc_reg & 0xFF; |
498 | dbg("ecc: recoverable error, " |
499 | "in half %d, byte %d, bit %d" , i, |
500 | err_byte, ecc_status & R852_ECC_ERR_BIT_MSK); |
501 | |
502 | dat[err_byte] ^= |
503 | 1 << (ecc_status & R852_ECC_ERR_BIT_MSK); |
504 | error++; |
505 | } |
506 | |
507 | dat += 256; |
508 | ecc_reg >>= 16; |
509 | } |
510 | exit: |
511 | return error; |
512 | } |
513 | |
514 | /* |
515 | * This is copy of nand_read_oob_std |
516 | * nand_read_oob_syndrome assumes we can send column address - we can't |
517 | */ |
518 | static int r852_read_oob(struct nand_chip *chip, int page) |
519 | { |
520 | struct mtd_info *mtd = nand_to_mtd(chip); |
521 | |
522 | return nand_read_oob_op(chip, page, offset_in_page: 0, buf: chip->oob_poi, len: mtd->oobsize); |
523 | } |
524 | |
525 | /* |
526 | * Start the nand engine |
527 | */ |
528 | |
529 | static void r852_engine_enable(struct r852_device *dev) |
530 | { |
531 | if (r852_read_reg_dword(dev, R852_HW) & R852_HW_UNKNOWN) { |
532 | r852_write_reg(dev, R852_CTL, R852_CTL_RESET | R852_CTL_ON); |
533 | r852_write_reg_dword(dev, R852_HW, R852_HW_ENABLED); |
534 | } else { |
535 | r852_write_reg_dword(dev, R852_HW, R852_HW_ENABLED); |
536 | r852_write_reg(dev, R852_CTL, R852_CTL_RESET | R852_CTL_ON); |
537 | } |
538 | msleep(msecs: 300); |
539 | r852_write_reg(dev, R852_CTL, value: 0); |
540 | } |
541 | |
542 | |
543 | /* |
544 | * Stop the nand engine |
545 | */ |
546 | |
547 | static void r852_engine_disable(struct r852_device *dev) |
548 | { |
549 | r852_write_reg_dword(dev, R852_HW, value: 0); |
550 | r852_write_reg(dev, R852_CTL, R852_CTL_RESET); |
551 | } |
552 | |
553 | /* |
554 | * Test if card is present |
555 | */ |
556 | |
557 | static void r852_card_update_present(struct r852_device *dev) |
558 | { |
559 | unsigned long flags; |
560 | uint8_t reg; |
561 | |
562 | spin_lock_irqsave(&dev->irqlock, flags); |
563 | reg = r852_read_reg(dev, R852_CARD_STA); |
564 | dev->card_detected = !!(reg & R852_CARD_STA_PRESENT); |
565 | spin_unlock_irqrestore(lock: &dev->irqlock, flags); |
566 | } |
567 | |
568 | /* |
569 | * Update card detection IRQ state according to current card state |
570 | * which is read in r852_card_update_present |
571 | */ |
572 | static void r852_update_card_detect(struct r852_device *dev) |
573 | { |
574 | int card_detect_reg = r852_read_reg(dev, R852_CARD_IRQ_ENABLE); |
575 | dev->card_unstable = 0; |
576 | |
577 | card_detect_reg &= ~(R852_CARD_IRQ_REMOVE | R852_CARD_IRQ_INSERT); |
578 | card_detect_reg |= R852_CARD_IRQ_GENABLE; |
579 | |
580 | card_detect_reg |= dev->card_detected ? |
581 | R852_CARD_IRQ_REMOVE : R852_CARD_IRQ_INSERT; |
582 | |
583 | r852_write_reg(dev, R852_CARD_IRQ_ENABLE, value: card_detect_reg); |
584 | } |
585 | |
586 | static ssize_t media_type_show(struct device *sys_dev, |
587 | struct device_attribute *attr, char *buf) |
588 | { |
589 | struct mtd_info *mtd = container_of(sys_dev, struct mtd_info, dev); |
590 | struct r852_device *dev = r852_get_dev(mtd); |
591 | char *data = dev->sm ? "smartmedia" : "xd" ; |
592 | |
593 | strcpy(p: buf, q: data); |
594 | return strlen(data); |
595 | } |
596 | static DEVICE_ATTR_RO(media_type); |
597 | |
598 | |
599 | /* Detect properties of card in slot */ |
600 | static void r852_update_media_status(struct r852_device *dev) |
601 | { |
602 | uint8_t reg; |
603 | unsigned long flags; |
604 | int readonly; |
605 | |
606 | spin_lock_irqsave(&dev->irqlock, flags); |
607 | if (!dev->card_detected) { |
608 | message("card removed" ); |
609 | spin_unlock_irqrestore(lock: &dev->irqlock, flags); |
610 | return ; |
611 | } |
612 | |
613 | readonly = r852_read_reg(dev, R852_CARD_STA) & R852_CARD_STA_RO; |
614 | reg = r852_read_reg(dev, R852_DMA_CAP); |
615 | dev->sm = (reg & (R852_DMA1 | R852_DMA2)) && (reg & R852_SMBIT); |
616 | |
617 | message("detected %s %s card in slot" , |
618 | dev->sm ? "SmartMedia" : "xD" , |
619 | readonly ? "readonly" : "writeable" ); |
620 | |
621 | dev->readonly = readonly; |
622 | spin_unlock_irqrestore(lock: &dev->irqlock, flags); |
623 | } |
624 | |
625 | /* |
626 | * Register the nand device |
627 | * Called when the card is detected |
628 | */ |
629 | static int r852_register_nand_device(struct r852_device *dev) |
630 | { |
631 | struct mtd_info *mtd = nand_to_mtd(chip: dev->chip); |
632 | |
633 | WARN_ON(dev->card_registered); |
634 | |
635 | mtd->dev.parent = &dev->pci_dev->dev; |
636 | |
637 | if (dev->readonly) |
638 | dev->chip->options |= NAND_ROM; |
639 | |
640 | r852_engine_enable(dev); |
641 | |
642 | if (sm_register_device(mtd, smartmedia: dev->sm)) |
643 | goto error1; |
644 | |
645 | if (device_create_file(device: &mtd->dev, entry: &dev_attr_media_type)) { |
646 | message("can't create media type sysfs attribute" ); |
647 | goto error3; |
648 | } |
649 | |
650 | dev->card_registered = 1; |
651 | return 0; |
652 | error3: |
653 | WARN_ON(mtd_device_unregister(nand_to_mtd(dev->chip))); |
654 | nand_cleanup(chip: dev->chip); |
655 | error1: |
656 | /* Force card redetect */ |
657 | dev->card_detected = 0; |
658 | return -1; |
659 | } |
660 | |
661 | /* |
662 | * Unregister the card |
663 | */ |
664 | |
665 | static void r852_unregister_nand_device(struct r852_device *dev) |
666 | { |
667 | struct mtd_info *mtd = nand_to_mtd(chip: dev->chip); |
668 | |
669 | if (!dev->card_registered) |
670 | return; |
671 | |
672 | device_remove_file(dev: &mtd->dev, attr: &dev_attr_media_type); |
673 | WARN_ON(mtd_device_unregister(mtd)); |
674 | nand_cleanup(chip: dev->chip); |
675 | r852_engine_disable(dev); |
676 | dev->card_registered = 0; |
677 | } |
678 | |
679 | /* Card state updater */ |
680 | static void r852_card_detect_work(struct work_struct *work) |
681 | { |
682 | struct r852_device *dev = |
683 | container_of(work, struct r852_device, card_detect_work.work); |
684 | |
685 | r852_card_update_present(dev); |
686 | r852_update_card_detect(dev); |
687 | dev->card_unstable = 0; |
688 | |
689 | /* False alarm */ |
690 | if (dev->card_detected == dev->card_registered) |
691 | goto exit; |
692 | |
693 | /* Read media properties */ |
694 | r852_update_media_status(dev); |
695 | |
696 | /* Register the card */ |
697 | if (dev->card_detected) |
698 | r852_register_nand_device(dev); |
699 | else |
700 | r852_unregister_nand_device(dev); |
701 | exit: |
702 | r852_update_card_detect(dev); |
703 | } |
704 | |
705 | /* Ack + disable IRQ generation */ |
706 | static void r852_disable_irqs(struct r852_device *dev) |
707 | { |
708 | uint8_t reg; |
709 | reg = r852_read_reg(dev, R852_CARD_IRQ_ENABLE); |
710 | r852_write_reg(dev, R852_CARD_IRQ_ENABLE, value: reg & ~R852_CARD_IRQ_MASK); |
711 | |
712 | reg = r852_read_reg_dword(dev, R852_DMA_IRQ_ENABLE); |
713 | r852_write_reg_dword(dev, R852_DMA_IRQ_ENABLE, |
714 | value: reg & ~R852_DMA_IRQ_MASK); |
715 | |
716 | r852_write_reg(dev, R852_CARD_IRQ_STA, R852_CARD_IRQ_MASK); |
717 | r852_write_reg_dword(dev, R852_DMA_IRQ_STA, R852_DMA_IRQ_MASK); |
718 | } |
719 | |
720 | /* Interrupt handler */ |
721 | static irqreturn_t r852_irq(int irq, void *data) |
722 | { |
723 | struct r852_device *dev = (struct r852_device *)data; |
724 | |
725 | uint8_t card_status, dma_status; |
726 | irqreturn_t ret = IRQ_NONE; |
727 | |
728 | spin_lock(lock: &dev->irqlock); |
729 | |
730 | /* handle card detection interrupts first */ |
731 | card_status = r852_read_reg(dev, R852_CARD_IRQ_STA); |
732 | r852_write_reg(dev, R852_CARD_IRQ_STA, value: card_status); |
733 | |
734 | if (card_status & (R852_CARD_IRQ_INSERT|R852_CARD_IRQ_REMOVE)) { |
735 | |
736 | ret = IRQ_HANDLED; |
737 | dev->card_detected = !!(card_status & R852_CARD_IRQ_INSERT); |
738 | |
739 | /* we shouldn't receive any interrupts if we wait for card |
740 | to settle */ |
741 | WARN_ON(dev->card_unstable); |
742 | |
743 | /* disable irqs while card is unstable */ |
744 | /* this will timeout DMA if active, but better that garbage */ |
745 | r852_disable_irqs(dev); |
746 | |
747 | if (dev->card_unstable) |
748 | goto out; |
749 | |
750 | /* let, card state to settle a bit, and then do the work */ |
751 | dev->card_unstable = 1; |
752 | queue_delayed_work(wq: dev->card_workqueue, |
753 | dwork: &dev->card_detect_work, delay: msecs_to_jiffies(m: 100)); |
754 | goto out; |
755 | } |
756 | |
757 | |
758 | /* Handle dma interrupts */ |
759 | dma_status = r852_read_reg_dword(dev, R852_DMA_IRQ_STA); |
760 | r852_write_reg_dword(dev, R852_DMA_IRQ_STA, value: dma_status); |
761 | |
762 | if (dma_status & R852_DMA_IRQ_MASK) { |
763 | |
764 | ret = IRQ_HANDLED; |
765 | |
766 | if (dma_status & R852_DMA_IRQ_ERROR) { |
767 | dbg("received dma error IRQ" ); |
768 | r852_dma_done(dev, error: -EIO); |
769 | complete(&dev->dma_done); |
770 | goto out; |
771 | } |
772 | |
773 | /* received DMA interrupt out of nowhere? */ |
774 | WARN_ON_ONCE(dev->dma_stage == 0); |
775 | |
776 | if (dev->dma_stage == 0) |
777 | goto out; |
778 | |
779 | /* done device access */ |
780 | if (dev->dma_state == DMA_INTERNAL && |
781 | (dma_status & R852_DMA_IRQ_INTERNAL)) { |
782 | |
783 | dev->dma_state = DMA_MEMORY; |
784 | dev->dma_stage++; |
785 | } |
786 | |
787 | /* done memory DMA */ |
788 | if (dev->dma_state == DMA_MEMORY && |
789 | (dma_status & R852_DMA_IRQ_MEMORY)) { |
790 | dev->dma_state = DMA_INTERNAL; |
791 | dev->dma_stage++; |
792 | } |
793 | |
794 | /* Enable 2nd half of dma dance */ |
795 | if (dev->dma_stage == 2) |
796 | r852_dma_enable(dev); |
797 | |
798 | /* Operation done */ |
799 | if (dev->dma_stage == 3) { |
800 | r852_dma_done(dev, error: 0); |
801 | complete(&dev->dma_done); |
802 | } |
803 | goto out; |
804 | } |
805 | |
806 | /* Handle unknown interrupts */ |
807 | if (dma_status) |
808 | dbg("bad dma IRQ status = %x" , dma_status); |
809 | |
810 | if (card_status & ~R852_CARD_STA_CD) |
811 | dbg("strange card status = %x" , card_status); |
812 | |
813 | out: |
814 | spin_unlock(lock: &dev->irqlock); |
815 | return ret; |
816 | } |
817 | |
818 | static int r852_attach_chip(struct nand_chip *chip) |
819 | { |
820 | if (chip->ecc.engine_type != NAND_ECC_ENGINE_TYPE_ON_HOST) |
821 | return 0; |
822 | |
823 | chip->ecc.placement = NAND_ECC_PLACEMENT_INTERLEAVED; |
824 | chip->ecc.size = R852_DMA_LEN; |
825 | chip->ecc.bytes = SM_OOB_SIZE; |
826 | chip->ecc.strength = 2; |
827 | chip->ecc.hwctl = r852_ecc_hwctl; |
828 | chip->ecc.calculate = r852_ecc_calculate; |
829 | chip->ecc.correct = r852_ecc_correct; |
830 | |
831 | /* TODO: hack */ |
832 | chip->ecc.read_oob = r852_read_oob; |
833 | |
834 | return 0; |
835 | } |
836 | |
/* Controller ops: only attach_chip is provided; ECC setup happens there */
static const struct nand_controller_ops r852_ops = {
	.attach_chip = r852_attach_chip,
};
840 | |
841 | static int r852_probe(struct pci_dev *pci_dev, const struct pci_device_id *id) |
842 | { |
843 | int error; |
844 | struct nand_chip *chip; |
845 | struct r852_device *dev; |
846 | |
847 | /* pci initialization */ |
848 | error = pci_enable_device(dev: pci_dev); |
849 | |
850 | if (error) |
851 | goto error1; |
852 | |
853 | pci_set_master(dev: pci_dev); |
854 | |
855 | error = dma_set_mask(dev: &pci_dev->dev, DMA_BIT_MASK(32)); |
856 | if (error) |
857 | goto error2; |
858 | |
859 | error = pci_request_regions(pci_dev, DRV_NAME); |
860 | |
861 | if (error) |
862 | goto error3; |
863 | |
864 | error = -ENOMEM; |
865 | |
866 | /* init nand chip, but register it only on card insert */ |
867 | chip = kzalloc(size: sizeof(struct nand_chip), GFP_KERNEL); |
868 | |
869 | if (!chip) |
870 | goto error4; |
871 | |
872 | /* commands */ |
873 | chip->legacy.cmd_ctrl = r852_cmdctl; |
874 | chip->legacy.waitfunc = r852_wait; |
875 | chip->legacy.dev_ready = r852_ready; |
876 | |
877 | /* I/O */ |
878 | chip->legacy.read_byte = r852_read_byte; |
879 | chip->legacy.read_buf = r852_read_buf; |
880 | chip->legacy.write_buf = r852_write_buf; |
881 | |
882 | /* init our device structure */ |
883 | dev = kzalloc(size: sizeof(struct r852_device), GFP_KERNEL); |
884 | |
885 | if (!dev) |
886 | goto error5; |
887 | |
888 | nand_set_controller_data(chip, priv: dev); |
889 | dev->chip = chip; |
890 | dev->pci_dev = pci_dev; |
891 | pci_set_drvdata(pdev: pci_dev, data: dev); |
892 | |
893 | nand_controller_init(nfc: &dev->controller); |
894 | dev->controller.ops = &r852_ops; |
895 | chip->controller = &dev->controller; |
896 | |
897 | dev->bounce_buffer = dma_alloc_coherent(dev: &pci_dev->dev, R852_DMA_LEN, |
898 | dma_handle: &dev->phys_bounce_buffer, GFP_KERNEL); |
899 | |
900 | if (!dev->bounce_buffer) |
901 | goto error6; |
902 | |
903 | |
904 | error = -ENODEV; |
905 | dev->mmio = pci_ioremap_bar(pdev: pci_dev, bar: 0); |
906 | |
907 | if (!dev->mmio) |
908 | goto error7; |
909 | |
910 | error = -ENOMEM; |
911 | dev->tmp_buffer = kzalloc(SM_SECTOR_SIZE, GFP_KERNEL); |
912 | |
913 | if (!dev->tmp_buffer) |
914 | goto error8; |
915 | |
916 | init_completion(x: &dev->dma_done); |
917 | |
918 | dev->card_workqueue = create_freezable_workqueue(DRV_NAME); |
919 | |
920 | if (!dev->card_workqueue) |
921 | goto error9; |
922 | |
923 | INIT_DELAYED_WORK(&dev->card_detect_work, r852_card_detect_work); |
924 | |
925 | /* shutdown everything - precation */ |
926 | r852_engine_disable(dev); |
927 | r852_disable_irqs(dev); |
928 | |
929 | r852_dma_test(dev); |
930 | |
931 | dev->irq = pci_dev->irq; |
932 | spin_lock_init(&dev->irqlock); |
933 | |
934 | dev->card_detected = 0; |
935 | r852_card_update_present(dev); |
936 | |
937 | /*register irq handler*/ |
938 | error = -ENODEV; |
939 | if (request_irq(irq: pci_dev->irq, handler: &r852_irq, IRQF_SHARED, |
940 | DRV_NAME, dev)) |
941 | goto error10; |
942 | |
943 | /* kick initial present test */ |
944 | queue_delayed_work(wq: dev->card_workqueue, |
945 | dwork: &dev->card_detect_work, delay: 0); |
946 | |
947 | |
948 | pr_notice("driver loaded successfully\n" ); |
949 | return 0; |
950 | |
951 | error10: |
952 | destroy_workqueue(wq: dev->card_workqueue); |
953 | error9: |
954 | kfree(objp: dev->tmp_buffer); |
955 | error8: |
956 | pci_iounmap(dev: pci_dev, dev->mmio); |
957 | error7: |
958 | dma_free_coherent(dev: &pci_dev->dev, R852_DMA_LEN, cpu_addr: dev->bounce_buffer, |
959 | dma_handle: dev->phys_bounce_buffer); |
960 | error6: |
961 | kfree(objp: dev); |
962 | error5: |
963 | kfree(objp: chip); |
964 | error4: |
965 | pci_release_regions(pci_dev); |
966 | error3: |
967 | error2: |
968 | pci_disable_device(dev: pci_dev); |
969 | error1: |
970 | return error; |
971 | } |
972 | |
973 | static void r852_remove(struct pci_dev *pci_dev) |
974 | { |
975 | struct r852_device *dev = pci_get_drvdata(pdev: pci_dev); |
976 | |
977 | /* Stop detect workqueue - |
978 | we are going to unregister the device anyway*/ |
979 | cancel_delayed_work_sync(dwork: &dev->card_detect_work); |
980 | destroy_workqueue(wq: dev->card_workqueue); |
981 | |
982 | /* Unregister the device, this might make more IO */ |
983 | r852_unregister_nand_device(dev); |
984 | |
985 | /* Stop interrupts */ |
986 | r852_disable_irqs(dev); |
987 | free_irq(dev->irq, dev); |
988 | |
989 | /* Cleanup */ |
990 | kfree(objp: dev->tmp_buffer); |
991 | pci_iounmap(dev: pci_dev, dev->mmio); |
992 | dma_free_coherent(dev: &pci_dev->dev, R852_DMA_LEN, cpu_addr: dev->bounce_buffer, |
993 | dma_handle: dev->phys_bounce_buffer); |
994 | |
995 | kfree(objp: dev->chip); |
996 | kfree(objp: dev); |
997 | |
998 | /* Shutdown the PCI device */ |
999 | pci_release_regions(pci_dev); |
1000 | pci_disable_device(dev: pci_dev); |
1001 | } |
1002 | |
1003 | static void r852_shutdown(struct pci_dev *pci_dev) |
1004 | { |
1005 | struct r852_device *dev = pci_get_drvdata(pdev: pci_dev); |
1006 | |
1007 | cancel_delayed_work_sync(dwork: &dev->card_detect_work); |
1008 | r852_disable_irqs(dev); |
1009 | synchronize_irq(irq: dev->irq); |
1010 | pci_disable_device(dev: pci_dev); |
1011 | } |
1012 | |
1013 | #ifdef CONFIG_PM_SLEEP |
1014 | static int r852_suspend(struct device *device) |
1015 | { |
1016 | struct r852_device *dev = dev_get_drvdata(dev: device); |
1017 | |
1018 | if (dev->ctlreg & R852_CTL_CARDENABLE) |
1019 | return -EBUSY; |
1020 | |
1021 | /* First make sure the detect work is gone */ |
1022 | cancel_delayed_work_sync(dwork: &dev->card_detect_work); |
1023 | |
1024 | /* Turn off the interrupts and stop the device */ |
1025 | r852_disable_irqs(dev); |
1026 | r852_engine_disable(dev); |
1027 | |
1028 | /* If card was pulled off just during the suspend, which is very |
1029 | unlikely, we will remove it on resume, it too late now |
1030 | anyway... */ |
1031 | dev->card_unstable = 0; |
1032 | return 0; |
1033 | } |
1034 | |
1035 | static int r852_resume(struct device *device) |
1036 | { |
1037 | struct r852_device *dev = dev_get_drvdata(dev: device); |
1038 | |
1039 | r852_disable_irqs(dev); |
1040 | r852_card_update_present(dev); |
1041 | r852_engine_disable(dev); |
1042 | |
1043 | |
1044 | /* If card status changed, just do the work */ |
1045 | if (dev->card_detected != dev->card_registered) { |
1046 | dbg("card was %s during low power state" , |
1047 | dev->card_detected ? "added" : "removed" ); |
1048 | |
1049 | queue_delayed_work(wq: dev->card_workqueue, |
1050 | dwork: &dev->card_detect_work, delay: msecs_to_jiffies(m: 1000)); |
1051 | return 0; |
1052 | } |
1053 | |
1054 | /* Otherwise, initialize the card */ |
1055 | if (dev->card_registered) { |
1056 | r852_engine_enable(dev); |
1057 | nand_select_target(chip: dev->chip, cs: 0); |
1058 | nand_reset_op(chip: dev->chip); |
1059 | nand_deselect_target(chip: dev->chip); |
1060 | } |
1061 | |
1062 | /* Program card detection IRQ */ |
1063 | r852_update_card_detect(dev); |
1064 | return 0; |
1065 | } |
1066 | #endif |
1067 | |
/* Supported devices: Ricoh R5C852 xD card reader function */
static const struct pci_device_id r852_pci_id_tbl[] = {

	{ PCI_VDEVICE(RICOH, 0x0852), },
	{ },
};

MODULE_DEVICE_TABLE(pci, r852_pci_id_tbl);

/* suspend/resume hooks only; compiled out without CONFIG_PM_SLEEP */
static SIMPLE_DEV_PM_OPS(r852_pm_ops, r852_suspend, r852_resume);
1077 | |
/* PCI driver glue tying probe/remove/shutdown/PM callbacks together */
static struct pci_driver r852_pci_driver = {
	.name		= DRV_NAME,
	.id_table	= r852_pci_id_tbl,
	.probe		= r852_probe,
	.remove		= r852_remove,
	.shutdown	= r852_shutdown,
	.driver.pm	= &r852_pm_ops,
};
1086 | |
/* Standard module boilerplate: registers r852_pci_driver on load */
module_pci_driver(r852_pci_driver);

MODULE_LICENSE("GPL" );
MODULE_AUTHOR("Maxim Levitsky <maximlevitsky@gmail.com>" );
MODULE_DESCRIPTION("Ricoh 85xx xD/smartmedia card reader driver" );
1092 | |