// SPDX-License-Identifier: GPL-2.0+
/*
 * Driver for Realtek PCI-Express card reader
 *
 * Copyright(c) 2009-2013 Realtek Semiconductor Corp. All rights reserved.
 *
 * Author:
 *   Wei WANG (wei_wang@realsil.com.cn)
 *   Micky Ching (micky_ching@realsil.com.cn)
 */

#include <linux/blkdev.h>
#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/workqueue.h>

#include "rtsx.h"
#include "ms.h"
#include "sd.h"
#include "xd.h"

MODULE_DESCRIPTION("Realtek PCI-Express card reader rts5208/rts5288 driver");
MODULE_LICENSE("GPL");

static unsigned int delay_use = 1;
module_param(delay_use, uint, 0644);
MODULE_PARM_DESC(delay_use, "seconds to delay before using a new device");

static int ss_en;
module_param(ss_en, int, 0644);
MODULE_PARM_DESC(ss_en, "enable selective suspend");

static int ss_interval = 50;
module_param(ss_interval, int, 0644);
MODULE_PARM_DESC(ss_interval, "Interval to enter ss state in seconds");

static int auto_delink_en;
module_param(auto_delink_en, int, 0644);
MODULE_PARM_DESC(auto_delink_en, "enable auto delink");

static unsigned char aspm_l0s_l1_en;
module_param(aspm_l0s_l1_en, byte, 0644);
MODULE_PARM_DESC(aspm_l0s_l1_en, "enable device aspm");

static int msi_en;
module_param(msi_en, int, 0644);
MODULE_PARM_DESC(msi_en, "enable msi");

static irqreturn_t rtsx_interrupt(int irq, void *dev_id);

/***********************************************************************
 * Host functions
 ***********************************************************************/

static const char *host_info(struct Scsi_Host *host)
{
	return "SCSI emulation for PCI-Express Mass Storage devices";
}

static int slave_alloc(struct scsi_device *sdev)
{
	/*
	 * Set the INQUIRY transfer length to 36. We don't use any of
	 * the extra data and many devices choke if asked for more or
	 * less than 36 bytes.
	 */
	sdev->inquiry_len = 36;
	return 0;
}

static int slave_configure(struct scsi_device *sdev)
{
	/*
	 * Scatter-gather buffers (all but the last) must have a length
	 * divisible by the bulk maxpacket size. Otherwise a data packet
	 * would end up being short, causing a premature end to the data
	 * transfer. Since high-speed bulk pipes have a maxpacket size
	 * of 512, we'll use that as the scsi device queue's DMA alignment
	 * mask. Guaranteeing proper alignment of the first buffer will
	 * have the desired effect because, except at the beginning and
	 * the end, scatter-gather buffers follow page boundaries.
	 */
	blk_queue_dma_alignment(sdev->request_queue, (512 - 1));

	/* Set the SCSI level to at least 2. We'll leave it at 3 if that's
	 * what is originally reported. We need this to avoid confusing
	 * the SCSI layer with devices that report 0 or 1, but need 10-byte
	 * commands (ala ATAPI devices behind certain bridges, or devices
	 * which simply have broken INQUIRY data).
	 *
	 * NOTE: This means /dev/sg programs (ala cdrecord) will get the
	 * actual information. This seems to be the preference for
	 * programs like that.
	 *
	 * NOTE: This also means that /proc/scsi/scsi and sysfs may report
	 * the actual value or the modified one, depending on where the
	 * data comes from.
	 */
	if (sdev->scsi_level < SCSI_2) {
		sdev->scsi_level = SCSI_2;
		sdev->sdev_target->scsi_level = SCSI_2;
	}

	return 0;
}

/***********************************************************************
 * /proc/scsi/ functions
 ***********************************************************************/

/* we use this macro to help us write into the buffer */
#undef SPRINTF
#define SPRINTF(args...) \
	do { \
		if (pos < buffer + length) \
			pos += sprintf(pos, ## args); \
	} while (0)

/* queue a command */
/* This is always called with spin_lock_irq(host->host_lock) held */
static int queuecommand_lck(struct scsi_cmnd *srb)
{
	void (*done)(struct scsi_cmnd *) = scsi_done;
	struct rtsx_dev *dev = host_to_rtsx(srb->device->host);
	struct rtsx_chip *chip = dev->chip;

	/* check for state-transition errors */
	if (chip->srb) {
		dev_err(&dev->pci->dev, "Error: chip->srb = %p\n",
			chip->srb);
		return SCSI_MLQUEUE_HOST_BUSY;
	}

	/* fail the command if we are disconnecting */
	if (rtsx_chk_stat(chip, RTSX_STAT_DISCONNECT)) {
		dev_info(&dev->pci->dev, "Fail command during disconnect\n");
		srb->result = DID_NO_CONNECT << 16;
		done(srb);
		return 0;
	}

	/* enqueue the command and wake up the control thread */
	chip->srb = srb;
	complete(&dev->cmnd_ready);

	return 0;
}

static DEF_SCSI_QCMD(queuecommand)

/***********************************************************************
 * Error handling functions
 ***********************************************************************/

/* Command timeout and abort */
static int command_abort(struct scsi_cmnd *srb)
{
	struct Scsi_Host *host = srb->device->host;
	struct rtsx_dev *dev = host_to_rtsx(host);
	struct rtsx_chip *chip = dev->chip;

	spin_lock_irq(host->host_lock);

	/* Is this command still active? */
	if (chip->srb != srb) {
		spin_unlock_irq(host->host_lock);
		dev_info(&dev->pci->dev, "-- nothing to abort\n");
		return FAILED;
	}

	rtsx_set_stat(chip, RTSX_STAT_ABORT);

	spin_unlock_irq(host->host_lock);

	/* Wait for the aborted command to finish */
	wait_for_completion(&dev->notify);

	return SUCCESS;
}

/*
 * This invokes the transport reset mechanism to reset the state of the
 * device
 */
static int device_reset(struct scsi_cmnd *srb)
{
	return SUCCESS;
}

/*
 * this defines our host template, with which we'll allocate hosts
 */

static const struct scsi_host_template rtsx_host_template = {
	/* basic userland interface stuff */
	.name = CR_DRIVER_NAME,
	.proc_name = CR_DRIVER_NAME,
	.info = host_info,

	/* command interface -- queued only */
	.queuecommand = queuecommand,

	/* error and abort handlers */
	.eh_abort_handler = command_abort,
	.eh_device_reset_handler = device_reset,

	/* queue commands only, only one command per LUN */
	.can_queue = 1,

	/* unknown initiator id */
	.this_id = -1,

	.slave_alloc = slave_alloc,
	.slave_configure = slave_configure,

	/* lots of sg segments can be handled */
	.sg_tablesize = SG_ALL,

	/* limit the total size of a transfer to 120 KB */
	.max_sectors = 240,

	/* emulated HBA */
	.emulated = 1,

	/* we do our own delay after a device or bus reset */
	.skip_settle_delay = 1,

	/* module management */
	.module = THIS_MODULE
};

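/*
 * Request the interrupt line for the reader: an exclusive vector when MSI
 * is enabled, otherwise a shared legacy INTx line (IRQF_SHARED), with
 * pci_intx() enabling INTx only in the non-MSI case.
 */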
static int rtsx_acquire_irq(struct rtsx_dev *dev)
{
	struct rtsx_chip *chip = dev->chip;

	dev_info(&dev->pci->dev, "%s: chip->msi_en = %d, pci->irq = %d\n",
		 __func__, chip->msi_en, dev->pci->irq);

	if (request_irq(dev->pci->irq, rtsx_interrupt,
			chip->msi_en ? 0 : IRQF_SHARED,
			CR_DRIVER_NAME, dev)) {
		dev_err(&dev->pci->dev,
			"rtsx: unable to grab IRQ %d, disabling device\n",
			dev->pci->irq);
		return -1;
	}

	dev->irq = dev->pci->irq;
	pci_intx(dev->pci, !chip->msi_en);

	return 0;
}

/*
 * power management
 */
static int __maybe_unused rtsx_suspend(struct device *dev_d)
{
	struct pci_dev *pci = to_pci_dev(dev_d);
	struct rtsx_dev *dev = pci_get_drvdata(pci);
	struct rtsx_chip *chip;

	if (!dev)
		return 0;

	/* lock the device pointers */
	mutex_lock(&dev->dev_mutex);

	chip = dev->chip;

	rtsx_do_before_power_down(chip, PM_S3);

	if (dev->irq >= 0) {
		free_irq(dev->irq, (void *)dev);
		dev->irq = -1;
	}

	if (chip->msi_en)
		pci_free_irq_vectors(pci);

	device_wakeup_enable(dev_d);

	/* unlock the device pointers */
	mutex_unlock(&dev->dev_mutex);

	return 0;
}

static int __maybe_unused rtsx_resume(struct device *dev_d)
{
	struct pci_dev *pci = to_pci_dev(dev_d);
	struct rtsx_dev *dev = pci_get_drvdata(pci);
	struct rtsx_chip *chip;

	if (!dev)
		return 0;

	chip = dev->chip;

	/* lock the device pointers */
	mutex_lock(&dev->dev_mutex);

	pci_set_master(pci);

	if (chip->msi_en) {
		if (pci_alloc_irq_vectors(pci, 1, 1, PCI_IRQ_MSI) < 0)
			chip->msi_en = 0;
	}

	if (rtsx_acquire_irq(dev) < 0) {
		/* unlock the device pointers */
		mutex_unlock(&dev->dev_mutex);
		return -EIO;
	}

	rtsx_write_register(chip, HOST_SLEEP_STATE, 0x03, 0x00);
	rtsx_init_chip(chip);

	/* unlock the device pointers */
	mutex_unlock(&dev->dev_mutex);

	return 0;
}

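/*
 * Shutdown callback: put the reader into a low-power state, release the
 * IRQ and any MSI vector, and disable the PCI device before the system
 * powers off or reboots.
 */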
static void rtsx_shutdown(struct pci_dev *pci)
{
	struct rtsx_dev *dev = pci_get_drvdata(pci);
	struct rtsx_chip *chip;

	if (!dev)
		return;

	chip = dev->chip;

	rtsx_do_before_power_down(chip, PM_S1);

	if (dev->irq >= 0) {
		free_irq(dev->irq, (void *)dev);
		dev->irq = -1;
	}

	if (chip->msi_en)
		pci_free_irq_vectors(pci);

	pci_disable_device(pci);
}

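/*
 * Control thread: woken by queuecommand() through dev->cmnd_ready, it
 * validates the queued SCSI command, hands it to rtsx_invoke_transport()
 * and completes it back to the SCSI midlayer. Only one command is in
 * flight at a time (can_queue = 1).
 */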
static int rtsx_control_thread(void *__dev)
{
	struct rtsx_dev *dev = __dev;
	struct rtsx_chip *chip = dev->chip;
	struct Scsi_Host *host = rtsx_to_host(dev);

	for (;;) {
		if (wait_for_completion_interruptible(&dev->cmnd_ready))
			break;

		/* lock the device pointers */
		mutex_lock(&dev->dev_mutex);

		/* if the device has disconnected, we are free to exit */
		if (rtsx_chk_stat(chip, RTSX_STAT_DISCONNECT)) {
			dev_info(&dev->pci->dev, "-- rtsx-control exiting\n");
			mutex_unlock(&dev->dev_mutex);
			break;
		}

		/* lock access to the state */
		spin_lock_irq(host->host_lock);

		/* has the command been aborted? */
		if (rtsx_chk_stat(chip, RTSX_STAT_ABORT)) {
			chip->srb->result = DID_ABORT << 16;
			goto skip_for_abort;
		}

		spin_unlock_irq(host->host_lock);

		/* reject the command if the direction indicator
		 * is UNKNOWN
		 */
		if (chip->srb->sc_data_direction == DMA_BIDIRECTIONAL) {
			dev_err(&dev->pci->dev, "UNKNOWN data direction\n");
			chip->srb->result = DID_ERROR << 16;
		} else if (chip->srb->device->id) {
			/* reject if target != 0 or if LUN is higher than
			 * the maximum known LUN
			 */
			dev_err(&dev->pci->dev, "Bad target number (%d:%d)\n",
				chip->srb->device->id,
				(u8)chip->srb->device->lun);
			chip->srb->result = DID_BAD_TARGET << 16;
		} else if (chip->srb->device->lun > chip->max_lun) {
			dev_err(&dev->pci->dev, "Bad LUN (%d:%d)\n",
				chip->srb->device->id,
				(u8)chip->srb->device->lun);
			chip->srb->result = DID_BAD_TARGET << 16;
		} else {
			/* we've got a command, let's do it! */
			scsi_show_command(chip);
			rtsx_invoke_transport(chip->srb, chip);
		}

		/* lock access to the state */
		spin_lock_irq(host->host_lock);

		/* did the command already complete because of a disconnect? */
		if (!chip->srb)
			; /* nothing to do */

		/* indicate that the command is done */
		else if (chip->srb->result != DID_ABORT << 16) {
			scsi_done(chip->srb);
		} else {
skip_for_abort:
			dev_err(&dev->pci->dev, "scsi command aborted\n");
		}

		if (rtsx_chk_stat(chip, RTSX_STAT_ABORT)) {
			complete(&dev->notify);

			rtsx_set_stat(chip, RTSX_STAT_IDLE);
		}

		/* finished working on this command */
		chip->srb = NULL;
		spin_unlock_irq(host->host_lock);

		/* unlock the device pointers */
		mutex_unlock(&dev->dev_mutex);
	} /* for (;;) */

	/* notify the exit routine that we're actually exiting now
	 *
	 * complete()/wait_for_completion() is similar to up()/down(),
	 * except that complete() is safe in the case where the structure
	 * is getting deleted in a parallel mode of execution (i.e. just
	 * after the down() -- that's necessary for the thread-shutdown
	 * case.
	 *
	 * kthread_complete_and_exit() goes even further than this --
	 * it is safe in the case that the thread of the caller is going away
	 * (not just the structure) -- this is necessary for the module-remove
	 * case. This is important in preemption kernels, which transfer the
	 * flow of execution immediately upon a complete().
	 */
	kthread_complete_and_exit(&dev->control_exit, 0);
}

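/*
 * Polling thread: after giving the initial SCSI scan time to finish,
 * periodically calls mspro_polling_format_status() and rtsx_polling_func()
 * to service card events until the device disconnects.
 */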
static int rtsx_polling_thread(void *__dev)
{
	struct rtsx_dev *dev = __dev;
	struct rtsx_chip *chip = dev->chip;
	struct sd_info *sd_card = &chip->sd_card;
	struct xd_info *xd_card = &chip->xd_card;
	struct ms_info *ms_card = &chip->ms_card;

	sd_card->cleanup_counter = 0;
	xd_card->cleanup_counter = 0;
	ms_card->cleanup_counter = 0;

	/* Wait until SCSI scan finished */
	wait_timeout((delay_use + 5) * 1000);

	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(msecs_to_jiffies(POLLING_INTERVAL));

		/* lock the device pointers */
		mutex_lock(&dev->dev_mutex);

		/* if the device has disconnected, we are free to exit */
		if (rtsx_chk_stat(chip, RTSX_STAT_DISCONNECT)) {
			dev_info(&dev->pci->dev, "-- rtsx-polling exiting\n");
			mutex_unlock(&dev->dev_mutex);
			break;
		}

		mutex_unlock(&dev->dev_mutex);

		mspro_polling_format_status(chip);

		/* lock the device pointers */
		mutex_lock(&dev->dev_mutex);

		rtsx_polling_func(chip);

		/* unlock the device pointers */
		mutex_unlock(&dev->dev_mutex);
	}

	kthread_complete_and_exit(&dev->polling_exit, 0);
}

/*
 * interrupt handler
 */
static irqreturn_t rtsx_interrupt(int irq, void *dev_id)
{
	struct rtsx_dev *dev = dev_id;
	struct rtsx_chip *chip;
	int retval;
	u32 status;

	if (dev)
		chip = dev->chip;
	else
		return IRQ_NONE;

	if (!chip)
		return IRQ_NONE;

	spin_lock(&dev->reg_lock);

	retval = rtsx_pre_handle_interrupt(chip);
	if (retval == STATUS_FAIL) {
		spin_unlock(&dev->reg_lock);
		if (chip->int_reg == 0xFFFFFFFF)
			return IRQ_HANDLED;
		return IRQ_NONE;
	}

	status = chip->int_reg;

	if (dev->check_card_cd) {
		if (!(dev->check_card_cd & status)) {
			/* card does not exist, return TRANS_RESULT_FAIL */
			dev->trans_result = TRANS_RESULT_FAIL;
			if (dev->done)
				complete(dev->done);
			goto exit;
		}
	}

	if (status & (NEED_COMPLETE_INT | DELINK_INT)) {
		if (status & (TRANS_FAIL_INT | DELINK_INT)) {
			if (status & DELINK_INT)
				RTSX_SET_DELINK(chip);
			dev->trans_result = TRANS_RESULT_FAIL;
			if (dev->done)
				complete(dev->done);
		} else if (status & TRANS_OK_INT) {
			dev->trans_result = TRANS_RESULT_OK;
			if (dev->done)
				complete(dev->done);
		} else if (status & DATA_DONE_INT) {
			dev->trans_result = TRANS_NOT_READY;
			if (dev->done && dev->trans_state == STATE_TRANS_SG)
				complete(dev->done);
		}
	}

exit:
	spin_unlock(&dev->reg_lock);
	return IRQ_HANDLED;
}

/* Release all our dynamic resources */
static void rtsx_release_resources(struct rtsx_dev *dev)
{
	dev_info(&dev->pci->dev, "-- %s\n", __func__);

	/* Tell the control thread to exit. The SCSI host must
	 * already have been removed so it won't try to queue
	 * any more commands.
	 */
	dev_info(&dev->pci->dev, "-- sending exit command to thread\n");
	complete(&dev->cmnd_ready);
	if (dev->ctl_thread)
		wait_for_completion(&dev->control_exit);
	if (dev->polling_thread)
		wait_for_completion(&dev->polling_exit);

	wait_timeout(200);

	if (dev->rtsx_resv_buf) {
		dev->chip->host_cmds_ptr = NULL;
		dev->chip->host_sg_tbl_ptr = NULL;
	}

	if (dev->irq > 0)
		free_irq(dev->irq, (void *)dev);
	if (dev->chip->msi_en)
		pci_free_irq_vectors(dev->pci);
	if (dev->remap_addr)
		iounmap(dev->remap_addr);

	rtsx_release_chip(dev->chip);
	kfree(dev->chip);
}

/*
 * First stage of disconnect processing: stop all commands and remove
 * the host
 */
static void quiesce_and_remove_host(struct rtsx_dev *dev)
{
	struct Scsi_Host *host = rtsx_to_host(dev);
	struct rtsx_chip *chip = dev->chip;

	/*
	 * Prevent new transfers, stop the current command, and
	 * interrupt a SCSI-scan or device-reset delay
	 */
	mutex_lock(&dev->dev_mutex);
	spin_lock_irq(host->host_lock);
	rtsx_set_stat(chip, RTSX_STAT_DISCONNECT);
	spin_unlock_irq(host->host_lock);
	mutex_unlock(&dev->dev_mutex);
	wake_up(&dev->delay_wait);
	wait_for_completion(&dev->scanning_done);

	/* Wait some time to let other threads exit */
	wait_timeout(100);

	/*
	 * queuecommand won't accept any new commands and the control
	 * thread won't execute a previously-queued command. If there
	 * is such a command pending, complete it with an error.
	 */
	mutex_lock(&dev->dev_mutex);
	if (chip->srb) {
		chip->srb->result = DID_NO_CONNECT << 16;
		spin_lock_irq(host->host_lock);
		scsi_done(dev->chip->srb);
		chip->srb = NULL;
		spin_unlock_irq(host->host_lock);
	}
	mutex_unlock(&dev->dev_mutex);

	/* Now we own no commands so it's safe to remove the SCSI host */
	scsi_remove_host(host);
}

/* Second stage of disconnect processing: deallocate all resources */
static void release_everything(struct rtsx_dev *dev)
{
	rtsx_release_resources(dev);

	/*
	 * Drop our reference to the host; the SCSI core will free it
	 * when the refcount becomes 0.
	 */
	scsi_host_put(rtsx_to_host(dev));
}

/* Thread to carry out delayed SCSI-device scanning */
static int rtsx_scan_thread(void *__dev)
{
	struct rtsx_dev *dev = __dev;
	struct rtsx_chip *chip = dev->chip;

	/* Wait for the timeout to expire or for a disconnect */
	if (delay_use > 0) {
		dev_info(&dev->pci->dev,
			 "%s: waiting for device to settle before scanning\n",
			 CR_DRIVER_NAME);
		wait_event_interruptible_timeout
			(dev->delay_wait,
			 rtsx_chk_stat(chip, RTSX_STAT_DISCONNECT),
			 delay_use * HZ);
	}

	/* If the device is still connected, perform the scanning */
	if (!rtsx_chk_stat(chip, RTSX_STAT_DISCONNECT)) {
		scsi_scan_host(rtsx_to_host(dev));
		dev_info(&dev->pci->dev, "%s: device scan complete\n",
			 CR_DRIVER_NAME);

		/* Should we unbind if no devices were detected? */
	}

	kthread_complete_and_exit(&dev->scanning_done, 0);
}

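/*
 * Fill in the default chip configuration: the module parameters (ss_en,
 * auto_delink_en, aspm_l0s_l1_en, msi_en, ...) plus fixed clock, SSC,
 * drive-strength and timeout settings used by the card-handling code.
 */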
static void rtsx_init_options(struct rtsx_chip *chip)
{
	chip->vendor_id = chip->rtsx->pci->vendor;
	chip->product_id = chip->rtsx->pci->device;
	chip->adma_mode = 1;
	chip->lun_mc = 0;
	chip->driver_first_load = 1;
#ifdef HW_AUTO_SWITCH_SD_BUS
	chip->sdio_in_charge = 0;
#endif

	chip->mspro_formatter_enable = 1;
	chip->ignore_sd = 0;
	chip->use_hw_setting = 0;
	chip->lun_mode = DEFAULT_SINGLE;
	chip->auto_delink_en = auto_delink_en;
	chip->ss_en = ss_en;
	chip->ss_idle_period = ss_interval * 1000;
	chip->remote_wakeup_en = 0;
	chip->aspm_l0s_l1_en = aspm_l0s_l1_en;
	chip->dynamic_aspm = 1;
	chip->fpga_sd_sdr104_clk = CLK_200;
	chip->fpga_sd_ddr50_clk = CLK_100;
	chip->fpga_sd_sdr50_clk = CLK_100;
	chip->fpga_sd_hs_clk = CLK_100;
	chip->fpga_mmc_52m_clk = CLK_80;
	chip->fpga_ms_hg_clk = CLK_80;
	chip->fpga_ms_4bit_clk = CLK_80;
	chip->fpga_ms_1bit_clk = CLK_40;
	chip->asic_sd_sdr104_clk = 203;
	chip->asic_sd_sdr50_clk = 98;
	chip->asic_sd_ddr50_clk = 98;
	chip->asic_sd_hs_clk = 98;
	chip->asic_mmc_52m_clk = 98;
	chip->asic_ms_hg_clk = 117;
	chip->asic_ms_4bit_clk = 78;
	chip->asic_ms_1bit_clk = 39;
	chip->ssc_depth_sd_sdr104 = SSC_DEPTH_2M;
	chip->ssc_depth_sd_sdr50 = SSC_DEPTH_2M;
	chip->ssc_depth_sd_ddr50 = SSC_DEPTH_1M;
	chip->ssc_depth_sd_hs = SSC_DEPTH_1M;
	chip->ssc_depth_mmc_52m = SSC_DEPTH_1M;
	chip->ssc_depth_ms_hg = SSC_DEPTH_1M;
	chip->ssc_depth_ms_4bit = SSC_DEPTH_512K;
	chip->ssc_depth_low_speed = SSC_DEPTH_512K;
	chip->ssc_en = 1;
	chip->sd_speed_prior = 0x01040203;
	chip->sd_current_prior = 0x00010203;
	chip->sd_ctl = SD_PUSH_POINT_AUTO |
		       SD_SAMPLE_POINT_AUTO |
		       SUPPORT_MMC_DDR_MODE;
	chip->sd_ddr_tx_phase = 0;
	chip->mmc_ddr_tx_phase = 1;
	chip->sd_default_tx_phase = 15;
	chip->sd_default_rx_phase = 15;
	chip->pmos_pwr_on_interval = 200;
	chip->sd_voltage_switch_delay = 1000;
	chip->ms_power_class_en = 3;

	chip->sd_400mA_ocp_thd = 1;
	chip->sd_800mA_ocp_thd = 5;
	chip->ms_ocp_thd = 2;

	chip->card_drive_sel = 0x55;
	chip->sd30_drive_sel_1v8 = 0x03;
	chip->sd30_drive_sel_3v3 = 0x01;

	chip->do_delink_before_power_down = 1;
	chip->auto_power_down = 1;
	chip->polling_config = 0;

	chip->force_clkreq_0 = 1;
	chip->ft2_fast_mode = 0;

	chip->sdio_retry_cnt = 1;

	chip->xd_timeout = 2000;
	chip->sd_timeout = 10000;
	chip->ms_timeout = 2000;
	chip->mspro_timeout = 15000;

	chip->power_down_in_ss = 1;

	chip->sdr104_en = 1;
	chip->sdr50_en = 1;
	chip->ddr50_en = 1;

	chip->delink_stage1_step = 100;
	chip->delink_stage2_step = 40;
	chip->delink_stage3_step = 20;

	chip->auto_delink_in_L1 = 1;
	chip->blink_led = 1;
	chip->msi_en = msi_en;
	chip->hp_watch_bios_hotplug = 0;
	chip->max_payload = 0;
	chip->phy_voltage = 0;

	chip->support_ms_8bit = 1;
	chip->s3_pwr_off_delay = 1000;
}

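/*
 * Probe: enable the PCI device, map its MMIO BAR, allocate the DMA buffer
 * that holds the host command and scatter-gather tables, initialize the
 * chip, and start the control, scan and polling kernel threads.
 */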
static int rtsx_probe(struct pci_dev *pci,
		      const struct pci_device_id *pci_id)
{
	struct Scsi_Host *host;
	struct rtsx_dev *dev;
	int err = 0;
	struct task_struct *th;

	dev_dbg(&pci->dev, "Realtek PCI-E card reader detected\n");

	err = pcim_enable_device(pci);
	if (err < 0) {
		dev_err(&pci->dev, "PCI enable device failed!\n");
		return err;
	}

	err = pci_request_regions(pci, CR_DRIVER_NAME);
	if (err < 0) {
		dev_err(&pci->dev, "PCI request regions for %s failed!\n",
			CR_DRIVER_NAME);
		return err;
	}

	/*
	 * Ask the SCSI layer to allocate a host structure, with extra
	 * space at the end for our private rtsx_dev structure.
	 */
	host = scsi_host_alloc(&rtsx_host_template, sizeof(*dev));
	if (!host) {
		dev_err(&pci->dev, "Unable to allocate the scsi host\n");
		err = -ENOMEM;
		goto scsi_host_alloc_fail;
	}

	dev = host_to_rtsx(host);
	memset(dev, 0, sizeof(struct rtsx_dev));

	dev->chip = kzalloc(sizeof(*dev->chip), GFP_KERNEL);
	if (!dev->chip) {
		err = -ENOMEM;
		goto chip_alloc_fail;
	}

	spin_lock_init(&dev->reg_lock);
	mutex_init(&dev->dev_mutex);
	init_completion(&dev->cmnd_ready);
	init_completion(&dev->control_exit);
	init_completion(&dev->polling_exit);
	init_completion(&dev->notify);
	init_completion(&dev->scanning_done);
	init_waitqueue_head(&dev->delay_wait);

	dev->pci = pci;
	dev->irq = -1;

	dev_info(&pci->dev, "Resource length: 0x%x\n",
		 (unsigned int)pci_resource_len(pci, 0));
	dev->addr = pci_resource_start(pci, 0);
	dev->remap_addr = ioremap(dev->addr, pci_resource_len(pci, 0));
	if (!dev->remap_addr) {
		dev_err(&pci->dev, "ioremap error\n");
		err = -ENXIO;
		goto ioremap_fail;
	}

	/*
	 * Using "unsigned long" cast here to eliminate gcc warning in
	 * 64-bit system
	 */
	dev_info(&pci->dev, "Original address: 0x%lx, remapped address: 0x%lx\n",
		 (unsigned long)(dev->addr), (unsigned long)(dev->remap_addr));

	dev->rtsx_resv_buf = dmam_alloc_coherent(&pci->dev, RTSX_RESV_BUF_LEN,
						 &dev->rtsx_resv_buf_addr,
						 GFP_KERNEL);
	if (!dev->rtsx_resv_buf) {
		dev_err(&pci->dev, "alloc dma buffer fail\n");
		err = -ENXIO;
		goto dma_alloc_fail;
	}
	dev->chip->host_cmds_ptr = dev->rtsx_resv_buf;
	dev->chip->host_cmds_addr = dev->rtsx_resv_buf_addr;
	dev->chip->host_sg_tbl_ptr = dev->rtsx_resv_buf + HOST_CMDS_BUF_LEN;
	dev->chip->host_sg_tbl_addr = dev->rtsx_resv_buf_addr +
				      HOST_CMDS_BUF_LEN;

	dev->chip->rtsx = dev;

	rtsx_init_options(dev->chip);

	dev_info(&pci->dev, "pci->irq = %d\n", pci->irq);

	if (dev->chip->msi_en) {
		if (pci_alloc_irq_vectors(pci, 1, 1, PCI_IRQ_MSI) < 0)
			dev->chip->msi_en = 0;
	}

	if (rtsx_acquire_irq(dev) < 0) {
		err = -EBUSY;
		goto irq_acquire_fail;
	}

	pci_set_master(pci);
	synchronize_irq(dev->irq);

	rtsx_init_chip(dev->chip);

	/*
	 * set the supported max_lun and max_id for the scsi host
	 * NOTE: the minimal value of max_id is 1
	 */
	host->max_id = 1;
	host->max_lun = dev->chip->max_lun;

	/* Start up our control thread */
	th = kthread_run(rtsx_control_thread, dev, CR_DRIVER_NAME);
	if (IS_ERR(th)) {
		dev_err(&pci->dev, "Unable to start control thread\n");
		err = PTR_ERR(th);
		goto control_thread_fail;
	}
	dev->ctl_thread = th;

	err = scsi_add_host(host, &pci->dev);
	if (err) {
		dev_err(&pci->dev, "Unable to add the scsi host\n");
		goto scsi_add_host_fail;
	}

	/* Start up the thread for delayed SCSI-device scanning */
	th = kthread_run(rtsx_scan_thread, dev, "rtsx-scan");
	if (IS_ERR(th)) {
		dev_err(&pci->dev, "Unable to start the device-scanning thread\n");
		complete(&dev->scanning_done);
		err = PTR_ERR(th);
		goto scan_thread_fail;
	}

	/* Start up the device-polling thread */
	th = kthread_run(rtsx_polling_thread, dev, "rtsx-polling");
	if (IS_ERR(th)) {
		dev_err(&pci->dev, "Unable to start the device-polling thread\n");
		err = PTR_ERR(th);
		goto scan_thread_fail;
	}
	dev->polling_thread = th;

	pci_set_drvdata(pci, dev);

	return 0;

	/* We come here if there are any problems */
scan_thread_fail:
	quiesce_and_remove_host(dev);
scsi_add_host_fail:
	complete(&dev->cmnd_ready);
	wait_for_completion(&dev->control_exit);
control_thread_fail:
	free_irq(dev->irq, (void *)dev);
	rtsx_release_chip(dev->chip);
irq_acquire_fail:
	dev->chip->host_cmds_ptr = NULL;
	dev->chip->host_sg_tbl_ptr = NULL;
	if (dev->chip->msi_en)
		pci_free_irq_vectors(dev->pci);
dma_alloc_fail:
	iounmap(dev->remap_addr);
ioremap_fail:
	kfree(dev->chip);
chip_alloc_fail:
	dev_err(&pci->dev, "%s failed\n", __func__);
	scsi_host_put(host);
scsi_host_alloc_fail:
	pci_release_regions(pci);
	return err;
}

static void rtsx_remove(struct pci_dev *pci)
{
	struct rtsx_dev *dev = pci_get_drvdata(pci);

	quiesce_and_remove_host(dev);
	release_everything(dev);
	pci_release_regions(pci);
}

/* PCI IDs */
static const struct pci_device_id rtsx_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x5208),
	  PCI_CLASS_OTHERS << 16, 0xFF0000 },
	{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x5288),
	  PCI_CLASS_OTHERS << 16, 0xFF0000 },
	{ 0, },
};

MODULE_DEVICE_TABLE(pci, rtsx_ids);

static SIMPLE_DEV_PM_OPS(rtsx_pm_ops, rtsx_suspend, rtsx_resume);

/* pci_driver definition */
static struct pci_driver rtsx_driver = {
	.name = CR_DRIVER_NAME,
	.id_table = rtsx_ids,
	.probe = rtsx_probe,
	.remove = rtsx_remove,
	.driver.pm = &rtsx_pm_ops,
	.shutdown = rtsx_shutdown,
};

module_pci_driver(rtsx_driver);
