// SPDX-License-Identifier: GPL-2.0
/*
 * Block driver for media (i.e., flash cards)
 *
 * Copyright 2002 Hewlett-Packard Company
 * Copyright 2005-2008 Pierre Ossman
 *
 * Use consistent with the GNU GPL is permitted,
 * provided that this copyright notice is
 * preserved in its entirety in all copies and derived works.
 *
 * HEWLETT-PACKARD COMPANY MAKES NO WARRANTIES, EXPRESSED OR IMPLIED,
 * AS TO THE USEFULNESS OR CORRECTNESS OF THIS CODE OR ITS
 * FITNESS FOR ANY PARTICULAR PURPOSE.
 *
 * Many thanks to Alessandro Rubini and Jonathan Corbet!
 *
 * Author:  Andrew Christian
 *          28 May 2002
 */
#include <linux/moduleparam.h>
#include <linux/module.h>
#include <linux/init.h>

#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/hdreg.h>
#include <linux/kdev_t.h>
#include <linux/kref.h>
#include <linux/blkdev.h>
#include <linux/cdev.h>
#include <linux/mutex.h>
#include <linux/scatterlist.h>
#include <linux/string_helpers.h>
#include <linux/delay.h>
#include <linux/capability.h>
#include <linux/compat.h>
#include <linux/pm_runtime.h>
#include <linux/idr.h>
#include <linux/debugfs.h>

#include <linux/mmc/ioctl.h>
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sd.h>

#include <linux/uaccess.h>

#include "queue.h"
#include "block.h"
#include "core.h"
#include "card.h"
#include "crypto.h"
#include "host.h"
#include "bus.h"
#include "mmc_ops.h"
#include "quirks.h"
#include "sd_ops.h"

MODULE_ALIAS("mmc:block");
#ifdef MODULE_PARAM_PREFIX
#undef MODULE_PARAM_PREFIX
#endif
#define MODULE_PARAM_PREFIX "mmcblk."

/*
 * Set a 10 second timeout for polling write request busy state. Note, mmc core
 * is setting a 3 second timeout for SD cards, and SDHCI has long had a 10
 * second software timer to timeout the whole request, so 10 seconds should be
 * ample.
 */
#define MMC_BLK_TIMEOUT_MS  (10 * 1000)
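/*
 * A CMD6 (SWITCH) argument carries the EXT_CSD byte index in bits
 * [23:16] and the value to write in bits [15:8]; these helpers pull the
 * two fields back out of a raw argument. For example, the flush argument
 * 0x03200101 built in mmc_blk_cqe_issue_flush() below decodes to index
 * 0x20 (EXT_CSD_FLUSH_CACHE) and value 0x01.
 */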
#define MMC_EXTRACT_INDEX_FROM_ARG(x) ((x & 0x00FF0000) >> 16)
#define MMC_EXTRACT_VALUE_FROM_ARG(x) ((x & 0x0000FF00) >> 8)

static DEFINE_MUTEX(block_mutex);

/*
 * The defaults come from config options but can be overridden by module
 * or bootarg options.
 */
static int perdev_minors = CONFIG_MMC_BLOCK_MINORS;

/*
 * We've only got one major, so number of mmcblk devices is
 * limited to (1 << 20) / number of minors per device. It is also
 * limited by the MAX_DEVICES below.
 */
static int max_devices;

#define MAX_DEVICES 256

static DEFINE_IDA(mmc_blk_ida);
static DEFINE_IDA(mmc_rpmb_ida);

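/*
 * Context for mmc_blk_busy_cb(): the card being polled with CMD13 and
 * the last R1 status it reported.
 */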
struct mmc_blk_busy_data {
	struct mmc_card *card;
	u32 status;
};

/*
 * There is one mmc_blk_data per slot.
 */
struct mmc_blk_data {
	struct device	*parent;
	struct gendisk	*disk;
	struct mmc_queue queue;
	struct list_head part;
	struct list_head rpmbs;

	unsigned int	flags;
#define MMC_BLK_CMD23	(1 << 0)	/* Can do SET_BLOCK_COUNT for multiblock */
#define MMC_BLK_REL_WR	(1 << 1)	/* MMC Reliable write support */

	struct kref	kref;
	unsigned int	read_only;
	unsigned int	part_type;
	unsigned int	reset_done;
#define MMC_BLK_READ		BIT(0)
#define MMC_BLK_WRITE		BIT(1)
#define MMC_BLK_DISCARD		BIT(2)
#define MMC_BLK_SECDISCARD	BIT(3)
#define MMC_BLK_CQE_RECOVERY	BIT(4)
#define MMC_BLK_TRIM		BIT(5)

	/*
	 * Only set in main mmc_blk_data associated
	 * with mmc_card with dev_set_drvdata, and keeps
	 * track of the current selected device partition.
	 */
	unsigned int	part_curr;
#define MMC_BLK_PART_INVALID	UINT_MAX	/* Unknown partition active */
	int	area_type;

	/* debugfs files (only in main mmc_blk_data) */
	struct dentry *status_dentry;
	struct dentry *ext_csd_dentry;
};

/* Device type for RPMB character devices */
static dev_t mmc_rpmb_devt;

/* Bus type for RPMB character devices */
static struct bus_type mmc_rpmb_bus_type = {
	.name = "mmc_rpmb",
};

/**
 * struct mmc_rpmb_data - special RPMB device type for these areas
 * @dev: the device for the RPMB area
 * @chrdev: character device for the RPMB area
 * @id: unique device ID number
 * @part_index: partition index (0 on first)
 * @md: parent MMC block device
 * @node: list item, so we can put this device on a list
 */
struct mmc_rpmb_data {
	struct device dev;
	struct cdev chrdev;
	int id;
	unsigned int part_index;
	struct mmc_blk_data *md;
	struct list_head node;
};

static DEFINE_MUTEX(open_lock);

module_param(perdev_minors, int, 0444);
MODULE_PARM_DESC(perdev_minors, "Minor numbers to allocate per device");

static inline int mmc_blk_part_switch(struct mmc_card *card,
				      unsigned int part_type);
static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
			       struct mmc_card *card,
			       int recovery_mode,
			       struct mmc_queue *mq);
static void mmc_blk_hsq_req_done(struct mmc_request *mrq);
static int mmc_spi_err_check(struct mmc_card *card);
static int mmc_blk_busy_cb(void *cb_data, bool *busy);

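/*
 * Grab a reference on the mmc_blk_data bound to @disk. Returns NULL if
 * the device is already being torn down (refcount has hit zero). Each
 * successful call must be balanced by mmc_blk_put().
 */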
static struct mmc_blk_data *mmc_blk_get(struct gendisk *disk)
{
	struct mmc_blk_data *md;

	mutex_lock(&open_lock);
	md = disk->private_data;
	if (md && !kref_get_unless_zero(&md->kref))
		md = NULL;
	mutex_unlock(&open_lock);

	return md;
}

static inline int mmc_get_devidx(struct gendisk *disk)
{
	int devidx = disk->first_minor / perdev_minors;
	return devidx;
}

static void mmc_blk_kref_release(struct kref *ref)
{
	struct mmc_blk_data *md = container_of(ref, struct mmc_blk_data, kref);
	int devidx;

	devidx = mmc_get_devidx(md->disk);
	ida_simple_remove(&mmc_blk_ida, devidx);

	mutex_lock(&open_lock);
	md->disk->private_data = NULL;
	mutex_unlock(&open_lock);

	put_disk(md->disk);
	kfree(md);
}

static void mmc_blk_put(struct mmc_blk_data *md)
{
	kref_put(&md->kref, mmc_blk_kref_release);
}

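/*
 * The ro_lock_until_next_power_on attribute reports the boot area lock
 * state: 0 = unlocked, 1 = locked until next power on, 2 = permanently
 * locked. Writing 1 dispatches a MMC_DRV_OP_BOOT_WP request to lock the
 * boot partitions read-only until the next power cycle.
 */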
static ssize_t power_ro_lock_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	int ret;
	struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
	struct mmc_card *card = md->queue.card;
	int locked = 0;

	if (card->ext_csd.boot_ro_lock & EXT_CSD_BOOT_WP_B_PERM_WP_EN)
		locked = 2;
	else if (card->ext_csd.boot_ro_lock & EXT_CSD_BOOT_WP_B_PWR_WP_EN)
		locked = 1;

	ret = snprintf(buf, PAGE_SIZE, "%d\n", locked);

	mmc_blk_put(md);

	return ret;
}

static ssize_t power_ro_lock_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	int ret;
	struct mmc_blk_data *md, *part_md;
	struct mmc_queue *mq;
	struct request *req;
	unsigned long set;

	if (kstrtoul(buf, 0, &set))
		return -EINVAL;

	if (set != 1)
		return count;

	md = mmc_blk_get(dev_to_disk(dev));
	mq = &md->queue;

	/* Dispatch locking to the block layer */
	req = blk_mq_alloc_request(mq->queue, REQ_OP_DRV_OUT, 0);
	if (IS_ERR(req)) {
		count = PTR_ERR(req);
		goto out_put;
	}
	req_to_mmc_queue_req(req)->drv_op = MMC_DRV_OP_BOOT_WP;
	req_to_mmc_queue_req(req)->drv_op_result = -EIO;
	blk_execute_rq(req, false);
	ret = req_to_mmc_queue_req(req)->drv_op_result;
	blk_mq_free_request(req);

	if (!ret) {
		pr_info("%s: Locking boot partition ro until next power on\n",
			md->disk->disk_name);
		set_disk_ro(md->disk, 1);

		list_for_each_entry(part_md, &md->part, part)
			if (part_md->area_type == MMC_BLK_DATA_AREA_BOOT) {
				pr_info("%s: Locking boot partition ro until next power on\n", part_md->disk->disk_name);
				set_disk_ro(part_md->disk, 1);
			}
	}
out_put:
	mmc_blk_put(md);
	return count;
}

static DEVICE_ATTR(ro_lock_until_next_power_on, 0,
		power_ro_lock_show, power_ro_lock_store);

static ssize_t force_ro_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	int ret;
	struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));

	ret = snprintf(buf, PAGE_SIZE, "%d\n",
		       get_disk_ro(dev_to_disk(dev)) ^
		       md->read_only);
	mmc_blk_put(md);
	return ret;
}

static ssize_t force_ro_store(struct device *dev, struct device_attribute *attr,
			      const char *buf, size_t count)
{
	int ret;
	char *end;
	struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
	unsigned long set = simple_strtoul(buf, &end, 0);
	if (end == buf) {
		ret = -EINVAL;
		goto out;
	}

	set_disk_ro(dev_to_disk(dev), set || md->read_only);
	ret = count;
out:
	mmc_blk_put(md);
	return ret;
}

static DEVICE_ATTR(force_ro, 0644, force_ro_show, force_ro_store);

static struct attribute *mmc_disk_attrs[] = {
	&dev_attr_force_ro.attr,
	&dev_attr_ro_lock_until_next_power_on.attr,
	NULL,
};

static umode_t mmc_disk_attrs_is_visible(struct kobject *kobj,
		struct attribute *a, int n)
{
	struct device *dev = kobj_to_dev(kobj);
	struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
	umode_t mode = a->mode;

	if (a == &dev_attr_ro_lock_until_next_power_on.attr &&
	    (md->area_type & MMC_BLK_DATA_AREA_BOOT) &&
	    md->queue.card->ext_csd.boot_ro_lockable) {
		mode = S_IRUGO;
		if (!(md->queue.card->ext_csd.boot_ro_lock &
				EXT_CSD_BOOT_WP_B_PWR_WP_DIS))
			mode |= S_IWUSR;
	}

	mmc_blk_put(md);
	return mode;
}

static const struct attribute_group mmc_disk_attr_group = {
	.is_visible	= mmc_disk_attrs_is_visible,
	.attrs		= mmc_disk_attrs,
};

static const struct attribute_group *mmc_disk_attr_groups[] = {
	&mmc_disk_attr_group,
	NULL,
};

static int mmc_blk_open(struct gendisk *disk, blk_mode_t mode)
{
	struct mmc_blk_data *md = mmc_blk_get(disk);
	int ret = -ENXIO;

	mutex_lock(&block_mutex);
	if (md) {
		ret = 0;
		if ((mode & BLK_OPEN_WRITE) && md->read_only) {
			mmc_blk_put(md);
			ret = -EROFS;
		}
	}
	mutex_unlock(&block_mutex);

	return ret;
}

static void mmc_blk_release(struct gendisk *disk)
{
	struct mmc_blk_data *md = disk->private_data;

	mutex_lock(&block_mutex);
	mmc_blk_put(md);
	mutex_unlock(&block_mutex);
}

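/*
 * Fake a CHS geometry (16 sectors, 4 heads) so that legacy users of the
 * HDIO_GETGEO ioctl get something plausible back.
 */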
static int
mmc_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	geo->cylinders = get_capacity(bdev->bd_disk) / (4 * 16);
	geo->heads = 4;
	geo->sectors = 16;
	return 0;
}

struct mmc_blk_ioc_data {
	struct mmc_ioc_cmd ic;
	unsigned char *buf;
	u64 buf_bytes;
	struct mmc_rpmb_data *rpmb;
};

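/*
 * Copy an mmc_ioc_cmd, and its data buffer if one is attached, in from
 * user space. Returns an ERR_PTR on failure; on success the caller owns
 * both the returned structure and its buf.
 */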
static struct mmc_blk_ioc_data *mmc_blk_ioctl_copy_from_user(
	struct mmc_ioc_cmd __user *user)
{
	struct mmc_blk_ioc_data *idata;
	int err;

	idata = kmalloc(sizeof(*idata), GFP_KERNEL);
	if (!idata) {
		err = -ENOMEM;
		goto out;
	}

	if (copy_from_user(&idata->ic, user, sizeof(idata->ic))) {
		err = -EFAULT;
		goto idata_err;
	}

	idata->buf_bytes = (u64) idata->ic.blksz * idata->ic.blocks;
	if (idata->buf_bytes > MMC_IOC_MAX_BYTES) {
		err = -EOVERFLOW;
		goto idata_err;
	}

	if (!idata->buf_bytes) {
		idata->buf = NULL;
		return idata;
	}

	idata->buf = memdup_user((void __user *)(unsigned long)
				 idata->ic.data_ptr, idata->buf_bytes);
	if (IS_ERR(idata->buf)) {
		err = PTR_ERR(idata->buf);
		goto idata_err;
	}

	return idata;

idata_err:
	kfree(idata);
out:
	return ERR_PTR(err);
}

static int mmc_blk_ioctl_copy_to_user(struct mmc_ioc_cmd __user *ic_ptr,
				      struct mmc_blk_ioc_data *idata)
{
	struct mmc_ioc_cmd *ic = &idata->ic;

	if (copy_to_user(&(ic_ptr->response), ic->response,
			 sizeof(ic->response)))
		return -EFAULT;

	if (!idata->ic.write_flag) {
		if (copy_to_user((void __user *)(unsigned long)ic->data_ptr,
				 idata->buf, idata->buf_bytes))
			return -EFAULT;
	}

	return 0;
}

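/*
 * Execute one user-supplied command against the card. This is called
 * from the driver-op issue path (mmc_blk_issue_drv_op()), i.e. from the
 * block request queue with the host claimed, so it may sleep and switch
 * partitions.
 */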
static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md,
			       struct mmc_blk_ioc_data *idata)
{
	struct mmc_command cmd = {}, sbc = {};
	struct mmc_data data = {};
	struct mmc_request mrq = {};
	struct scatterlist sg;
	bool r1b_resp;
	unsigned int busy_timeout_ms;
	int err;
	unsigned int target_part;

	if (!card || !md || !idata)
		return -EINVAL;

	/*
	 * RPMB accesses come in from the character device, so we
	 * need to target these explicitly. Else we just target the
	 * partition type for the block device the ioctl() was issued
	 * on.
	 */
	if (idata->rpmb) {
		/* Support multiple RPMB partitions */
		target_part = idata->rpmb->part_index;
		target_part |= EXT_CSD_PART_CONFIG_ACC_RPMB;
	} else {
		target_part = md->part_type;
	}

	cmd.opcode = idata->ic.opcode;
	cmd.arg = idata->ic.arg;
	cmd.flags = idata->ic.flags;

	if (idata->buf_bytes) {
		data.sg = &sg;
		data.sg_len = 1;
		data.blksz = idata->ic.blksz;
		data.blocks = idata->ic.blocks;

		sg_init_one(data.sg, idata->buf, idata->buf_bytes);

		if (idata->ic.write_flag)
			data.flags = MMC_DATA_WRITE;
		else
			data.flags = MMC_DATA_READ;

		/* data.flags must already be set before doing this. */
		mmc_set_data_timeout(&data, card);

		/* Allow overriding the timeout_ns for empirical tuning. */
		if (idata->ic.data_timeout_ns)
			data.timeout_ns = idata->ic.data_timeout_ns;

		mrq.data = &data;
	}

	mrq.cmd = &cmd;

	err = mmc_blk_part_switch(card, target_part);
	if (err)
		return err;

	if (idata->ic.is_acmd) {
		err = mmc_app_cmd(card->host, card);
		if (err)
			return err;
	}

	if (idata->rpmb) {
		sbc.opcode = MMC_SET_BLOCK_COUNT;
		/*
		 * We don't do any blockcount validation because the max size
		 * may be increased by a future standard. We just copy the
		 * 'Reliable Write' bit here.
		 */
		sbc.arg = data.blocks | (idata->ic.write_flag & BIT(31));
		sbc.flags = MMC_RSP_R1 | MMC_CMD_AC;
		mrq.sbc = &sbc;
	}

	if ((MMC_EXTRACT_INDEX_FROM_ARG(cmd.arg) == EXT_CSD_SANITIZE_START) &&
	    (cmd.opcode == MMC_SWITCH))
		return mmc_sanitize(card, idata->ic.cmd_timeout_ms);

	/* If it's an R1B response we need some more preparations. */
	busy_timeout_ms = idata->ic.cmd_timeout_ms ? : MMC_BLK_TIMEOUT_MS;
	r1b_resp = (cmd.flags & MMC_RSP_R1B) == MMC_RSP_R1B;
	if (r1b_resp)
		mmc_prepare_busy_cmd(card->host, &cmd, busy_timeout_ms);

	mmc_wait_for_req(card->host, &mrq);
	memcpy(&idata->ic.response, cmd.resp, sizeof(cmd.resp));

	if (cmd.error) {
		dev_err(mmc_dev(card->host), "%s: cmd error %d\n",
			__func__, cmd.error);
		return cmd.error;
	}
	if (data.error) {
		dev_err(mmc_dev(card->host), "%s: data error %d\n",
			__func__, data.error);
		return data.error;
	}

	/*
	 * Make sure the cache of the PARTITION_CONFIG register and
	 * PARTITION_ACCESS bits is updated in case the ioctl ext_csd write
	 * changed it successfully.
	 */
	if ((MMC_EXTRACT_INDEX_FROM_ARG(cmd.arg) == EXT_CSD_PART_CONFIG) &&
	    (cmd.opcode == MMC_SWITCH)) {
		struct mmc_blk_data *main_md = dev_get_drvdata(&card->dev);
		u8 value = MMC_EXTRACT_VALUE_FROM_ARG(cmd.arg);

		/*
		 * Update cache so the next mmc_blk_part_switch call operates
		 * on up-to-date data.
		 */
		card->ext_csd.part_config = value;
		main_md->part_curr = value & EXT_CSD_PART_CONFIG_ACC_MASK;
	}

	/*
	 * Make sure to update CACHE_CTRL in case it was changed. The cache
	 * will get turned back on if the card is re-initialized, e.g.
	 * suspend/resume or hw reset in recovery.
	 */
	if ((MMC_EXTRACT_INDEX_FROM_ARG(cmd.arg) == EXT_CSD_CACHE_CTRL) &&
	    (cmd.opcode == MMC_SWITCH)) {
		u8 value = MMC_EXTRACT_VALUE_FROM_ARG(cmd.arg) & 1;

		card->ext_csd.cache_ctrl = value;
	}

	/*
	 * According to the SD specs, some commands require a delay after
	 * issuing the command.
	 */
	if (idata->ic.postsleep_min_us)
		usleep_range(idata->ic.postsleep_min_us, idata->ic.postsleep_max_us);

	if (mmc_host_is_spi(card->host)) {
		if (idata->ic.write_flag || r1b_resp || cmd.flags & MMC_RSP_SPI_BUSY)
			return mmc_spi_err_check(card);
		return err;
	}

	/*
	 * Ensure RPMB, writes and R1B responses are completed by polling with
	 * CMD13. Note that, usually we don't need to poll when using HW busy
	 * detection, but here it's needed since some commands may indicate the
	 * error through the R1 status bits.
	 */
	if (idata->rpmb || idata->ic.write_flag || r1b_resp) {
		struct mmc_blk_busy_data cb_data = {
			.card = card,
		};

		err = __mmc_poll_for_busy(card->host, 0, busy_timeout_ms,
					  &mmc_blk_busy_cb, &cb_data);

		idata->ic.response[0] = cb_data.status;
	}

	return err;
}

static int mmc_blk_ioctl_cmd(struct mmc_blk_data *md,
			     struct mmc_ioc_cmd __user *ic_ptr,
			     struct mmc_rpmb_data *rpmb)
{
	struct mmc_blk_ioc_data *idata;
	struct mmc_blk_ioc_data *idatas[1];
	struct mmc_queue *mq;
	struct mmc_card *card;
	int err = 0, ioc_err = 0;
	struct request *req;

	idata = mmc_blk_ioctl_copy_from_user(ic_ptr);
	if (IS_ERR(idata))
		return PTR_ERR(idata);
	/* This will be NULL on non-RPMB ioctl()s */
	idata->rpmb = rpmb;

	card = md->queue.card;
	if (IS_ERR(card)) {
		err = PTR_ERR(card);
		goto cmd_done;
	}

	/*
	 * Dispatch the ioctl() into the block request queue.
	 */
	mq = &md->queue;
	req = blk_mq_alloc_request(mq->queue,
		idata->ic.write_flag ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN, 0);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto cmd_done;
	}
	idatas[0] = idata;
	req_to_mmc_queue_req(req)->drv_op =
		rpmb ? MMC_DRV_OP_IOCTL_RPMB : MMC_DRV_OP_IOCTL;
	req_to_mmc_queue_req(req)->drv_op_result = -EIO;
	req_to_mmc_queue_req(req)->drv_op_data = idatas;
	req_to_mmc_queue_req(req)->ioc_count = 1;
	blk_execute_rq(req, false);
	ioc_err = req_to_mmc_queue_req(req)->drv_op_result;
	err = mmc_blk_ioctl_copy_to_user(ic_ptr, idata);
	blk_mq_free_request(req);

cmd_done:
	kfree(idata->buf);
	kfree(idata);
	return ioc_err ? ioc_err : err;
}

static int mmc_blk_ioctl_multi_cmd(struct mmc_blk_data *md,
				   struct mmc_ioc_multi_cmd __user *user,
				   struct mmc_rpmb_data *rpmb)
{
	struct mmc_blk_ioc_data **idata = NULL;
	struct mmc_ioc_cmd __user *cmds = user->cmds;
	struct mmc_card *card;
	struct mmc_queue *mq;
	int err = 0, ioc_err = 0;
	__u64 num_of_cmds;
	unsigned int i, n;
	struct request *req;

	if (copy_from_user(&num_of_cmds, &user->num_of_cmds,
			   sizeof(num_of_cmds)))
		return -EFAULT;

	if (!num_of_cmds)
		return 0;

	if (num_of_cmds > MMC_IOC_MAX_CMDS)
		return -EINVAL;

	n = num_of_cmds;
	idata = kcalloc(n, sizeof(*idata), GFP_KERNEL);
	if (!idata)
		return -ENOMEM;

	for (i = 0; i < n; i++) {
		idata[i] = mmc_blk_ioctl_copy_from_user(&cmds[i]);
		if (IS_ERR(idata[i])) {
			err = PTR_ERR(idata[i]);
			n = i;
			goto cmd_err;
		}
		/* This will be NULL on non-RPMB ioctl()s */
		idata[i]->rpmb = rpmb;
	}

	card = md->queue.card;
	if (IS_ERR(card)) {
		err = PTR_ERR(card);
		goto cmd_err;
	}

	/*
	 * Dispatch the ioctl()s into the block request queue.
	 */
	mq = &md->queue;
	req = blk_mq_alloc_request(mq->queue,
		idata[0]->ic.write_flag ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN, 0);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto cmd_err;
	}
	req_to_mmc_queue_req(req)->drv_op =
		rpmb ? MMC_DRV_OP_IOCTL_RPMB : MMC_DRV_OP_IOCTL;
	req_to_mmc_queue_req(req)->drv_op_result = -EIO;
	req_to_mmc_queue_req(req)->drv_op_data = idata;
	req_to_mmc_queue_req(req)->ioc_count = n;
	blk_execute_rq(req, false);
	ioc_err = req_to_mmc_queue_req(req)->drv_op_result;

	/* copy to user if data and response */
	for (i = 0; i < n && !err; i++)
		err = mmc_blk_ioctl_copy_to_user(&cmds[i], idata[i]);

	blk_mq_free_request(req);

cmd_err:
	for (i = 0; i < n; i++) {
		kfree(idata[i]->buf);
		kfree(idata[i]);
	}
	kfree(idata);
	return ioc_err ? ioc_err : err;
}

static int mmc_blk_check_blkdev(struct block_device *bdev)
{
	/*
	 * The caller must have CAP_SYS_RAWIO, and must be calling this on the
	 * whole block device, not on a partition. This prevents overspray
	 * between sibling partitions.
	 */
	if (!capable(CAP_SYS_RAWIO) || bdev_is_partition(bdev))
		return -EPERM;
	return 0;
}

static int mmc_blk_ioctl(struct block_device *bdev, blk_mode_t mode,
			 unsigned int cmd, unsigned long arg)
{
	struct mmc_blk_data *md;
	int ret;

	switch (cmd) {
	case MMC_IOC_CMD:
		ret = mmc_blk_check_blkdev(bdev);
		if (ret)
			return ret;
		md = mmc_blk_get(bdev->bd_disk);
		if (!md)
			return -EINVAL;
		ret = mmc_blk_ioctl_cmd(md,
					(struct mmc_ioc_cmd __user *)arg,
					NULL);
		mmc_blk_put(md);
		return ret;
	case MMC_IOC_MULTI_CMD:
		ret = mmc_blk_check_blkdev(bdev);
		if (ret)
			return ret;
		md = mmc_blk_get(bdev->bd_disk);
		if (!md)
			return -EINVAL;
		ret = mmc_blk_ioctl_multi_cmd(md,
					(struct mmc_ioc_multi_cmd __user *)arg,
					NULL);
		mmc_blk_put(md);
		return ret;
	default:
		return -EINVAL;
	}
}

#ifdef CONFIG_COMPAT
static int mmc_blk_compat_ioctl(struct block_device *bdev, blk_mode_t mode,
				unsigned int cmd, unsigned long arg)
{
	return mmc_blk_ioctl(bdev, mode, cmd, (unsigned long) compat_ptr(arg));
}
#endif

static int mmc_blk_alternative_gpt_sector(struct gendisk *disk,
					  sector_t *sector)
{
	struct mmc_blk_data *md;
	int ret;

	md = mmc_blk_get(disk);
	if (!md)
		return -EINVAL;

	if (md->queue.card)
		ret = mmc_card_alternative_gpt_sector(md->queue.card, sector);
	else
		ret = -ENODEV;

	mmc_blk_put(md);

	return ret;
}

static const struct block_device_operations mmc_bdops = {
	.open			= mmc_blk_open,
	.release		= mmc_blk_release,
	.getgeo			= mmc_blk_getgeo,
	.owner			= THIS_MODULE,
	.ioctl			= mmc_blk_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl		= mmc_blk_compat_ioctl,
#endif
	.alternative_gpt_sector = mmc_blk_alternative_gpt_sector,
};

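/*
 * RPMB access must not race with command queueing or re-tuning:
 * switching to the RPMB partition disables CMDQ and pauses re-tuning,
 * and switching away undoes both.
 */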
static int mmc_blk_part_switch_pre(struct mmc_card *card,
				   unsigned int part_type)
{
	int ret = 0;

	if (part_type == EXT_CSD_PART_CONFIG_ACC_RPMB) {
		if (card->ext_csd.cmdq_en) {
			ret = mmc_cmdq_disable(card);
			if (ret)
				return ret;
		}
		mmc_retune_pause(card->host);
	}

	return ret;
}

static int mmc_blk_part_switch_post(struct mmc_card *card,
				    unsigned int part_type)
{
	int ret = 0;

	if (part_type == EXT_CSD_PART_CONFIG_ACC_RPMB) {
		mmc_retune_unpause(card->host);
		if (card->reenable_cmdq && !card->ext_csd.cmdq_en)
			ret = mmc_cmdq_enable(card);
	}

	return ret;
}

static inline int mmc_blk_part_switch(struct mmc_card *card,
				      unsigned int part_type)
{
	int ret = 0;
	struct mmc_blk_data *main_md = dev_get_drvdata(&card->dev);

	if (main_md->part_curr == part_type)
		return 0;

	if (mmc_card_mmc(card)) {
		u8 part_config = card->ext_csd.part_config;

		ret = mmc_blk_part_switch_pre(card, part_type);
		if (ret)
			return ret;

		part_config &= ~EXT_CSD_PART_CONFIG_ACC_MASK;
		part_config |= part_type;

		ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_PART_CONFIG, part_config,
				 card->ext_csd.part_time);
		if (ret) {
			mmc_blk_part_switch_post(card, part_type);
			return ret;
		}

		card->ext_csd.part_config = part_config;

		ret = mmc_blk_part_switch_post(card, main_md->part_curr);
	}

	main_md->part_curr = part_type;
	return ret;
}

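/*
 * Ask an SD card how many blocks of the previous write completed
 * successfully (ACMD22), so that a failed multi-block write can be
 * partially completed instead of failed outright.
 */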
static int mmc_sd_num_wr_blocks(struct mmc_card *card, u32 *written_blocks)
{
	int err;
	u32 result;
	__be32 *blocks;

	struct mmc_request mrq = {};
	struct mmc_command cmd = {};
	struct mmc_data data = {};

	struct scatterlist sg;

	err = mmc_app_cmd(card->host, card);
	if (err)
		return err;

	cmd.opcode = SD_APP_SEND_NUM_WR_BLKS;
	cmd.arg = 0;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;

	data.blksz = 4;
	data.blocks = 1;
	data.flags = MMC_DATA_READ;
	data.sg = &sg;
	data.sg_len = 1;
	mmc_set_data_timeout(&data, card);

	mrq.cmd = &cmd;
	mrq.data = &data;

	blocks = kmalloc(4, GFP_KERNEL);
	if (!blocks)
		return -ENOMEM;

	sg_init_one(&sg, blocks, 4);

	mmc_wait_for_req(card->host, &mrq);

	result = ntohl(*blocks);
	kfree(blocks);

	if (cmd.error || data.error)
		return -EIO;

	*written_blocks = result;

	return 0;
}

static unsigned int mmc_blk_clock_khz(struct mmc_host *host)
{
	if (host->actual_clock)
		return host->actual_clock / 1000;

	/* Clock may be subject to a divisor, fudge it by a factor of 2. */
	if (host->ios.clock)
		return host->ios.clock / 2000;

	/* How can there be no clock */
	WARN_ON_ONCE(1);
	return 100; /* 100 kHz is minimum possible value */
}

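/*
 * Convert a data timeout expressed as nanoseconds plus clock cycles
 * into a single millisecond value, using the (possibly divided) bus
 * clock rate for the cycle part.
 */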
static unsigned int mmc_blk_data_timeout_ms(struct mmc_host *host,
					    struct mmc_data *data)
{
	unsigned int ms = DIV_ROUND_UP(data->timeout_ns, 1000000);
	unsigned int khz;

	if (data->timeout_clks) {
		khz = mmc_blk_clock_khz(host);
		ms += DIV_ROUND_UP(data->timeout_clks, khz);
	}

	return ms;
}

/*
 * Attempts to reset the card and get back to the requested partition.
 * Therefore any error here must result in cancelling the block layer
 * request, it must not be reattempted without going through the mmc_blk
 * partition sanity checks.
 */
static int mmc_blk_reset(struct mmc_blk_data *md, struct mmc_host *host,
			 int type)
{
	int err;
	struct mmc_blk_data *main_md = dev_get_drvdata(&host->card->dev);

	if (md->reset_done & type)
		return -EEXIST;

	md->reset_done |= type;
	err = mmc_hw_reset(host->card);
	/*
	 * A successful reset will leave the card in the main partition, but
	 * upon failure it might not be, so set it to MMC_BLK_PART_INVALID
	 * in that case.
	 */
	main_md->part_curr = err ? MMC_BLK_PART_INVALID : main_md->part_type;
	if (err)
		return err;
	/* Ensure we switch back to the correct partition */
	if (mmc_blk_part_switch(host->card, md->part_type))
		/*
		 * We have failed to get back into the correct
		 * partition, so we need to abort the whole request.
		 */
		return -ENODEV;
	return 0;
}

static inline void mmc_blk_reset_success(struct mmc_blk_data *md, int type)
{
	md->reset_done &= ~type;
}

/*
 * Non-block commands are queued by the block layer and processed along
 * with all other requests; once dispatched, they are issued in this
 * function.
 */
static void mmc_blk_issue_drv_op(struct mmc_queue *mq, struct request *req)
{
	struct mmc_queue_req *mq_rq;
	struct mmc_card *card = mq->card;
	struct mmc_blk_data *md = mq->blkdata;
	struct mmc_blk_ioc_data **idata;
	bool rpmb_ioctl;
	u8 **ext_csd;
	u32 status;
	int ret;
	int i;

	mq_rq = req_to_mmc_queue_req(req);
	rpmb_ioctl = (mq_rq->drv_op == MMC_DRV_OP_IOCTL_RPMB);

	switch (mq_rq->drv_op) {
	case MMC_DRV_OP_IOCTL:
		if (card->ext_csd.cmdq_en) {
			ret = mmc_cmdq_disable(card);
			if (ret)
				break;
		}
		fallthrough;
	case MMC_DRV_OP_IOCTL_RPMB:
		idata = mq_rq->drv_op_data;
		for (i = 0, ret = 0; i < mq_rq->ioc_count; i++) {
			ret = __mmc_blk_ioctl_cmd(card, md, idata[i]);
			if (ret)
				break;
		}
		/* Always switch back to main area after RPMB access */
		if (rpmb_ioctl)
			mmc_blk_part_switch(card, 0);
		else if (card->reenable_cmdq && !card->ext_csd.cmdq_en)
			mmc_cmdq_enable(card);
		break;
	case MMC_DRV_OP_BOOT_WP:
		ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BOOT_WP,
				 card->ext_csd.boot_ro_lock |
				 EXT_CSD_BOOT_WP_B_PWR_WP_EN,
				 card->ext_csd.part_time);
		if (ret)
			pr_err("%s: Locking boot partition ro until next power on failed: %d\n",
			       md->disk->disk_name, ret);
		else
			card->ext_csd.boot_ro_lock |=
				EXT_CSD_BOOT_WP_B_PWR_WP_EN;
		break;
	case MMC_DRV_OP_GET_CARD_STATUS:
		ret = mmc_send_status(card, &status);
		if (!ret)
			ret = status;
		break;
	case MMC_DRV_OP_GET_EXT_CSD:
		ext_csd = mq_rq->drv_op_data;
		ret = mmc_get_ext_csd(card, ext_csd);
		break;
	default:
		pr_err("%s: unknown driver specific operation\n",
		       md->disk->disk_name);
		ret = -EINVAL;
		break;
	}
	mq_rq->drv_op_result = ret;
	blk_mq_end_request(req, ret ? BLK_STS_IOERR : BLK_STS_OK);
}

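/*
 * Common helper behind discard and trim: translate the request's sector
 * range into an mmc_erase() call with @erase_arg, retrying after a card
 * reset if the erase fails with -EIO.
 */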
static void mmc_blk_issue_erase_rq(struct mmc_queue *mq, struct request *req,
				   int type, unsigned int erase_arg)
{
	struct mmc_blk_data *md = mq->blkdata;
	struct mmc_card *card = md->queue.card;
	unsigned int from, nr;
	int err = 0;
	blk_status_t status = BLK_STS_OK;

	if (!mmc_can_erase(card)) {
		status = BLK_STS_NOTSUPP;
		goto fail;
	}

	from = blk_rq_pos(req);
	nr = blk_rq_sectors(req);

	do {
		err = 0;
		if (card->quirks & MMC_QUIRK_INAND_CMD38) {
			err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
					 INAND_CMD38_ARG_EXT_CSD,
					 erase_arg == MMC_TRIM_ARG ?
					 INAND_CMD38_ARG_TRIM :
					 INAND_CMD38_ARG_ERASE,
					 card->ext_csd.generic_cmd6_time);
		}
		if (!err)
			err = mmc_erase(card, from, nr, erase_arg);
	} while (err == -EIO && !mmc_blk_reset(md, card->host, type));
	if (err)
		status = BLK_STS_IOERR;
	else
		mmc_blk_reset_success(md, type);
fail:
	blk_mq_end_request(req, status);
}

static void mmc_blk_issue_trim_rq(struct mmc_queue *mq, struct request *req)
{
	mmc_blk_issue_erase_rq(mq, req, MMC_BLK_TRIM, MMC_TRIM_ARG);
}

static void mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req)
{
	struct mmc_blk_data *md = mq->blkdata;
	struct mmc_card *card = md->queue.card;
	unsigned int arg = card->erase_arg;

	if (mmc_card_broken_sd_discard(card))
		arg = SD_ERASE_ARG;

	mmc_blk_issue_erase_rq(mq, req, MMC_BLK_DISCARD, arg);
}

static void mmc_blk_issue_secdiscard_rq(struct mmc_queue *mq,
				       struct request *req)
{
	struct mmc_blk_data *md = mq->blkdata;
	struct mmc_card *card = md->queue.card;
	unsigned int from, nr, arg;
	int err = 0, type = MMC_BLK_SECDISCARD;
	blk_status_t status = BLK_STS_OK;

	if (!(mmc_can_secure_erase_trim(card))) {
		status = BLK_STS_NOTSUPP;
		goto out;
	}

	from = blk_rq_pos(req);
	nr = blk_rq_sectors(req);

	if (mmc_can_trim(card) && !mmc_erase_group_aligned(card, from, nr))
		arg = MMC_SECURE_TRIM1_ARG;
	else
		arg = MMC_SECURE_ERASE_ARG;

retry:
	if (card->quirks & MMC_QUIRK_INAND_CMD38) {
		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				 INAND_CMD38_ARG_EXT_CSD,
				 arg == MMC_SECURE_TRIM1_ARG ?
				 INAND_CMD38_ARG_SECTRIM1 :
				 INAND_CMD38_ARG_SECERASE,
				 card->ext_csd.generic_cmd6_time);
		if (err)
			goto out_retry;
	}

	err = mmc_erase(card, from, nr, arg);
	if (err == -EIO)
		goto out_retry;
	if (err) {
		status = BLK_STS_IOERR;
		goto out;
	}

	if (arg == MMC_SECURE_TRIM1_ARG) {
		if (card->quirks & MMC_QUIRK_INAND_CMD38) {
			err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
					 INAND_CMD38_ARG_EXT_CSD,
					 INAND_CMD38_ARG_SECTRIM2,
					 card->ext_csd.generic_cmd6_time);
			if (err)
				goto out_retry;
		}

		err = mmc_erase(card, from, nr, MMC_SECURE_TRIM2_ARG);
		if (err == -EIO)
			goto out_retry;
		if (err) {
			status = BLK_STS_IOERR;
			goto out;
		}
	}

out_retry:
	if (err && !mmc_blk_reset(md, card->host, type))
		goto retry;
	if (!err)
		mmc_blk_reset_success(md, type);
out:
	blk_mq_end_request(req, status);
}

static void mmc_blk_issue_flush(struct mmc_queue *mq, struct request *req)
{
	struct mmc_blk_data *md = mq->blkdata;
	struct mmc_card *card = md->queue.card;
	int ret = 0;

	ret = mmc_flush_cache(card->host);
	blk_mq_end_request(req, ret ? BLK_STS_IOERR : BLK_STS_OK);
}

/*
 * Reformat current write as a reliable write, supporting
 * both legacy and the enhanced reliable write MMC cards.
 * In each transfer we'll handle only as much as a single
 * reliable write can handle, thus finish the request in
 * partial completions.
 */
static inline void mmc_apply_rel_rw(struct mmc_blk_request *brq,
				    struct mmc_card *card,
				    struct request *req)
{
	if (!(card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN)) {
		/* Legacy mode imposes restrictions on transfers. */
		if (!IS_ALIGNED(blk_rq_pos(req), card->ext_csd.rel_sectors))
			brq->data.blocks = 1;

		if (brq->data.blocks > card->ext_csd.rel_sectors)
			brq->data.blocks = card->ext_csd.rel_sectors;
		else if (brq->data.blocks < card->ext_csd.rel_sectors)
			brq->data.blocks = 1;
	}
}

#define CMD_ERRORS_EXCL_OOR						\
	(R1_ADDRESS_ERROR |	/* Misaligned address */		\
	 R1_BLOCK_LEN_ERROR |	/* Transferred block length incorrect */\
	 R1_WP_VIOLATION |	/* Tried to write to protected block */	\
	 R1_CARD_ECC_FAILED |	/* Card ECC failed */			\
	 R1_CC_ERROR |		/* Card controller error */		\
	 R1_ERROR)		/* General/unknown error */

#define CMD_ERRORS							\
	(CMD_ERRORS_EXCL_OOR |						\
	 R1_OUT_OF_RANGE)	/* Command argument out of range */	\

static void mmc_blk_eval_resp_error(struct mmc_blk_request *brq)
{
	u32 val;

	/*
	 * Per the SD specification (physical layer version 4.10)[1],
	 * section 4.3.3, it explicitly states that "When the last
	 * block of user area is read using CMD18, the host should
	 * ignore OUT_OF_RANGE error that may occur even the sequence
	 * is correct". And JESD84-B51 for eMMC also has a similar
	 * statement on section 6.8.3.
	 *
	 * Multiple block read/write could be done by either predefined
	 * method, namely CMD23, or open-ending mode. For open-ending mode,
	 * we should ignore the OUT_OF_RANGE error as it's normal behaviour.
	 *
	 * However the spec[1] doesn't tell us whether we should also
	 * ignore that for predefined method. But per the spec[1], section
	 * 4.15 Set Block Count Command, it says "If illegal block count
	 * is set, out of range error will be indicated during read/write
	 * operation (For example, data transfer is stopped at user area
	 * boundary)." In other words, we could expect an out of range error
	 * in the response for the following CMD18/25. And if the argument of
	 * CMD23 + the argument of CMD18/25 exceed the max number of blocks,
	 * we could also expect to get a -ETIMEDOUT or any error number from
	 * the host drivers due to missing data response(for write)/data(for
	 * read), as the cards will stop the data transfer by itself per the
	 * spec. So we only need to check R1_OUT_OF_RANGE for open-ending mode.
	 */

	if (!brq->stop.error) {
		bool oor_with_open_end;
		/* If there is no error yet, check R1 response */

		val = brq->stop.resp[0] & CMD_ERRORS;
		oor_with_open_end = val & R1_OUT_OF_RANGE && !brq->mrq.sbc;

		if (val && !oor_with_open_end)
			brq->stop.error = -EIO;
	}
}

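/*
 * Fill out the mmc_blk_request for a read/write: transfer direction,
 * block count clamping, reliable-write and data-tag handling, and an sg
 * list trimmed to match the (possibly reduced) transfer size.
 */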
static void mmc_blk_data_prep(struct mmc_queue *mq, struct mmc_queue_req *mqrq,
			      int recovery_mode, bool *do_rel_wr_p,
			      bool *do_data_tag_p)
{
	struct mmc_blk_data *md = mq->blkdata;
	struct mmc_card *card = md->queue.card;
	struct mmc_blk_request *brq = &mqrq->brq;
	struct request *req = mmc_queue_req_to_req(mqrq);
	bool do_rel_wr, do_data_tag;

	/*
	 * Reliable writes are used to implement Forced Unit Access and
	 * are supported only on MMCs.
	 */
	do_rel_wr = (req->cmd_flags & REQ_FUA) &&
		    rq_data_dir(req) == WRITE &&
		    (md->flags & MMC_BLK_REL_WR);

	memset(brq, 0, sizeof(struct mmc_blk_request));

	mmc_crypto_prepare_req(mqrq);

	brq->mrq.data = &brq->data;
	brq->mrq.tag = req->tag;

	brq->stop.opcode = MMC_STOP_TRANSMISSION;
	brq->stop.arg = 0;

	if (rq_data_dir(req) == READ) {
		brq->data.flags = MMC_DATA_READ;
		brq->stop.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
	} else {
		brq->data.flags = MMC_DATA_WRITE;
		brq->stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
	}

	brq->data.blksz = 512;
	brq->data.blocks = blk_rq_sectors(req);
	brq->data.blk_addr = blk_rq_pos(req);

	/*
	 * The command queue supports 2 priorities: "high" (1) and "simple" (0).
	 * The eMMC will give "high" priority tasks priority over "simple"
	 * priority tasks. Here we always set "simple" priority by not setting
	 * MMC_DATA_PRIO.
	 */

	/*
	 * The block layer doesn't support all sector count
	 * restrictions, so we need to be prepared for too big
	 * requests.
	 */
	if (brq->data.blocks > card->host->max_blk_count)
		brq->data.blocks = card->host->max_blk_count;

	if (brq->data.blocks > 1) {
		/*
		 * Some SD cards in SPI mode return a CRC error or even lock up
		 * completely when trying to read the last block using a
		 * multiblock read command.
		 */
		if (mmc_host_is_spi(card->host) && (rq_data_dir(req) == READ) &&
		    (blk_rq_pos(req) + blk_rq_sectors(req) ==
		     get_capacity(md->disk)))
			brq->data.blocks--;

		/*
		 * After a read error, we redo the request one (native) sector
		 * at a time in order to accurately determine which
		 * sectors can be read successfully.
		 */
		if (recovery_mode)
			brq->data.blocks = queue_physical_block_size(mq->queue) >> 9;

		/*
		 * Some controllers have HW issues while operating
		 * in multiple I/O mode
		 */
		if (card->host->ops->multi_io_quirk)
			brq->data.blocks = card->host->ops->multi_io_quirk(card,
						(rq_data_dir(req) == READ) ?
						MMC_DATA_READ : MMC_DATA_WRITE,
						brq->data.blocks);
	}

	if (do_rel_wr) {
		mmc_apply_rel_rw(brq, card, req);
		brq->data.flags |= MMC_DATA_REL_WR;
	}

	/*
	 * Data tag is used only during writing meta data to speed
	 * up write and any subsequent read of this meta data
	 */
	do_data_tag = card->ext_csd.data_tag_unit_size &&
		      (req->cmd_flags & REQ_META) &&
		      (rq_data_dir(req) == WRITE) &&
		      ((brq->data.blocks * brq->data.blksz) >=
		       card->ext_csd.data_tag_unit_size);

	if (do_data_tag)
		brq->data.flags |= MMC_DATA_DAT_TAG;

	mmc_set_data_timeout(&brq->data, card);

	brq->data.sg = mqrq->sg;
	brq->data.sg_len = mmc_queue_map_sg(mq, mqrq);

	/*
	 * Adjust the sg list so it is the same size as the
	 * request.
	 */
	if (brq->data.blocks != blk_rq_sectors(req)) {
		int i, data_size = brq->data.blocks << 9;
		struct scatterlist *sg;

		for_each_sg(brq->data.sg, sg, brq->data.sg_len, i) {
			data_size -= sg->length;
			if (data_size <= 0) {
				sg->length += data_size;
				i++;
				break;
			}
		}
		brq->data.sg_len = i;
	}

	if (do_rel_wr_p)
		*do_rel_wr_p = do_rel_wr;

	if (do_data_tag_p)
		*do_data_tag_p = do_data_tag;
}

#define MMC_CQE_RETRIES 2

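/*
 * Complete a CQE request: requeue on error until MMC_CQE_RETRIES is
 * exhausted, account partial transfers, and put the card once nothing
 * is left in flight.
 */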
static void mmc_blk_cqe_complete_rq(struct mmc_queue *mq, struct request *req)
{
	struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
	struct mmc_request *mrq = &mqrq->brq.mrq;
	struct request_queue *q = req->q;
	struct mmc_host *host = mq->card->host;
	enum mmc_issue_type issue_type = mmc_issue_type(mq, req);
	unsigned long flags;
	bool put_card;
	int err;

	mmc_cqe_post_req(host, mrq);

	if (mrq->cmd && mrq->cmd->error)
		err = mrq->cmd->error;
	else if (mrq->data && mrq->data->error)
		err = mrq->data->error;
	else
		err = 0;

	if (err) {
		if (mqrq->retries++ < MMC_CQE_RETRIES)
			blk_mq_requeue_request(req, true);
		else
			blk_mq_end_request(req, BLK_STS_IOERR);
	} else if (mrq->data) {
		if (blk_update_request(req, BLK_STS_OK, mrq->data->bytes_xfered))
			blk_mq_requeue_request(req, true);
		else
			__blk_mq_end_request(req, BLK_STS_OK);
	} else {
		blk_mq_end_request(req, BLK_STS_OK);
	}

	spin_lock_irqsave(&mq->lock, flags);

	mq->in_flight[issue_type] -= 1;

	put_card = (mmc_tot_in_flight(mq) == 0);

	mmc_cqe_check_busy(mq);

	spin_unlock_irqrestore(&mq->lock, flags);

	if (!mq->cqe_busy)
		blk_mq_run_hw_queues(q, true);

	if (put_card)
		mmc_put_card(mq->card, &mq->ctx);
}

void mmc_blk_cqe_recovery(struct mmc_queue *mq)
{
	struct mmc_card *card = mq->card;
	struct mmc_host *host = card->host;
	int err;

	pr_debug("%s: CQE recovery start\n", mmc_hostname(host));

	err = mmc_cqe_recovery(host);
	if (err)
		mmc_blk_reset(mq->blkdata, host, MMC_BLK_CQE_RECOVERY);
	mmc_blk_reset_success(mq->blkdata, MMC_BLK_CQE_RECOVERY);

	pr_debug("%s: CQE recovery done\n", mmc_hostname(host));
}

static void mmc_blk_cqe_req_done(struct mmc_request *mrq)
{
	struct mmc_queue_req *mqrq = container_of(mrq, struct mmc_queue_req,
						  brq.mrq);
	struct request *req = mmc_queue_req_to_req(mqrq);
	struct request_queue *q = req->q;
	struct mmc_queue *mq = q->queuedata;

	/*
	 * Block layer timeouts race with completions which means the normal
	 * completion path cannot be used during recovery.
	 */
	if (mq->in_recovery)
		mmc_blk_cqe_complete_rq(mq, req);
	else if (likely(!blk_should_fake_timeout(req->q)))
		blk_mq_complete_request(req);
}

static int mmc_blk_cqe_start_req(struct mmc_host *host, struct mmc_request *mrq)
{
	mrq->done		= mmc_blk_cqe_req_done;
	mrq->recovery_notifier	= mmc_cqe_recovery_notifier;

	return mmc_cqe_start_req(host, mrq);
}

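/*
 * Prepare a CQE direct command (DCMD), i.e. a non-data command slot,
 * reusing the request's tag.
 */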
static struct mmc_request *mmc_blk_cqe_prep_dcmd(struct mmc_queue_req *mqrq,
						 struct request *req)
{
	struct mmc_blk_request *brq = &mqrq->brq;

	memset(brq, 0, sizeof(*brq));

	brq->mrq.cmd = &brq->cmd;
	brq->mrq.tag = req->tag;

	return &brq->mrq;
}

static int mmc_blk_cqe_issue_flush(struct mmc_queue *mq, struct request *req)
{
	struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
	struct mmc_request *mrq = mmc_blk_cqe_prep_dcmd(mqrq, req);

	mrq->cmd->opcode = MMC_SWITCH;
	mrq->cmd->arg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
			(EXT_CSD_FLUSH_CACHE << 16) |
			(1 << 8) |
			EXT_CSD_CMD_SET_NORMAL;
	mrq->cmd->flags = MMC_CMD_AC | MMC_RSP_R1B;

	return mmc_blk_cqe_start_req(mq->card->host, mrq);
}

static int mmc_blk_hsq_issue_rw_rq(struct mmc_queue *mq, struct request *req)
{
	struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
	struct mmc_host *host = mq->card->host;
	int err;

	mmc_blk_rw_rq_prep(mqrq, mq->card, 0, mq);
	mqrq->brq.mrq.done = mmc_blk_hsq_req_done;
	mmc_pre_req(host, &mqrq->brq.mrq);

	err = mmc_cqe_start_req(host, &mqrq->brq.mrq);
	if (err)
		mmc_post_req(host, &mqrq->brq.mrq, err);

	return err;
}

static int mmc_blk_cqe_issue_rw_rq(struct mmc_queue *mq, struct request *req)
{
	struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
	struct mmc_host *host = mq->card->host;

	if (host->hsq_enabled)
		return mmc_blk_hsq_issue_rw_rq(mq, req);

	mmc_blk_data_prep(mq, mqrq, 0, NULL, NULL);

	return mmc_blk_cqe_start_req(mq->card->host, &mqrq->brq.mrq);
}

static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
			       struct mmc_card *card,
			       int recovery_mode,
			       struct mmc_queue *mq)
{
	u32 readcmd, writecmd;
	struct mmc_blk_request *brq = &mqrq->brq;
	struct request *req = mmc_queue_req_to_req(mqrq);
	struct mmc_blk_data *md = mq->blkdata;
	bool do_rel_wr, do_data_tag;

	mmc_blk_data_prep(mq, mqrq, recovery_mode, &do_rel_wr, &do_data_tag);

	brq->mrq.cmd = &brq->cmd;

	brq->cmd.arg = blk_rq_pos(req);
	if (!mmc_card_blockaddr(card))
		brq->cmd.arg <<= 9;
	brq->cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;

	if (brq->data.blocks > 1 || do_rel_wr) {
		/* SPI multiblock writes terminate using a special
		 * token, not a STOP_TRANSMISSION request.
		 */
		if (!mmc_host_is_spi(card->host) ||
		    rq_data_dir(req) == READ)
			brq->mrq.stop = &brq->stop;
		readcmd = MMC_READ_MULTIPLE_BLOCK;
		writecmd = MMC_WRITE_MULTIPLE_BLOCK;
	} else {
		brq->mrq.stop = NULL;
		readcmd = MMC_READ_SINGLE_BLOCK;
		writecmd = MMC_WRITE_BLOCK;
	}
	brq->cmd.opcode = rq_data_dir(req) == READ ? readcmd : writecmd;

	/*
	 * Pre-defined multi-block transfers are preferable to
	 * open ended-ones (and necessary for reliable writes).
	 * However, it is not sufficient to just send CMD23,
	 * and avoid the final CMD12, as on an error condition
	 * CMD12 (stop) needs to be sent anyway. This, coupled
	 * with Auto-CMD23 enhancements provided by some
	 * hosts, means that the complexity of dealing
	 * with this is best left to the host. If CMD23 is
	 * supported by card and host, we'll fill sbc in and let
	 * the host deal with handling it correctly. This means
	 * that for hosts that don't expose MMC_CAP_CMD23, no
	 * change of behavior will be observed.
	 *
	 * N.B: Some MMC cards experience perf degradation.
	 * We'll avoid using CMD23-bounded multiblock writes for
	 * these, while retaining features like reliable writes.
	 */
	if ((md->flags & MMC_BLK_CMD23) && mmc_op_multi(brq->cmd.opcode) &&
	    (do_rel_wr || !(card->quirks & MMC_QUIRK_BLK_NO_CMD23) ||
	     do_data_tag)) {
		brq->sbc.opcode = MMC_SET_BLOCK_COUNT;
		brq->sbc.arg = brq->data.blocks |
			(do_rel_wr ? (1 << 31) : 0) |
			(do_data_tag ? (1 << 29) : 0);
		brq->sbc.flags = MMC_RSP_R1 | MMC_CMD_AC;
		brq->mrq.sbc = &brq->sbc;
	}
}

#define MMC_MAX_RETRIES		5
#define MMC_DATA_RETRIES	2
#define MMC_NO_RETRIES		(MMC_MAX_RETRIES + 1)

static int mmc_blk_send_stop(struct mmc_card *card, unsigned int timeout)
{
	struct mmc_command cmd = {
		.opcode = MMC_STOP_TRANSMISSION,
		.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC,
		/* Some hosts wait for busy anyway, so provide a busy timeout */
		.busy_timeout = timeout,
	};

	return mmc_wait_for_cmd(card->host, &cmd, 5);
}

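/*
 * Try to get the card out of the data transfer state after an error by
 * sending CMD12 (stop) and then polling with CMD13 until the card
 * reports ready for data.
 */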
static int mmc_blk_fix_state(struct mmc_card *card, struct request *req)
{
	struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
	struct mmc_blk_request *brq = &mqrq->brq;
	unsigned int timeout = mmc_blk_data_timeout_ms(card->host, &brq->data);
	int err;

	mmc_retune_hold_now(card->host);

	mmc_blk_send_stop(card, timeout);

	err = mmc_poll_for_busy(card, timeout, false, MMC_BUSY_IO);

	mmc_retune_release(card->host);

	return err;
}

1706 | #define MMC_READ_SINGLE_RETRIES 2 |
1707 | |
1708 | /* Single (native) sector read during recovery */ |
1709 | static void mmc_blk_read_single(struct mmc_queue *mq, struct request *req) |
1710 | { |
	struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
1712 | struct mmc_request *mrq = &mqrq->brq.mrq; |
1713 | struct mmc_card *card = mq->card; |
1714 | struct mmc_host *host = card->host; |
1715 | blk_status_t error = BLK_STS_OK; |
	size_t bytes_per_read = queue_physical_block_size(mq->queue);
1717 | |
1718 | do { |
1719 | u32 status; |
1720 | int err; |
1721 | int retries = 0; |
1722 | |
1723 | while (retries++ <= MMC_READ_SINGLE_RETRIES) { |
			mmc_blk_rw_rq_prep(mqrq, card, 1, mq);
1725 | |
1726 | mmc_wait_for_req(host, mrq); |
1727 | |
			err = mmc_send_status(card, &status);
1729 | if (err) |
1730 | goto error_exit; |
1731 | |
1732 | if (!mmc_host_is_spi(host) && |
1733 | !mmc_ready_for_data(status)) { |
1734 | err = mmc_blk_fix_state(card, req); |
1735 | if (err) |
1736 | goto error_exit; |
1737 | } |
1738 | |
1739 | if (!mrq->cmd->error) |
1740 | break; |
1741 | } |
1742 | |
1743 | if (mrq->cmd->error || |
1744 | mrq->data->error || |
1745 | (!mmc_host_is_spi(host) && |
1746 | (mrq->cmd->resp[0] & CMD_ERRORS || status & CMD_ERRORS))) |
1747 | error = BLK_STS_IOERR; |
1748 | else |
1749 | error = BLK_STS_OK; |
1750 | |
	} while (blk_update_request(req, error, bytes_per_read));
1752 | |
1753 | return; |
1754 | |
1755 | error_exit: |
1756 | mrq->data->bytes_xfered = 0; |
	blk_update_request(req, BLK_STS_IOERR, bytes_per_read);
1758 | /* Let it try the remaining request again */ |
1759 | if (mqrq->retries > MMC_MAX_RETRIES - 1) |
1760 | mqrq->retries = MMC_MAX_RETRIES - 1; |
1761 | } |
1762 | |
1763 | static inline bool mmc_blk_oor_valid(struct mmc_blk_request *brq) |
1764 | { |
1765 | return !!brq->mrq.sbc; |
1766 | } |
1767 | |
1768 | static inline u32 mmc_blk_stop_err_bits(struct mmc_blk_request *brq) |
1769 | { |
1770 | return mmc_blk_oor_valid(brq) ? CMD_ERRORS : CMD_ERRORS_EXCL_OOR; |
1771 | } |
1772 | |
1773 | /* |
1774 | * Check for errors the host controller driver might not have seen such as |
1775 | * response mode errors or invalid card state. |
1776 | */ |
1777 | static bool mmc_blk_status_error(struct request *req, u32 status) |
1778 | { |
	struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
1780 | struct mmc_blk_request *brq = &mqrq->brq; |
1781 | struct mmc_queue *mq = req->q->queuedata; |
1782 | u32 stop_err_bits; |
1783 | |
1784 | if (mmc_host_is_spi(mq->card->host)) |
1785 | return false; |
1786 | |
1787 | stop_err_bits = mmc_blk_stop_err_bits(brq); |
1788 | |
1789 | return brq->cmd.resp[0] & CMD_ERRORS || |
1790 | brq->stop.resp[0] & stop_err_bits || |
1791 | status & stop_err_bits || |
1792 | (rq_data_dir(req) == WRITE && !mmc_ready_for_data(status)); |
1793 | } |
1794 | |
1795 | static inline bool mmc_blk_cmd_started(struct mmc_blk_request *brq) |
1796 | { |
1797 | return !brq->sbc.error && !brq->cmd.error && |
1798 | !(brq->cmd.resp[0] & CMD_ERRORS); |
1799 | } |
1800 | |
1801 | /* |
1802 | * Requests are completed by mmc_blk_mq_complete_rq() which sets simple |
1803 | * policy: |
1804 | * 1. A request that has transferred at least some data is considered |
1805 | * successful and will be requeued if there is remaining data to |
1806 | * transfer. |
1807 | * 2. Otherwise the number of retries is incremented and the request |
1808 | * will be requeued if there are remaining retries. |
1809 | * 3. Otherwise the request will be errored out. |
1810 | * That means mmc_blk_mq_complete_rq() is controlled by bytes_xfered and |
1811 | * mqrq->retries. So there are only 4 possible actions here: |
1812 | * 1. do not accept the bytes_xfered value i.e. set it to zero |
1813 | * 2. change mqrq->retries to determine the number of retries |
1814 | * 3. try to reset the card |
1815 | * 4. read one sector at a time |
1816 | */ |
1817 | static void mmc_blk_mq_rw_recovery(struct mmc_queue *mq, struct request *req) |
1818 | { |
1819 | int type = rq_data_dir(req) == READ ? MMC_BLK_READ : MMC_BLK_WRITE; |
	struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
1821 | struct mmc_blk_request *brq = &mqrq->brq; |
1822 | struct mmc_blk_data *md = mq->blkdata; |
1823 | struct mmc_card *card = mq->card; |
1824 | u32 status; |
1825 | u32 blocks; |
1826 | int err; |
1827 | |
1828 | /* |
1829 | * Some errors the host driver might not have seen. Set the number of |
1830 | * bytes transferred to zero in that case. |
1831 | */ |
	err = __mmc_send_status(card, &status, 0);
1833 | if (err || mmc_blk_status_error(req, status)) |
1834 | brq->data.bytes_xfered = 0; |
1835 | |
	mmc_retune_release(card->host);
1837 | |
1838 | /* |
1839 | * Try again to get the status. This also provides an opportunity for |
1840 | * re-tuning. |
1841 | */ |
1842 | if (err) |
		err = __mmc_send_status(card, &status, 0);
1844 | |
1845 | /* |
1846 | * Nothing more to do after the number of bytes transferred has been |
1847 | * updated and there is no card. |
1848 | */ |
1849 | if (err && mmc_detect_card_removed(host: card->host)) |
1850 | return; |
1851 | |
1852 | /* Try to get back to "tran" state */ |
1853 | if (!mmc_host_is_spi(mq->card->host) && |
1854 | (err || !mmc_ready_for_data(status))) |
		err = mmc_blk_fix_state(mq->card, req);
1856 | |
1857 | /* |
1858 | * Special case for SD cards where the card might record the number of |
1859 | * blocks written. |
1860 | */ |
1861 | if (!err && mmc_blk_cmd_started(brq) && mmc_card_sd(card) && |
1862 | rq_data_dir(req) == WRITE) { |
		if (mmc_sd_num_wr_blocks(card, &blocks))
1864 | brq->data.bytes_xfered = 0; |
1865 | else |
1866 | brq->data.bytes_xfered = blocks << 9; |
1867 | } |
1868 | |
1869 | /* Reset if the card is in a bad state */ |
	if (!mmc_host_is_spi(mq->card->host) &&
	    err && mmc_blk_reset(md, card->host, type)) {
		pr_err("%s: recovery failed!\n", req->q->disk->disk_name);
1873 | mqrq->retries = MMC_NO_RETRIES; |
1874 | return; |
1875 | } |
1876 | |
1877 | /* |
1878 | * If anything was done, just return and if there is anything remaining |
1879 | * on the request it will get requeued. |
1880 | */ |
1881 | if (brq->data.bytes_xfered) |
1882 | return; |
1883 | |
1884 | /* Reset before last retry */ |
1885 | if (mqrq->retries + 1 == MMC_MAX_RETRIES && |
	    mmc_blk_reset(md, card->host, type))
1887 | return; |
1888 | |
1889 | /* Command errors fail fast, so use all MMC_MAX_RETRIES */ |
1890 | if (brq->sbc.error || brq->cmd.error) |
1891 | return; |
1892 | |
1893 | /* Reduce the remaining retries for data errors */ |
1894 | if (mqrq->retries < MMC_MAX_RETRIES - MMC_DATA_RETRIES) { |
1895 | mqrq->retries = MMC_MAX_RETRIES - MMC_DATA_RETRIES; |
1896 | return; |
1897 | } |
1898 | |
	if (rq_data_dir(req) == READ && brq->data.blocks >
	    queue_physical_block_size(mq->queue) >> 9) {
1901 | /* Read one (native) sector at a time */ |
1902 | mmc_blk_read_single(mq, req); |
1903 | return; |
1904 | } |
1905 | } |
1906 | |
1907 | static inline bool mmc_blk_rq_error(struct mmc_blk_request *brq) |
1908 | { |
1909 | mmc_blk_eval_resp_error(brq); |
1910 | |
1911 | return brq->sbc.error || brq->cmd.error || brq->stop.error || |
1912 | brq->data.error || brq->cmd.resp[0] & CMD_ERRORS; |
1913 | } |
1914 | |
1915 | static int mmc_spi_err_check(struct mmc_card *card) |
1916 | { |
1917 | u32 status = 0; |
1918 | int err; |
1919 | |
1920 | /* |
	 * SPI does not have a TRAN state we have to wait on; instead the
	 * card is ready again when it no longer holds the line LOW.
1923 | * We still have to ensure two things here before we know the write |
1924 | * was successful: |
1925 | * 1. The card has not disconnected during busy and we actually read our |
1926 | * own pull-up, thinking it was still connected, so ensure it |
1927 | * still responds. |
1928 | * 2. Check for any error bits, in particular R1_SPI_IDLE to catch a |
1929 | * just reconnected card after being disconnected during busy. |
1930 | */ |
	err = __mmc_send_status(card, &status, 0);
1932 | if (err) |
1933 | return err; |
1934 | /* All R1 and R2 bits of SPI are errors in our case */ |
1935 | if (status) |
1936 | return -EIO; |
1937 | return 0; |
1938 | } |
1939 | |
1940 | static int mmc_blk_busy_cb(void *cb_data, bool *busy) |
1941 | { |
1942 | struct mmc_blk_busy_data *data = cb_data; |
1943 | u32 status = 0; |
1944 | int err; |
1945 | |
	err = mmc_send_status(data->card, &status);
1947 | if (err) |
1948 | return err; |
1949 | |
1950 | /* Accumulate response error bits. */ |
1951 | data->status |= status; |
1952 | |
1953 | *busy = !mmc_ready_for_data(status); |
1954 | return 0; |
1955 | } |
1956 | |
1957 | static int mmc_blk_card_busy(struct mmc_card *card, struct request *req) |
1958 | { |
	struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
1960 | struct mmc_blk_busy_data cb_data; |
1961 | int err; |
1962 | |
1963 | if (rq_data_dir(req) == READ) |
1964 | return 0; |
1965 | |
1966 | if (mmc_host_is_spi(card->host)) { |
1967 | err = mmc_spi_err_check(card); |
1968 | if (err) |
1969 | mqrq->brq.data.bytes_xfered = 0; |
1970 | return err; |
1971 | } |
1972 | |
1973 | cb_data.card = card; |
1974 | cb_data.status = 0; |
	err = __mmc_poll_for_busy(card->host, 0, MMC_BLK_TIMEOUT_MS,
				  &mmc_blk_busy_cb, &cb_data);
1977 | |
1978 | /* |
1979 | * Do not assume data transferred correctly if there are any error bits |
1980 | * set. |
1981 | */ |
	if (cb_data.status & mmc_blk_stop_err_bits(&mqrq->brq)) {
1983 | mqrq->brq.data.bytes_xfered = 0; |
1984 | err = err ? err : -EIO; |
1985 | } |
1986 | |
1987 | /* Copy the exception bit so it will be seen later on */ |
1988 | if (mmc_card_mmc(card) && cb_data.status & R1_EXCEPTION_EVENT) |
1989 | mqrq->brq.cmd.resp[0] |= R1_EXCEPTION_EVENT; |
1990 | |
1991 | return err; |
1992 | } |
1993 | |
1994 | static inline void mmc_blk_rw_reset_success(struct mmc_queue *mq, |
1995 | struct request *req) |
1996 | { |
1997 | int type = rq_data_dir(req) == READ ? MMC_BLK_READ : MMC_BLK_WRITE; |
1998 | |
	mmc_blk_reset_success(mq->blkdata, type);
2000 | } |
2001 | |
2002 | static void mmc_blk_mq_complete_rq(struct mmc_queue *mq, struct request *req) |
2003 | { |
	struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
	unsigned int nr_bytes = mqrq->brq.data.bytes_xfered;

	if (nr_bytes) {
		if (blk_update_request(req, BLK_STS_OK, nr_bytes))
			blk_mq_requeue_request(req, true);
		else
			__blk_mq_end_request(req, BLK_STS_OK);
	} else if (!blk_rq_bytes(req)) {
		__blk_mq_end_request(req, BLK_STS_IOERR);
	} else if (mqrq->retries++ < MMC_MAX_RETRIES) {
		blk_mq_requeue_request(req, true);
	} else {
		if (mmc_card_removed(mq->card))
			req->rq_flags |= RQF_QUIET;
		blk_mq_end_request(req, BLK_STS_IOERR);
2020 | } |
2021 | } |
2022 | |
2023 | static bool mmc_blk_urgent_bkops_needed(struct mmc_queue *mq, |
2024 | struct mmc_queue_req *mqrq) |
2025 | { |
2026 | return mmc_card_mmc(mq->card) && !mmc_host_is_spi(mq->card->host) && |
2027 | (mqrq->brq.cmd.resp[0] & R1_EXCEPTION_EVENT || |
2028 | mqrq->brq.stop.resp[0] & R1_EXCEPTION_EVENT); |
2029 | } |
2030 | |
2031 | static void mmc_blk_urgent_bkops(struct mmc_queue *mq, |
2032 | struct mmc_queue_req *mqrq) |
2033 | { |
2034 | if (mmc_blk_urgent_bkops_needed(mq, mqrq)) |
		mmc_run_bkops(mq->card);
2036 | } |
2037 | |
2038 | static void mmc_blk_hsq_req_done(struct mmc_request *mrq) |
2039 | { |
2040 | struct mmc_queue_req *mqrq = |
2041 | container_of(mrq, struct mmc_queue_req, brq.mrq); |
	struct request *req = mmc_queue_req_to_req(mqrq);
2043 | struct request_queue *q = req->q; |
2044 | struct mmc_queue *mq = q->queuedata; |
2045 | struct mmc_host *host = mq->card->host; |
2046 | unsigned long flags; |
2047 | |
	if (mmc_blk_rq_error(&mqrq->brq) ||
2049 | mmc_blk_urgent_bkops_needed(mq, mqrq)) { |
2050 | spin_lock_irqsave(&mq->lock, flags); |
2051 | mq->recovery_needed = true; |
2052 | mq->recovery_req = req; |
		spin_unlock_irqrestore(&mq->lock, flags);
2054 | |
2055 | host->cqe_ops->cqe_recovery_start(host); |
2056 | |
		schedule_work(&mq->recovery_work);
2058 | return; |
2059 | } |
2060 | |
2061 | mmc_blk_rw_reset_success(mq, req); |
2062 | |
2063 | /* |
2064 | * Block layer timeouts race with completions which means the normal |
2065 | * completion path cannot be used during recovery. |
2066 | */ |
2067 | if (mq->in_recovery) |
2068 | mmc_blk_cqe_complete_rq(mq, req); |
2069 | else if (likely(!blk_should_fake_timeout(req->q))) |
		blk_mq_complete_request(req);
2071 | } |
2072 | |
2073 | void mmc_blk_mq_complete(struct request *req) |
2074 | { |
2075 | struct mmc_queue *mq = req->q->queuedata; |
2076 | struct mmc_host *host = mq->card->host; |
2077 | |
2078 | if (host->cqe_enabled) |
2079 | mmc_blk_cqe_complete_rq(mq, req); |
2080 | else if (likely(!blk_should_fake_timeout(req->q))) |
2081 | mmc_blk_mq_complete_rq(mq, req); |
2082 | } |
2083 | |
2084 | static void mmc_blk_mq_poll_completion(struct mmc_queue *mq, |
2085 | struct request *req) |
2086 | { |
	struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
2088 | struct mmc_host *host = mq->card->host; |
2089 | |
	if (mmc_blk_rq_error(&mqrq->brq) ||
	    mmc_blk_card_busy(mq->card, req)) {
2092 | mmc_blk_mq_rw_recovery(mq, req); |
2093 | } else { |
2094 | mmc_blk_rw_reset_success(mq, req); |
2095 | mmc_retune_release(host); |
2096 | } |
2097 | |
2098 | mmc_blk_urgent_bkops(mq, mqrq); |
2099 | } |
2100 | |
2101 | static void mmc_blk_mq_dec_in_flight(struct mmc_queue *mq, enum mmc_issue_type issue_type) |
2102 | { |
2103 | unsigned long flags; |
2104 | bool put_card; |
2105 | |
2106 | spin_lock_irqsave(&mq->lock, flags); |
2107 | |
2108 | mq->in_flight[issue_type] -= 1; |
2109 | |
2110 | put_card = (mmc_tot_in_flight(mq) == 0); |
2111 | |
	spin_unlock_irqrestore(&mq->lock, flags);

	if (put_card)
		mmc_put_card(mq->card, &mq->ctx);
2116 | } |
2117 | |
2118 | static void mmc_blk_mq_post_req(struct mmc_queue *mq, struct request *req, |
2119 | bool can_sleep) |
2120 | { |
2121 | enum mmc_issue_type issue_type = mmc_issue_type(mq, req); |
	struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
2123 | struct mmc_request *mrq = &mqrq->brq.mrq; |
2124 | struct mmc_host *host = mq->card->host; |
2125 | |
	mmc_post_req(host, mrq, 0);
2127 | |
2128 | /* |
2129 | * Block layer timeouts race with completions which means the normal |
2130 | * completion path cannot be used during recovery. |
2131 | */ |
2132 | if (mq->in_recovery) { |
2133 | mmc_blk_mq_complete_rq(mq, req); |
2134 | } else if (likely(!blk_should_fake_timeout(req->q))) { |
2135 | if (can_sleep) |
			blk_mq_complete_request_direct(req, mmc_blk_mq_complete);
		else
			blk_mq_complete_request(req);
2139 | } |
2140 | |
2141 | mmc_blk_mq_dec_in_flight(mq, issue_type); |
2142 | } |
2143 | |
2144 | void mmc_blk_mq_recovery(struct mmc_queue *mq) |
2145 | { |
2146 | struct request *req = mq->recovery_req; |
2147 | struct mmc_host *host = mq->card->host; |
	struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
2149 | |
2150 | mq->recovery_req = NULL; |
2151 | mq->rw_wait = false; |
2152 | |
	if (mmc_blk_rq_error(&mqrq->brq)) {
2154 | mmc_retune_hold_now(host); |
2155 | mmc_blk_mq_rw_recovery(mq, req); |
2156 | } |
2157 | |
2158 | mmc_blk_urgent_bkops(mq, mqrq); |
2159 | |
	mmc_blk_mq_post_req(mq, req, true);
2161 | } |
2162 | |
2163 | static void mmc_blk_mq_complete_prev_req(struct mmc_queue *mq, |
2164 | struct request **prev_req) |
2165 | { |
	if (mmc_host_done_complete(mq->card->host))
2167 | return; |
2168 | |
2169 | mutex_lock(&mq->complete_lock); |
2170 | |
2171 | if (!mq->complete_req) |
2172 | goto out_unlock; |
2173 | |
	mmc_blk_mq_poll_completion(mq, mq->complete_req);

	if (prev_req)
		*prev_req = mq->complete_req;
	else
		mmc_blk_mq_post_req(mq, mq->complete_req, true);

	mq->complete_req = NULL;

out_unlock:
	mutex_unlock(&mq->complete_lock);
2185 | } |
2186 | |
2187 | void mmc_blk_mq_complete_work(struct work_struct *work) |
2188 | { |
2189 | struct mmc_queue *mq = container_of(work, struct mmc_queue, |
2190 | complete_work); |
2191 | |
2192 | mmc_blk_mq_complete_prev_req(mq, NULL); |
2193 | } |
2194 | |
2195 | static void mmc_blk_mq_req_done(struct mmc_request *mrq) |
2196 | { |
2197 | struct mmc_queue_req *mqrq = container_of(mrq, struct mmc_queue_req, |
2198 | brq.mrq); |
	struct request *req = mmc_queue_req_to_req(mqrq);
2200 | struct request_queue *q = req->q; |
2201 | struct mmc_queue *mq = q->queuedata; |
2202 | struct mmc_host *host = mq->card->host; |
2203 | unsigned long flags; |
2204 | |
2205 | if (!mmc_host_done_complete(host)) { |
2206 | bool waiting; |
2207 | |
2208 | /* |
2209 | * We cannot complete the request in this context, so record |
2210 | * that there is a request to complete, and that a following |
2211 | * request does not need to wait (although it does need to |
2212 | * complete complete_req first). |
2213 | */ |
2214 | spin_lock_irqsave(&mq->lock, flags); |
2215 | mq->complete_req = req; |
2216 | mq->rw_wait = false; |
2217 | waiting = mq->waiting; |
		spin_unlock_irqrestore(&mq->lock, flags);
2219 | |
2220 | /* |
2221 | * If 'waiting' then the waiting task will complete this |
2222 | * request, otherwise queue a work to do it. Note that |
2223 | * complete_work may still race with the dispatch of a following |
2224 | * request. |
2225 | */ |
2226 | if (waiting) |
2227 | wake_up(&mq->wait); |
2228 | else |
			queue_work(mq->card->complete_wq, &mq->complete_work);
2230 | |
2231 | return; |
2232 | } |
2233 | |
2234 | /* Take the recovery path for errors or urgent background operations */ |
	if (mmc_blk_rq_error(&mqrq->brq) ||
2236 | mmc_blk_urgent_bkops_needed(mq, mqrq)) { |
2237 | spin_lock_irqsave(&mq->lock, flags); |
2238 | mq->recovery_needed = true; |
2239 | mq->recovery_req = req; |
		spin_unlock_irqrestore(&mq->lock, flags);
		wake_up(&mq->wait);
		schedule_work(&mq->recovery_work);
2243 | return; |
2244 | } |
2245 | |
2246 | mmc_blk_rw_reset_success(mq, req); |
2247 | |
2248 | mq->rw_wait = false; |
2249 | wake_up(&mq->wait); |
2250 | |
2251 | /* context unknown */ |
	mmc_blk_mq_post_req(mq, req, false);
2253 | } |
2254 | |
2255 | static bool mmc_blk_rw_wait_cond(struct mmc_queue *mq, int *err) |
2256 | { |
2257 | unsigned long flags; |
2258 | bool done; |
2259 | |
2260 | /* |
2261 | * Wait while there is another request in progress, but not if recovery |
2262 | * is needed. Also indicate whether there is a request waiting to start. |
2263 | */ |
2264 | spin_lock_irqsave(&mq->lock, flags); |
2265 | if (mq->recovery_needed) { |
2266 | *err = -EBUSY; |
2267 | done = true; |
2268 | } else { |
2269 | done = !mq->rw_wait; |
2270 | } |
2271 | mq->waiting = !done; |
	spin_unlock_irqrestore(&mq->lock, flags);
2273 | |
2274 | return done; |
2275 | } |
2276 | |
2277 | static int mmc_blk_rw_wait(struct mmc_queue *mq, struct request **prev_req) |
2278 | { |
2279 | int err = 0; |
2280 | |
2281 | wait_event(mq->wait, mmc_blk_rw_wait_cond(mq, &err)); |
2282 | |
2283 | /* Always complete the previous request if there is one */ |
2284 | mmc_blk_mq_complete_prev_req(mq, prev_req); |
2285 | |
2286 | return err; |
2287 | } |
2288 | |
2289 | static int mmc_blk_mq_issue_rw_rq(struct mmc_queue *mq, |
2290 | struct request *req) |
2291 | { |
	struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
2293 | struct mmc_host *host = mq->card->host; |
2294 | struct request *prev_req = NULL; |
2295 | int err = 0; |
2296 | |
	mmc_blk_rw_rq_prep(mqrq, mq->card, 0, mq);
2298 | |
2299 | mqrq->brq.mrq.done = mmc_blk_mq_req_done; |
2300 | |
	mmc_pre_req(host, &mqrq->brq.mrq);

	err = mmc_blk_rw_wait(mq, &prev_req);
2304 | if (err) |
2305 | goto out_post_req; |
2306 | |
2307 | mq->rw_wait = true; |
2308 | |
	err = mmc_start_request(host, &mqrq->brq.mrq);

	if (prev_req)
		mmc_blk_mq_post_req(mq, prev_req, true);
2313 | |
2314 | if (err) |
2315 | mq->rw_wait = false; |
2316 | |
2317 | /* Release re-tuning here where there is no synchronization required */ |
2318 | if (err || mmc_host_done_complete(host)) |
2319 | mmc_retune_release(host); |
2320 | |
2321 | out_post_req: |
2322 | if (err) |
		mmc_post_req(host, &mqrq->brq.mrq, err);
2324 | |
2325 | return err; |
2326 | } |
2327 | |
2328 | static int mmc_blk_wait_for_idle(struct mmc_queue *mq, struct mmc_host *host) |
2329 | { |
2330 | if (host->cqe_enabled) |
2331 | return host->cqe_ops->cqe_wait_for_idle(host); |
2332 | |
2333 | return mmc_blk_rw_wait(mq, NULL); |
2334 | } |
2335 | |
2336 | enum mmc_issued mmc_blk_mq_issue_rq(struct mmc_queue *mq, struct request *req) |
2337 | { |
2338 | struct mmc_blk_data *md = mq->blkdata; |
2339 | struct mmc_card *card = md->queue.card; |
2340 | struct mmc_host *host = card->host; |
2341 | int ret; |
2342 | |
	ret = mmc_blk_part_switch(card, md->part_type);
2344 | if (ret) |
2345 | return MMC_REQ_FAILED_TO_START; |
2346 | |
2347 | switch (mmc_issue_type(mq, req)) { |
2348 | case MMC_ISSUE_SYNC: |
2349 | ret = mmc_blk_wait_for_idle(mq, host); |
2350 | if (ret) |
2351 | return MMC_REQ_BUSY; |
2352 | switch (req_op(req)) { |
2353 | case REQ_OP_DRV_IN: |
2354 | case REQ_OP_DRV_OUT: |
2355 | mmc_blk_issue_drv_op(mq, req); |
2356 | break; |
2357 | case REQ_OP_DISCARD: |
2358 | mmc_blk_issue_discard_rq(mq, req); |
2359 | break; |
2360 | case REQ_OP_SECURE_ERASE: |
2361 | mmc_blk_issue_secdiscard_rq(mq, req); |
2362 | break; |
2363 | case REQ_OP_WRITE_ZEROES: |
2364 | mmc_blk_issue_trim_rq(mq, req); |
2365 | break; |
2366 | case REQ_OP_FLUSH: |
2367 | mmc_blk_issue_flush(mq, req); |
2368 | break; |
2369 | default: |
2370 | WARN_ON_ONCE(1); |
2371 | return MMC_REQ_FAILED_TO_START; |
2372 | } |
2373 | return MMC_REQ_FINISHED; |
2374 | case MMC_ISSUE_DCMD: |
2375 | case MMC_ISSUE_ASYNC: |
2376 | switch (req_op(req)) { |
2377 | case REQ_OP_FLUSH: |
2378 | if (!mmc_cache_enabled(host)) { |
				blk_mq_end_request(req, BLK_STS_OK);
2380 | return MMC_REQ_FINISHED; |
2381 | } |
2382 | ret = mmc_blk_cqe_issue_flush(mq, req); |
2383 | break; |
2384 | case REQ_OP_READ: |
2385 | case REQ_OP_WRITE: |
2386 | if (host->cqe_enabled) |
2387 | ret = mmc_blk_cqe_issue_rw_rq(mq, req); |
2388 | else |
2389 | ret = mmc_blk_mq_issue_rw_rq(mq, req); |
2390 | break; |
2391 | default: |
2392 | WARN_ON_ONCE(1); |
2393 | ret = -EINVAL; |
2394 | } |
2395 | if (!ret) |
2396 | return MMC_REQ_STARTED; |
2397 | return ret == -EBUSY ? MMC_REQ_BUSY : MMC_REQ_FAILED_TO_START; |
2398 | default: |
2399 | WARN_ON_ONCE(1); |
2400 | return MMC_REQ_FAILED_TO_START; |
2401 | } |
2402 | } |
2403 | |
2404 | static inline int mmc_blk_readonly(struct mmc_card *card) |
2405 | { |
2406 | return mmc_card_readonly(card) || |
2407 | !(card->csd.cmdclass & CCC_BLOCK_WRITE); |
2408 | } |
2409 | |
2410 | static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card, |
2411 | struct device *parent, |
2412 | sector_t size, |
2413 | bool default_ro, |
2414 | const char *subname, |
2415 | int area_type, |
2416 | unsigned int part_type) |
2417 | { |
2418 | struct mmc_blk_data *md; |
2419 | int devidx, ret; |
2420 | char cap_str[10]; |
2421 | bool cache_enabled = false; |
2422 | bool fua_enabled = false; |
2423 | |
2424 | devidx = ida_simple_get(&mmc_blk_ida, 0, max_devices, GFP_KERNEL); |
2425 | if (devidx < 0) { |
2426 | /* |
2427 | * We get -ENOSPC because there are no more any available |
2428 | * devidx. The reason may be that, either userspace haven't yet |
2429 | * unmounted the partitions, which postpones mmc_blk_release() |
2430 | * from being called, or the device has more partitions than |
2431 | * what we support. |
2432 | */ |
2433 | if (devidx == -ENOSPC) |
2434 | dev_err(mmc_dev(card->host), |
2435 | "no more device IDs available\n" ); |
2436 | |
2437 | return ERR_PTR(error: devidx); |
2438 | } |
2439 | |
	md = kzalloc(sizeof(struct mmc_blk_data), GFP_KERNEL);
2441 | if (!md) { |
2442 | ret = -ENOMEM; |
2443 | goto out; |
2444 | } |
2445 | |
2446 | md->area_type = area_type; |
2447 | |
2448 | /* |
2449 | * Set the read-only status based on the supported commands |
2450 | * and the write protect switch. |
2451 | */ |
2452 | md->read_only = mmc_blk_readonly(card); |
2453 | |
	md->disk = mmc_init_queue(&md->queue, card);
	if (IS_ERR(md->disk)) {
		ret = PTR_ERR(md->disk);
		goto err_kfree;
	}

	INIT_LIST_HEAD(&md->part);
	INIT_LIST_HEAD(&md->rpmbs);
	kref_init(&md->kref);
2463 | |
2464 | md->queue.blkdata = md; |
2465 | md->part_type = part_type; |
2466 | |
2467 | md->disk->major = MMC_BLOCK_MAJOR; |
2468 | md->disk->minors = perdev_minors; |
2469 | md->disk->first_minor = devidx * perdev_minors; |
2470 | md->disk->fops = &mmc_bdops; |
2471 | md->disk->private_data = md; |
2472 | md->parent = parent; |
	set_disk_ro(md->disk, md->read_only || default_ro);
2474 | if (area_type & (MMC_BLK_DATA_AREA_RPMB | MMC_BLK_DATA_AREA_BOOT)) |
2475 | md->disk->flags |= GENHD_FL_NO_PART; |
2476 | |
2477 | /* |
2478 | * As discussed on lkml, GENHD_FL_REMOVABLE should: |
2479 | * |
2480 | * - be set for removable media with permanent block devices |
2481 | * - be unset for removable block devices with permanent media |
2482 | * |
2483 | * Since MMC block devices clearly fall under the second |
2484 | * case, we do not set GENHD_FL_REMOVABLE. Userspace |
2485 | * should use the block device creation/destruction hotplug |
2486 | * messages to tell when the card is present. |
2487 | */ |
2488 | |
	snprintf(md->disk->disk_name, sizeof(md->disk->disk_name),
		 "mmcblk%u%s", card->host->index, subname ? subname : "");

	set_capacity(md->disk, size);
2493 | |
	if (mmc_host_cmd23(card->host)) {
2495 | if ((mmc_card_mmc(card) && |
2496 | card->csd.mmca_vsn >= CSD_SPEC_VER_3) || |
2497 | (mmc_card_sd(card) && |
2498 | card->scr.cmds & SD_SCR_CMD23_SUPPORT)) |
2499 | md->flags |= MMC_BLK_CMD23; |
2500 | } |
2501 | |
2502 | if (md->flags & MMC_BLK_CMD23 && |
2503 | ((card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN) || |
2504 | card->ext_csd.rel_sectors)) { |
2505 | md->flags |= MMC_BLK_REL_WR; |
2506 | fua_enabled = true; |
2507 | cache_enabled = true; |
2508 | } |
	if (mmc_cache_enabled(card->host))
2510 | cache_enabled = true; |
2511 | |
	blk_queue_write_cache(md->queue.queue, cache_enabled, fua_enabled);

	string_get_size((u64)size, 512, STRING_UNITS_2,
			cap_str, sizeof(cap_str));
	pr_info("%s: %s %s %s%s\n",
		md->disk->disk_name, mmc_card_id(card), mmc_card_name(card),
		cap_str, md->read_only ? " (ro)" : "");
2519 | |
2520 | /* used in ->open, must be set before add_disk: */ |
2521 | if (area_type == MMC_BLK_DATA_AREA_MAIN) |
		dev_set_drvdata(&card->dev, md);
	ret = device_add_disk(md->parent, md->disk, mmc_disk_attr_groups);
2524 | if (ret) |
2525 | goto err_put_disk; |
2526 | return md; |
2527 | |
2528 | err_put_disk: |
	put_disk(md->disk);
	blk_mq_free_tag_set(&md->queue.tag_set);
err_kfree:
	kfree(md);
out:
	ida_simple_remove(&mmc_blk_ida, devidx);
	return ERR_PTR(ret);
2536 | } |
2537 | |
2538 | static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card) |
2539 | { |
2540 | sector_t size; |
2541 | |
2542 | if (!mmc_card_sd(card) && mmc_card_blockaddr(card)) { |
2543 | /* |
		 * The EXT_CSD sector count is in number of 512 byte
		 * sectors.
2546 | */ |
2547 | size = card->ext_csd.sectors; |
2548 | } else { |
2549 | /* |
2550 | * The CSD capacity field is in units of read_blkbits. |
2551 | * set_capacity takes units of 512 bytes. |
2552 | */ |
2553 | size = (typeof(sector_t))card->csd.capacity |
2554 | << (card->csd.read_blkbits - 9); |
2555 | } |
2556 | |
	return mmc_blk_alloc_req(card, &card->dev, size, false, NULL,
				 MMC_BLK_DATA_AREA_MAIN, 0);
2559 | } |
2560 | |
2561 | static int mmc_blk_alloc_part(struct mmc_card *card, |
2562 | struct mmc_blk_data *md, |
2563 | unsigned int part_type, |
2564 | sector_t size, |
2565 | bool default_ro, |
2566 | const char *subname, |
2567 | int area_type) |
2568 | { |
2569 | struct mmc_blk_data *part_md; |
2570 | |
2571 | part_md = mmc_blk_alloc_req(card, disk_to_dev(md->disk), size, default_ro, |
2572 | subname, area_type, part_type); |
	if (IS_ERR(part_md))
		return PTR_ERR(part_md);
	list_add(&part_md->part, &md->part);
2576 | |
2577 | return 0; |
2578 | } |
2579 | |
2580 | /** |
2581 | * mmc_rpmb_ioctl() - ioctl handler for the RPMB chardev |
2582 | * @filp: the character device file |
2583 | * @cmd: the ioctl() command |
2584 | * @arg: the argument from userspace |
2585 | * |
2586 | * This will essentially just redirect the ioctl()s coming in over to |
2587 | * the main block device spawning the RPMB character device. |
2588 | */ |
2589 | static long mmc_rpmb_ioctl(struct file *filp, unsigned int cmd, |
2590 | unsigned long arg) |
2591 | { |
2592 | struct mmc_rpmb_data *rpmb = filp->private_data; |
2593 | int ret; |
2594 | |
2595 | switch (cmd) { |
2596 | case MMC_IOC_CMD: |
		ret = mmc_blk_ioctl_cmd(rpmb->md,
					(struct mmc_ioc_cmd __user *)arg,
					rpmb);
		break;
	case MMC_IOC_MULTI_CMD:
		ret = mmc_blk_ioctl_multi_cmd(rpmb->md,
					(struct mmc_ioc_multi_cmd __user *)arg,
					rpmb);
2605 | break; |
2606 | default: |
2607 | ret = -EINVAL; |
2608 | break; |
2609 | } |
2610 | |
2611 | return ret; |
2612 | } |
2613 | |
2614 | #ifdef CONFIG_COMPAT |
2615 | static long mmc_rpmb_ioctl_compat(struct file *filp, unsigned int cmd, |
2616 | unsigned long arg) |
2617 | { |
	return mmc_rpmb_ioctl(filp, cmd, (unsigned long)compat_ptr(arg));
2619 | } |
2620 | #endif |
2621 | |
2622 | static int mmc_rpmb_chrdev_open(struct inode *inode, struct file *filp) |
2623 | { |
2624 | struct mmc_rpmb_data *rpmb = container_of(inode->i_cdev, |
2625 | struct mmc_rpmb_data, chrdev); |
2626 | |
	get_device(&rpmb->dev);
	filp->private_data = rpmb;
	mmc_blk_get(rpmb->md->disk);
2630 | |
2631 | return nonseekable_open(inode, filp); |
2632 | } |
2633 | |
2634 | static int mmc_rpmb_chrdev_release(struct inode *inode, struct file *filp) |
2635 | { |
2636 | struct mmc_rpmb_data *rpmb = container_of(inode->i_cdev, |
2637 | struct mmc_rpmb_data, chrdev); |
2638 | |
	mmc_blk_put(rpmb->md);
	put_device(&rpmb->dev);
2641 | |
2642 | return 0; |
2643 | } |
2644 | |
2645 | static const struct file_operations mmc_rpmb_fileops = { |
2646 | .release = mmc_rpmb_chrdev_release, |
2647 | .open = mmc_rpmb_chrdev_open, |
2648 | .owner = THIS_MODULE, |
2649 | .llseek = no_llseek, |
2650 | .unlocked_ioctl = mmc_rpmb_ioctl, |
2651 | #ifdef CONFIG_COMPAT |
2652 | .compat_ioctl = mmc_rpmb_ioctl_compat, |
2653 | #endif |
2654 | }; |
2655 | |
2656 | static void mmc_blk_rpmb_device_release(struct device *dev) |
2657 | { |
2658 | struct mmc_rpmb_data *rpmb = dev_get_drvdata(dev); |
2659 | |
2660 | ida_simple_remove(&mmc_rpmb_ida, rpmb->id); |
	kfree(rpmb);
2662 | } |
2663 | |
2664 | static int mmc_blk_alloc_rpmb_part(struct mmc_card *card, |
2665 | struct mmc_blk_data *md, |
2666 | unsigned int part_index, |
2667 | sector_t size, |
2668 | const char *subname) |
2669 | { |
2670 | int devidx, ret; |
2671 | char rpmb_name[DISK_NAME_LEN]; |
2672 | char cap_str[10]; |
2673 | struct mmc_rpmb_data *rpmb; |
2674 | |
2675 | /* This creates the minor number for the RPMB char device */ |
2676 | devidx = ida_simple_get(&mmc_rpmb_ida, 0, max_devices, GFP_KERNEL); |
2677 | if (devidx < 0) |
2678 | return devidx; |
2679 | |
	rpmb = kzalloc(sizeof(*rpmb), GFP_KERNEL);
2681 | if (!rpmb) { |
2682 | ida_simple_remove(&mmc_rpmb_ida, devidx); |
2683 | return -ENOMEM; |
2684 | } |
2685 | |
	snprintf(rpmb_name, sizeof(rpmb_name),
		 "mmcblk%u%s", card->host->index, subname ? subname : "");
2688 | |
2689 | rpmb->id = devidx; |
2690 | rpmb->part_index = part_index; |
2691 | rpmb->dev.init_name = rpmb_name; |
2692 | rpmb->dev.bus = &mmc_rpmb_bus_type; |
2693 | rpmb->dev.devt = MKDEV(MAJOR(mmc_rpmb_devt), rpmb->id); |
2694 | rpmb->dev.parent = &card->dev; |
2695 | rpmb->dev.release = mmc_blk_rpmb_device_release; |
	device_initialize(&rpmb->dev);
	dev_set_drvdata(&rpmb->dev, rpmb);
2698 | rpmb->md = md; |
2699 | |
2700 | cdev_init(&rpmb->chrdev, &mmc_rpmb_fileops); |
2701 | rpmb->chrdev.owner = THIS_MODULE; |
	ret = cdev_device_add(&rpmb->chrdev, &rpmb->dev);
	if (ret) {
		pr_err("%s: could not add character device\n", rpmb_name);
2705 | goto out_put_device; |
2706 | } |
2707 | |
	list_add(&rpmb->node, &md->rpmbs);

	string_get_size((u64)size, 512, STRING_UNITS_2,
			cap_str, sizeof(cap_str));

	pr_info("%s: %s %s %s, chardev (%d:%d)\n",
		rpmb_name, mmc_card_id(card), mmc_card_name(card), cap_str,
		MAJOR(mmc_rpmb_devt), rpmb->id);
2716 | |
2717 | return 0; |
2718 | |
2719 | out_put_device: |
	put_device(&rpmb->dev);
2721 | return ret; |
2722 | } |
2723 | |
static void mmc_blk_remove_rpmb_part(struct mmc_rpmb_data *rpmb)
{
	cdev_device_del(&rpmb->chrdev, &rpmb->dev);
	put_device(&rpmb->dev);
2729 | } |
2730 | |
/*
 * MMC physical partitions consist of two boot partitions and
 * up to four general purpose partitions.
 * For each partition enabled in EXT_CSD a block device will be allocated
 * to provide access to the partition.
 */
2736 | |
2737 | static int mmc_blk_alloc_parts(struct mmc_card *card, struct mmc_blk_data *md) |
2738 | { |
2739 | int idx, ret; |
2740 | |
2741 | if (!mmc_card_mmc(card)) |
2742 | return 0; |
2743 | |
2744 | for (idx = 0; idx < card->nr_parts; idx++) { |
2745 | if (card->part[idx].area_type & MMC_BLK_DATA_AREA_RPMB) { |
2746 | /* |
2747 | * RPMB partitions does not provide block access, they |
2748 | * are only accessed using ioctl():s. Thus create |
2749 | * special RPMB block devices that do not have a |
2750 | * backing block queue for these. |
2751 | */ |
			ret = mmc_blk_alloc_rpmb_part(card, md,
				card->part[idx].part_cfg,
				card->part[idx].size >> 9,
				card->part[idx].name);
2756 | if (ret) |
2757 | return ret; |
2758 | } else if (card->part[idx].size) { |
			ret = mmc_blk_alloc_part(card, md,
				card->part[idx].part_cfg,
				card->part[idx].size >> 9,
				card->part[idx].force_ro,
				card->part[idx].name,
				card->part[idx].area_type);
2765 | if (ret) |
2766 | return ret; |
2767 | } |
2768 | } |
2769 | |
2770 | return 0; |
2771 | } |
2772 | |
2773 | static void mmc_blk_remove_req(struct mmc_blk_data *md) |
2774 | { |
2775 | /* |
2776 | * Flush remaining requests and free queues. It is freeing the queue |
2777 | * that stops new requests from being accepted. |
2778 | */ |
	del_gendisk(md->disk);
2780 | mmc_cleanup_queue(&md->queue); |
2781 | mmc_blk_put(md); |
2782 | } |
2783 | |
2784 | static void mmc_blk_remove_parts(struct mmc_card *card, |
2785 | struct mmc_blk_data *md) |
2786 | { |
2787 | struct list_head *pos, *q; |
2788 | struct mmc_blk_data *part_md; |
2789 | struct mmc_rpmb_data *rpmb; |
2790 | |
2791 | /* Remove RPMB partitions */ |
2792 | list_for_each_safe(pos, q, &md->rpmbs) { |
2793 | rpmb = list_entry(pos, struct mmc_rpmb_data, node); |
		list_del(pos);
2795 | mmc_blk_remove_rpmb_part(rpmb); |
2796 | } |
2797 | /* Remove block partitions */ |
2798 | list_for_each_safe(pos, q, &md->part) { |
2799 | part_md = list_entry(pos, struct mmc_blk_data, part); |
		list_del(pos);
		mmc_blk_remove_req(part_md);
2802 | } |
2803 | } |
2804 | |
2805 | #ifdef CONFIG_DEBUG_FS |
2806 | |
2807 | static int mmc_dbg_card_status_get(void *data, u64 *val) |
2808 | { |
2809 | struct mmc_card *card = data; |
	struct mmc_blk_data *md = dev_get_drvdata(&card->dev);
2811 | struct mmc_queue *mq = &md->queue; |
2812 | struct request *req; |
2813 | int ret; |
2814 | |
2815 | /* Ask the block layer about the card status */ |
	req = blk_mq_alloc_request(mq->queue, REQ_OP_DRV_IN, 0);
	if (IS_ERR(req))
		return PTR_ERR(req);
	req_to_mmc_queue_req(req)->drv_op = MMC_DRV_OP_GET_CARD_STATUS;
	req_to_mmc_queue_req(req)->drv_op_result = -EIO;
	blk_execute_rq(req, false);
	ret = req_to_mmc_queue_req(req)->drv_op_result;
2823 | if (ret >= 0) { |
2824 | *val = ret; |
2825 | ret = 0; |
2826 | } |
	blk_mq_free_request(req);
2828 | |
2829 | return ret; |
2830 | } |
2831 | DEFINE_DEBUGFS_ATTRIBUTE(mmc_dbg_card_status_fops, mmc_dbg_card_status_get, |
			 NULL, "%08llx\n");
2833 | |
2834 | /* That is two digits * 512 + 1 for newline */ |
2835 | #define EXT_CSD_STR_LEN 1025 |
2836 | |
2837 | static int mmc_ext_csd_open(struct inode *inode, struct file *filp) |
2838 | { |
2839 | struct mmc_card *card = inode->i_private; |
	struct mmc_blk_data *md = dev_get_drvdata(&card->dev);
2841 | struct mmc_queue *mq = &md->queue; |
2842 | struct request *req; |
2843 | char *buf; |
2844 | ssize_t n = 0; |
2845 | u8 *ext_csd; |
2846 | int err, i; |
2847 | |
2848 | buf = kmalloc(EXT_CSD_STR_LEN + 1, GFP_KERNEL); |
2849 | if (!buf) |
2850 | return -ENOMEM; |
2851 | |
2852 | /* Ask the block layer for the EXT CSD */ |
	req = blk_mq_alloc_request(mq->queue, REQ_OP_DRV_IN, 0);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out_free;
	}
	req_to_mmc_queue_req(req)->drv_op = MMC_DRV_OP_GET_EXT_CSD;
	req_to_mmc_queue_req(req)->drv_op_result = -EIO;
	req_to_mmc_queue_req(req)->drv_op_data = &ext_csd;
	blk_execute_rq(req, false);
	err = req_to_mmc_queue_req(req)->drv_op_result;
	blk_mq_free_request(req);
2864 | if (err) { |
		pr_err("FAILED %d\n", err);
2866 | goto out_free; |
2867 | } |
2868 | |
2869 | for (i = 0; i < 512; i++) |
		n += sprintf(buf + n, "%02x", ext_csd[i]);
	n += sprintf(buf + n, "\n");
2872 | |
2873 | if (n != EXT_CSD_STR_LEN) { |
2874 | err = -EINVAL; |
		kfree(ext_csd);
2876 | goto out_free; |
2877 | } |
2878 | |
2879 | filp->private_data = buf; |
	kfree(ext_csd);
2881 | return 0; |
2882 | |
2883 | out_free: |
	kfree(buf);
2885 | return err; |
2886 | } |
2887 | |
2888 | static ssize_t mmc_ext_csd_read(struct file *filp, char __user *ubuf, |
2889 | size_t cnt, loff_t *ppos) |
2890 | { |
2891 | char *buf = filp->private_data; |
2892 | |
	return simple_read_from_buffer(ubuf, cnt, ppos,
				       buf, EXT_CSD_STR_LEN);
2895 | } |
2896 | |
2897 | static int mmc_ext_csd_release(struct inode *inode, struct file *file) |
2898 | { |
	kfree(file->private_data);
2900 | return 0; |
2901 | } |
2902 | |
2903 | static const struct file_operations mmc_dbg_ext_csd_fops = { |
2904 | .open = mmc_ext_csd_open, |
2905 | .read = mmc_ext_csd_read, |
2906 | .release = mmc_ext_csd_release, |
2907 | .llseek = default_llseek, |
2908 | }; |
2909 | |
2910 | static void mmc_blk_add_debugfs(struct mmc_card *card, struct mmc_blk_data *md) |
2911 | { |
2912 | struct dentry *root; |
2913 | |
2914 | if (!card->debugfs_root) |
2915 | return; |
2916 | |
2917 | root = card->debugfs_root; |
2918 | |
2919 | if (mmc_card_mmc(card) || mmc_card_sd(card)) { |
		md->status_dentry =
			debugfs_create_file_unsafe("status", 0400, root,
						   card,
						   &mmc_dbg_card_status_fops);
2924 | } |
2925 | |
2926 | if (mmc_card_mmc(card)) { |
		md->ext_csd_dentry =
			debugfs_create_file("ext_csd", S_IRUSR, root, card,
					    &mmc_dbg_ext_csd_fops);
2930 | } |
2931 | } |
2932 | |
2933 | static void mmc_blk_remove_debugfs(struct mmc_card *card, |
2934 | struct mmc_blk_data *md) |
2935 | { |
2936 | if (!card->debugfs_root) |
2937 | return; |
2938 | |
	debugfs_remove(md->status_dentry);
2940 | md->status_dentry = NULL; |
2941 | |
	debugfs_remove(md->ext_csd_dentry);
2943 | md->ext_csd_dentry = NULL; |
2944 | } |
2945 | |
2946 | #else |
2947 | |
2948 | static void mmc_blk_add_debugfs(struct mmc_card *card, struct mmc_blk_data *md) |
2949 | { |
2950 | } |
2951 | |
2952 | static void mmc_blk_remove_debugfs(struct mmc_card *card, |
2953 | struct mmc_blk_data *md) |
2954 | { |
2955 | } |
2956 | |
2957 | #endif /* CONFIG_DEBUG_FS */ |
2958 | |
2959 | static int mmc_blk_probe(struct mmc_card *card) |
2960 | { |
2961 | struct mmc_blk_data *md; |
2962 | int ret = 0; |
2963 | |
2964 | /* |
2965 | * Check that the card supports the command class(es) we need. |
2966 | */ |
2967 | if (!(card->csd.cmdclass & CCC_BLOCK_READ)) |
2968 | return -ENODEV; |
2969 | |
	mmc_fixup_device(card, mmc_blk_fixups);
2971 | |
	card->complete_wq = alloc_workqueue("mmc_complete",
					WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
	if (!card->complete_wq) {
		pr_err("Failed to create mmc completion workqueue");
2976 | return -ENOMEM; |
2977 | } |
2978 | |
2979 | md = mmc_blk_alloc(card); |
	if (IS_ERR(md)) {
		ret = PTR_ERR(md);
2982 | goto out_free; |
2983 | } |
2984 | |
2985 | ret = mmc_blk_alloc_parts(card, md); |
2986 | if (ret) |
2987 | goto out; |
2988 | |
2989 | /* Add two debugfs entries */ |
2990 | mmc_blk_add_debugfs(card, md); |
2991 | |
	pm_runtime_set_autosuspend_delay(&card->dev, 3000);
	pm_runtime_use_autosuspend(&card->dev);
2994 | |
2995 | /* |
2996 | * Don't enable runtime PM for SD-combo cards here. Leave that |
2997 | * decision to be taken during the SDIO init sequence instead. |
2998 | */ |
2999 | if (!mmc_card_sd_combo(card)) { |
		pm_runtime_set_active(&card->dev);
		pm_runtime_enable(&card->dev);
3002 | } |
3003 | |
3004 | return 0; |
3005 | |
3006 | out: |
3007 | mmc_blk_remove_parts(card, md); |
3008 | mmc_blk_remove_req(md); |
3009 | out_free: |
	destroy_workqueue(card->complete_wq);
3011 | return ret; |
3012 | } |
3013 | |
3014 | static void mmc_blk_remove(struct mmc_card *card) |
3015 | { |
	struct mmc_blk_data *md = dev_get_drvdata(&card->dev);
3017 | |
3018 | mmc_blk_remove_debugfs(card, md); |
3019 | mmc_blk_remove_parts(card, md); |
	pm_runtime_get_sync(&card->dev);
3021 | if (md->part_curr != md->part_type) { |
		mmc_claim_host(card->host);
		mmc_blk_part_switch(card, md->part_type);
		mmc_release_host(card->host);
3025 | } |
3026 | if (!mmc_card_sd_combo(card)) |
		pm_runtime_disable(&card->dev);
	pm_runtime_put_noidle(&card->dev);
	mmc_blk_remove_req(md);
	destroy_workqueue(card->complete_wq);
3031 | } |
3032 | |
3033 | static int _mmc_blk_suspend(struct mmc_card *card) |
3034 | { |
3035 | struct mmc_blk_data *part_md; |
	struct mmc_blk_data *md = dev_get_drvdata(&card->dev);
3037 | |
3038 | if (md) { |
3039 | mmc_queue_suspend(&md->queue); |
3040 | list_for_each_entry(part_md, &md->part, part) { |
3041 | mmc_queue_suspend(&part_md->queue); |
3042 | } |
3043 | } |
3044 | return 0; |
3045 | } |
3046 | |
3047 | static void mmc_blk_shutdown(struct mmc_card *card) |
3048 | { |
3049 | _mmc_blk_suspend(card); |
3050 | } |
3051 | |
3052 | #ifdef CONFIG_PM_SLEEP |
3053 | static int mmc_blk_suspend(struct device *dev) |
3054 | { |
3055 | struct mmc_card *card = mmc_dev_to_card(dev); |
3056 | |
3057 | return _mmc_blk_suspend(card); |
3058 | } |
3059 | |
3060 | static int mmc_blk_resume(struct device *dev) |
3061 | { |
3062 | struct mmc_blk_data *part_md; |
3063 | struct mmc_blk_data *md = dev_get_drvdata(dev); |
3064 | |
3065 | if (md) { |
3066 | /* |
3067 | * Resume involves the card going into idle state, |
3068 | * so current partition is always the main one. |
3069 | */ |
3070 | md->part_curr = md->part_type; |
3071 | mmc_queue_resume(&md->queue); |
3072 | list_for_each_entry(part_md, &md->part, part) { |
3073 | mmc_queue_resume(&part_md->queue); |
3074 | } |
3075 | } |
3076 | return 0; |
3077 | } |
3078 | #endif |
3079 | |
3080 | static SIMPLE_DEV_PM_OPS(mmc_blk_pm_ops, mmc_blk_suspend, mmc_blk_resume); |
3081 | |
3082 | static struct mmc_driver mmc_driver = { |
3083 | .drv = { |
		.name = "mmcblk",
3085 | .pm = &mmc_blk_pm_ops, |
3086 | }, |
3087 | .probe = mmc_blk_probe, |
3088 | .remove = mmc_blk_remove, |
3089 | .shutdown = mmc_blk_shutdown, |
3090 | }; |
3091 | |
3092 | static int __init mmc_blk_init(void) |
3093 | { |
3094 | int res; |
3095 | |
	res = bus_register(&mmc_rpmb_bus_type);
	if (res < 0) {
		pr_err("mmcblk: could not register RPMB bus type\n");
3099 | return res; |
3100 | } |
	res = alloc_chrdev_region(&mmc_rpmb_devt, 0, MAX_DEVICES, "rpmb");
	if (res < 0) {
		pr_err("mmcblk: failed to allocate rpmb chrdev region\n");
3104 | goto out_bus_unreg; |
3105 | } |
3106 | |
3107 | if (perdev_minors != CONFIG_MMC_BLOCK_MINORS) |
		pr_info("mmcblk: using %d minors per device\n", perdev_minors);
3109 | |
3110 | max_devices = min(MAX_DEVICES, (1 << MINORBITS) / perdev_minors); |
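	/*
	 * For example, with the default CONFIG_MMC_BLOCK_MINORS of 8 this is
	 * min(256, (1 << 20) / 8), so MAX_DEVICES (256) is the effective
	 * limit.
	 */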
3111 | |
	res = register_blkdev(MMC_BLOCK_MAJOR, "mmc");
3113 | if (res) |
3114 | goto out_chrdev_unreg; |
3115 | |
	res = mmc_register_driver(&mmc_driver);
3117 | if (res) |
3118 | goto out_blkdev_unreg; |
3119 | |
3120 | return 0; |
3121 | |
3122 | out_blkdev_unreg: |
	unregister_blkdev(MMC_BLOCK_MAJOR, "mmc");
3124 | out_chrdev_unreg: |
3125 | unregister_chrdev_region(mmc_rpmb_devt, MAX_DEVICES); |
3126 | out_bus_unreg: |
	bus_unregister(&mmc_rpmb_bus_type);
3128 | return res; |
3129 | } |
3130 | |
3131 | static void __exit mmc_blk_exit(void) |
3132 | { |
	mmc_unregister_driver(&mmc_driver);
	unregister_blkdev(MMC_BLOCK_MAJOR, "mmc");
	unregister_chrdev_region(mmc_rpmb_devt, MAX_DEVICES);
	bus_unregister(&mmc_rpmb_bus_type);
3137 | } |
3138 | |
3139 | module_init(mmc_blk_init); |
3140 | module_exit(mmc_blk_exit); |
3141 | |
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Multimedia Card (MMC) block device driver");
3144 | |
3145 | |