1 | // SPDX-License-Identifier: GPL-2.0 |
2 | /* |
3 | * Block driver for media (i.e., flash cards) |
4 | * |
5 | * Copyright 2002 Hewlett-Packard Company |
6 | * Copyright 2005-2008 Pierre Ossman |
7 | * |
8 | * Use consistent with the GNU GPL is permitted, |
9 | * provided that this copyright notice is |
10 | * preserved in its entirety in all copies and derived works. |
11 | * |
12 | * HEWLETT-PACKARD COMPANY MAKES NO WARRANTIES, EXPRESSED OR IMPLIED, |
13 | * AS TO THE USEFULNESS OR CORRECTNESS OF THIS CODE OR ITS |
14 | * FITNESS FOR ANY PARTICULAR PURPOSE. |
15 | * |
16 | * Many thanks to Alessandro Rubini and Jonathan Corbet! |
17 | * |
18 | * Author: Andrew Christian |
19 | * 28 May 2002 |
20 | */ |
21 | #include <linux/moduleparam.h> |
22 | #include <linux/module.h> |
23 | #include <linux/init.h> |
24 | |
25 | #include <linux/kernel.h> |
26 | #include <linux/fs.h> |
27 | #include <linux/slab.h> |
28 | #include <linux/errno.h> |
29 | #include <linux/hdreg.h> |
30 | #include <linux/kdev_t.h> |
31 | #include <linux/kref.h> |
32 | #include <linux/blkdev.h> |
33 | #include <linux/cdev.h> |
34 | #include <linux/mutex.h> |
35 | #include <linux/scatterlist.h> |
36 | #include <linux/string_helpers.h> |
37 | #include <linux/delay.h> |
38 | #include <linux/capability.h> |
39 | #include <linux/compat.h> |
40 | #include <linux/pm_runtime.h> |
41 | #include <linux/idr.h> |
42 | #include <linux/debugfs.h> |
43 | |
44 | #include <linux/mmc/ioctl.h> |
45 | #include <linux/mmc/card.h> |
46 | #include <linux/mmc/host.h> |
47 | #include <linux/mmc/mmc.h> |
48 | #include <linux/mmc/sd.h> |
49 | |
50 | #include <linux/uaccess.h> |
51 | |
52 | #include "queue.h" |
53 | #include "block.h" |
54 | #include "core.h" |
55 | #include "card.h" |
56 | #include "crypto.h" |
57 | #include "host.h" |
58 | #include "bus.h" |
59 | #include "mmc_ops.h" |
60 | #include "quirks.h" |
61 | #include "sd_ops.h" |
62 | |
63 | MODULE_ALIAS("mmc:block" ); |
64 | #ifdef MODULE_PARAM_PREFIX |
65 | #undef MODULE_PARAM_PREFIX |
66 | #endif |
67 | #define MODULE_PARAM_PREFIX "mmcblk." |
68 | |
69 | /* |
70 | * Set a 10 second timeout for polling write request busy state. Note, mmc core |
71 | * is setting a 3 second timeout for SD cards, and SDHCI has long had a 10 |
72 | * second software timer to timeout the whole request, so 10 seconds should be |
73 | * ample. |
74 | */ |
75 | #define MMC_BLK_TIMEOUT_MS (10 * 1000) |
76 | #define (x) ((x & 0x00FF0000) >> 16) |
77 | #define (x) ((x & 0x0000FF00) >> 8) |
78 | |
79 | static DEFINE_MUTEX(block_mutex); |
80 | |
81 | /* |
82 | * The defaults come from config options but can be overriden by module |
83 | * or bootarg options. |
84 | */ |
85 | static int perdev_minors = CONFIG_MMC_BLOCK_MINORS; |
86 | |
87 | /* |
88 | * We've only got one major, so number of mmcblk devices is |
89 | * limited to (1 << 20) / number of minors per device. It is also |
90 | * limited by the MAX_DEVICES below. |
91 | */ |
92 | static int max_devices; |
93 | |
94 | #define MAX_DEVICES 256 |
95 | |
96 | static DEFINE_IDA(mmc_blk_ida); |
97 | static DEFINE_IDA(mmc_rpmb_ida); |
98 | |
99 | struct mmc_blk_busy_data { |
100 | struct mmc_card *card; |
101 | u32 status; |
102 | }; |
103 | |
104 | /* |
105 | * There is one mmc_blk_data per slot. |
106 | */ |
107 | struct mmc_blk_data { |
108 | struct device *parent; |
109 | struct gendisk *disk; |
110 | struct mmc_queue queue; |
111 | struct list_head part; |
112 | struct list_head rpmbs; |
113 | |
114 | unsigned int flags; |
115 | #define MMC_BLK_CMD23 (1 << 0) /* Can do SET_BLOCK_COUNT for multiblock */ |
116 | #define MMC_BLK_REL_WR (1 << 1) /* MMC Reliable write support */ |
117 | |
118 | struct kref kref; |
119 | unsigned int read_only; |
120 | unsigned int part_type; |
121 | unsigned int reset_done; |
122 | #define MMC_BLK_READ BIT(0) |
123 | #define MMC_BLK_WRITE BIT(1) |
124 | #define MMC_BLK_DISCARD BIT(2) |
125 | #define MMC_BLK_SECDISCARD BIT(3) |
126 | #define MMC_BLK_CQE_RECOVERY BIT(4) |
127 | #define MMC_BLK_TRIM BIT(5) |
128 | |
129 | /* |
130 | * Only set in main mmc_blk_data associated |
131 | * with mmc_card with dev_set_drvdata, and keeps |
132 | * track of the current selected device partition. |
133 | */ |
134 | unsigned int part_curr; |
135 | #define MMC_BLK_PART_INVALID UINT_MAX /* Unknown partition active */ |
136 | int area_type; |
137 | |
138 | /* debugfs files (only in main mmc_blk_data) */ |
139 | struct dentry *status_dentry; |
140 | struct dentry *ext_csd_dentry; |
141 | }; |
142 | |
143 | /* Device type for RPMB character devices */ |
144 | static dev_t mmc_rpmb_devt; |
145 | |
146 | /* Bus type for RPMB character devices */ |
147 | static const struct bus_type mmc_rpmb_bus_type = { |
148 | .name = "mmc_rpmb" , |
149 | }; |
150 | |
151 | /** |
152 | * struct mmc_rpmb_data - special RPMB device type for these areas |
153 | * @dev: the device for the RPMB area |
154 | * @chrdev: character device for the RPMB area |
155 | * @id: unique device ID number |
156 | * @part_index: partition index (0 on first) |
157 | * @md: parent MMC block device |
158 | * @node: list item, so we can put this device on a list |
159 | */ |
160 | struct mmc_rpmb_data { |
161 | struct device dev; |
162 | struct cdev chrdev; |
163 | int id; |
164 | unsigned int part_index; |
165 | struct mmc_blk_data *md; |
166 | struct list_head node; |
167 | }; |
168 | |
169 | static DEFINE_MUTEX(open_lock); |
170 | |
171 | module_param(perdev_minors, int, 0444); |
172 | MODULE_PARM_DESC(perdev_minors, "Minors numbers to allocate per device" ); |
173 | |
174 | static inline int mmc_blk_part_switch(struct mmc_card *card, |
175 | unsigned int part_type); |
176 | static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq, |
177 | struct mmc_card *card, |
178 | int recovery_mode, |
179 | struct mmc_queue *mq); |
180 | static void mmc_blk_hsq_req_done(struct mmc_request *mrq); |
181 | static int mmc_spi_err_check(struct mmc_card *card); |
182 | static int mmc_blk_busy_cb(void *cb_data, bool *busy); |
183 | |
184 | static struct mmc_blk_data *mmc_blk_get(struct gendisk *disk) |
185 | { |
186 | struct mmc_blk_data *md; |
187 | |
188 | mutex_lock(&open_lock); |
189 | md = disk->private_data; |
190 | if (md && !kref_get_unless_zero(kref: &md->kref)) |
191 | md = NULL; |
192 | mutex_unlock(lock: &open_lock); |
193 | |
194 | return md; |
195 | } |
196 | |
197 | static inline int mmc_get_devidx(struct gendisk *disk) |
198 | { |
199 | int devidx = disk->first_minor / perdev_minors; |
200 | return devidx; |
201 | } |
202 | |
203 | static void mmc_blk_kref_release(struct kref *ref) |
204 | { |
205 | struct mmc_blk_data *md = container_of(ref, struct mmc_blk_data, kref); |
206 | int devidx; |
207 | |
208 | devidx = mmc_get_devidx(disk: md->disk); |
209 | ida_free(&mmc_blk_ida, id: devidx); |
210 | |
211 | mutex_lock(&open_lock); |
212 | md->disk->private_data = NULL; |
213 | mutex_unlock(lock: &open_lock); |
214 | |
215 | put_disk(disk: md->disk); |
216 | kfree(objp: md); |
217 | } |
218 | |
219 | static void mmc_blk_put(struct mmc_blk_data *md) |
220 | { |
221 | kref_put(kref: &md->kref, release: mmc_blk_kref_release); |
222 | } |
223 | |
224 | static ssize_t power_ro_lock_show(struct device *dev, |
225 | struct device_attribute *attr, char *buf) |
226 | { |
227 | int ret; |
228 | struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev)); |
229 | struct mmc_card *card = md->queue.card; |
230 | int locked = 0; |
231 | |
232 | if (card->ext_csd.boot_ro_lock & EXT_CSD_BOOT_WP_B_PERM_WP_EN) |
233 | locked = 2; |
234 | else if (card->ext_csd.boot_ro_lock & EXT_CSD_BOOT_WP_B_PWR_WP_EN) |
235 | locked = 1; |
236 | |
237 | ret = snprintf(buf, PAGE_SIZE, fmt: "%d\n" , locked); |
238 | |
239 | mmc_blk_put(md); |
240 | |
241 | return ret; |
242 | } |
243 | |
244 | static ssize_t power_ro_lock_store(struct device *dev, |
245 | struct device_attribute *attr, const char *buf, size_t count) |
246 | { |
247 | int ret; |
248 | struct mmc_blk_data *md, *part_md; |
249 | struct mmc_queue *mq; |
250 | struct request *req; |
251 | unsigned long set; |
252 | |
253 | if (kstrtoul(s: buf, base: 0, res: &set)) |
254 | return -EINVAL; |
255 | |
256 | if (set != 1) |
257 | return count; |
258 | |
259 | md = mmc_blk_get(dev_to_disk(dev)); |
260 | mq = &md->queue; |
261 | |
262 | /* Dispatch locking to the block layer */ |
263 | req = blk_mq_alloc_request(q: mq->queue, opf: REQ_OP_DRV_OUT, flags: 0); |
264 | if (IS_ERR(ptr: req)) { |
265 | count = PTR_ERR(ptr: req); |
266 | goto out_put; |
267 | } |
268 | req_to_mmc_queue_req(rq: req)->drv_op = MMC_DRV_OP_BOOT_WP; |
269 | req_to_mmc_queue_req(rq: req)->drv_op_result = -EIO; |
270 | blk_execute_rq(rq: req, at_head: false); |
271 | ret = req_to_mmc_queue_req(rq: req)->drv_op_result; |
272 | blk_mq_free_request(rq: req); |
273 | |
274 | if (!ret) { |
275 | pr_info("%s: Locking boot partition ro until next power on\n" , |
276 | md->disk->disk_name); |
277 | set_disk_ro(disk: md->disk, read_only: 1); |
278 | |
279 | list_for_each_entry(part_md, &md->part, part) |
280 | if (part_md->area_type == MMC_BLK_DATA_AREA_BOOT) { |
281 | pr_info("%s: Locking boot partition ro until next power on\n" , part_md->disk->disk_name); |
282 | set_disk_ro(disk: part_md->disk, read_only: 1); |
283 | } |
284 | } |
285 | out_put: |
286 | mmc_blk_put(md); |
287 | return count; |
288 | } |
289 | |
290 | static DEVICE_ATTR(ro_lock_until_next_power_on, 0, |
291 | power_ro_lock_show, power_ro_lock_store); |
292 | |
293 | static ssize_t force_ro_show(struct device *dev, struct device_attribute *attr, |
294 | char *buf) |
295 | { |
296 | int ret; |
297 | struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev)); |
298 | |
299 | ret = snprintf(buf, PAGE_SIZE, fmt: "%d\n" , |
300 | get_disk_ro(dev_to_disk(dev)) ^ |
301 | md->read_only); |
302 | mmc_blk_put(md); |
303 | return ret; |
304 | } |
305 | |
306 | static ssize_t force_ro_store(struct device *dev, struct device_attribute *attr, |
307 | const char *buf, size_t count) |
308 | { |
309 | int ret; |
310 | char *end; |
311 | struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev)); |
312 | unsigned long set = simple_strtoul(buf, &end, 0); |
313 | if (end == buf) { |
314 | ret = -EINVAL; |
315 | goto out; |
316 | } |
317 | |
318 | set_disk_ro(dev_to_disk(dev), read_only: set || md->read_only); |
319 | ret = count; |
320 | out: |
321 | mmc_blk_put(md); |
322 | return ret; |
323 | } |
324 | |
325 | static DEVICE_ATTR(force_ro, 0644, force_ro_show, force_ro_store); |
326 | |
327 | static struct attribute *mmc_disk_attrs[] = { |
328 | &dev_attr_force_ro.attr, |
329 | &dev_attr_ro_lock_until_next_power_on.attr, |
330 | NULL, |
331 | }; |
332 | |
333 | static umode_t mmc_disk_attrs_is_visible(struct kobject *kobj, |
334 | struct attribute *a, int n) |
335 | { |
336 | struct device *dev = kobj_to_dev(kobj); |
337 | struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev)); |
338 | umode_t mode = a->mode; |
339 | |
340 | if (a == &dev_attr_ro_lock_until_next_power_on.attr && |
341 | (md->area_type & MMC_BLK_DATA_AREA_BOOT) && |
342 | md->queue.card->ext_csd.boot_ro_lockable) { |
343 | mode = S_IRUGO; |
344 | if (!(md->queue.card->ext_csd.boot_ro_lock & |
345 | EXT_CSD_BOOT_WP_B_PWR_WP_DIS)) |
346 | mode |= S_IWUSR; |
347 | } |
348 | |
349 | mmc_blk_put(md); |
350 | return mode; |
351 | } |
352 | |
353 | static const struct attribute_group mmc_disk_attr_group = { |
354 | .is_visible = mmc_disk_attrs_is_visible, |
355 | .attrs = mmc_disk_attrs, |
356 | }; |
357 | |
358 | static const struct attribute_group *mmc_disk_attr_groups[] = { |
359 | &mmc_disk_attr_group, |
360 | NULL, |
361 | }; |
362 | |
363 | static int mmc_blk_open(struct gendisk *disk, blk_mode_t mode) |
364 | { |
365 | struct mmc_blk_data *md = mmc_blk_get(disk); |
366 | int ret = -ENXIO; |
367 | |
368 | mutex_lock(&block_mutex); |
369 | if (md) { |
370 | ret = 0; |
371 | if ((mode & BLK_OPEN_WRITE) && md->read_only) { |
372 | mmc_blk_put(md); |
373 | ret = -EROFS; |
374 | } |
375 | } |
376 | mutex_unlock(lock: &block_mutex); |
377 | |
378 | return ret; |
379 | } |
380 | |
381 | static void mmc_blk_release(struct gendisk *disk) |
382 | { |
383 | struct mmc_blk_data *md = disk->private_data; |
384 | |
385 | mutex_lock(&block_mutex); |
386 | mmc_blk_put(md); |
387 | mutex_unlock(lock: &block_mutex); |
388 | } |
389 | |
390 | static int |
391 | mmc_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo) |
392 | { |
393 | geo->cylinders = get_capacity(disk: bdev->bd_disk) / (4 * 16); |
394 | geo->heads = 4; |
395 | geo->sectors = 16; |
396 | return 0; |
397 | } |
398 | |
399 | struct mmc_blk_ioc_data { |
400 | struct mmc_ioc_cmd ic; |
401 | unsigned char *buf; |
402 | u64 buf_bytes; |
403 | unsigned int flags; |
404 | #define MMC_BLK_IOC_DROP BIT(0) /* drop this mrq */ |
405 | #define MMC_BLK_IOC_SBC BIT(1) /* use mrq.sbc */ |
406 | |
407 | struct mmc_rpmb_data *rpmb; |
408 | }; |
409 | |
410 | static struct mmc_blk_ioc_data *mmc_blk_ioctl_copy_from_user( |
411 | struct mmc_ioc_cmd __user *user) |
412 | { |
413 | struct mmc_blk_ioc_data *idata; |
414 | int err; |
415 | |
416 | idata = kzalloc(size: sizeof(*idata), GFP_KERNEL); |
417 | if (!idata) { |
418 | err = -ENOMEM; |
419 | goto out; |
420 | } |
421 | |
422 | if (copy_from_user(to: &idata->ic, from: user, n: sizeof(idata->ic))) { |
423 | err = -EFAULT; |
424 | goto idata_err; |
425 | } |
426 | |
427 | idata->buf_bytes = (u64) idata->ic.blksz * idata->ic.blocks; |
428 | if (idata->buf_bytes > MMC_IOC_MAX_BYTES) { |
429 | err = -EOVERFLOW; |
430 | goto idata_err; |
431 | } |
432 | |
433 | if (!idata->buf_bytes) { |
434 | idata->buf = NULL; |
435 | return idata; |
436 | } |
437 | |
438 | idata->buf = memdup_user((void __user *)(unsigned long) |
439 | idata->ic.data_ptr, idata->buf_bytes); |
440 | if (IS_ERR(ptr: idata->buf)) { |
441 | err = PTR_ERR(ptr: idata->buf); |
442 | goto idata_err; |
443 | } |
444 | |
445 | return idata; |
446 | |
447 | idata_err: |
448 | kfree(objp: idata); |
449 | out: |
450 | return ERR_PTR(error: err); |
451 | } |
452 | |
453 | static int mmc_blk_ioctl_copy_to_user(struct mmc_ioc_cmd __user *ic_ptr, |
454 | struct mmc_blk_ioc_data *idata) |
455 | { |
456 | struct mmc_ioc_cmd *ic = &idata->ic; |
457 | |
458 | if (copy_to_user(to: &(ic_ptr->response), from: ic->response, |
459 | n: sizeof(ic->response))) |
460 | return -EFAULT; |
461 | |
462 | if (!idata->ic.write_flag) { |
463 | if (copy_to_user(to: (void __user *)(unsigned long)ic->data_ptr, |
464 | from: idata->buf, n: idata->buf_bytes)) |
465 | return -EFAULT; |
466 | } |
467 | |
468 | return 0; |
469 | } |
470 | |
471 | static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md, |
472 | struct mmc_blk_ioc_data **idatas, int i) |
473 | { |
474 | struct mmc_command cmd = {}, sbc = {}; |
475 | struct mmc_data data = {}; |
476 | struct mmc_request mrq = {}; |
477 | struct scatterlist sg; |
478 | bool r1b_resp; |
479 | unsigned int busy_timeout_ms; |
480 | int err; |
481 | unsigned int target_part; |
482 | struct mmc_blk_ioc_data *idata = idatas[i]; |
483 | struct mmc_blk_ioc_data *prev_idata = NULL; |
484 | |
485 | if (!card || !md || !idata) |
486 | return -EINVAL; |
487 | |
488 | if (idata->flags & MMC_BLK_IOC_DROP) |
489 | return 0; |
490 | |
491 | if (idata->flags & MMC_BLK_IOC_SBC && i > 0) |
492 | prev_idata = idatas[i - 1]; |
493 | |
494 | /* |
495 | * The RPMB accesses comes in from the character device, so we |
496 | * need to target these explicitly. Else we just target the |
497 | * partition type for the block device the ioctl() was issued |
498 | * on. |
499 | */ |
500 | if (idata->rpmb) { |
501 | /* Support multiple RPMB partitions */ |
502 | target_part = idata->rpmb->part_index; |
503 | target_part |= EXT_CSD_PART_CONFIG_ACC_RPMB; |
504 | } else { |
505 | target_part = md->part_type; |
506 | } |
507 | |
508 | cmd.opcode = idata->ic.opcode; |
509 | cmd.arg = idata->ic.arg; |
510 | cmd.flags = idata->ic.flags; |
511 | |
512 | if (idata->buf_bytes) { |
513 | data.sg = &sg; |
514 | data.sg_len = 1; |
515 | data.blksz = idata->ic.blksz; |
516 | data.blocks = idata->ic.blocks; |
517 | |
518 | sg_init_one(data.sg, idata->buf, idata->buf_bytes); |
519 | |
520 | if (idata->ic.write_flag) |
521 | data.flags = MMC_DATA_WRITE; |
522 | else |
523 | data.flags = MMC_DATA_READ; |
524 | |
525 | /* data.flags must already be set before doing this. */ |
526 | mmc_set_data_timeout(data: &data, card); |
527 | |
528 | /* Allow overriding the timeout_ns for empirical tuning. */ |
529 | if (idata->ic.data_timeout_ns) |
530 | data.timeout_ns = idata->ic.data_timeout_ns; |
531 | |
532 | mrq.data = &data; |
533 | } |
534 | |
535 | mrq.cmd = &cmd; |
536 | |
537 | err = mmc_blk_part_switch(card, part_type: target_part); |
538 | if (err) |
539 | return err; |
540 | |
541 | if (idata->ic.is_acmd) { |
542 | err = mmc_app_cmd(host: card->host, card); |
543 | if (err) |
544 | return err; |
545 | } |
546 | |
547 | if (idata->rpmb || prev_idata) { |
548 | sbc.opcode = MMC_SET_BLOCK_COUNT; |
549 | /* |
550 | * We don't do any blockcount validation because the max size |
551 | * may be increased by a future standard. We just copy the |
552 | * 'Reliable Write' bit here. |
553 | */ |
554 | sbc.arg = data.blocks | (idata->ic.write_flag & BIT(31)); |
555 | if (prev_idata) |
556 | sbc.arg = prev_idata->ic.arg; |
557 | sbc.flags = MMC_RSP_R1 | MMC_CMD_AC; |
558 | mrq.sbc = &sbc; |
559 | } |
560 | |
561 | if ((MMC_EXTRACT_INDEX_FROM_ARG(cmd.arg) == EXT_CSD_SANITIZE_START) && |
562 | (cmd.opcode == MMC_SWITCH)) |
563 | return mmc_sanitize(card, timeout_ms: idata->ic.cmd_timeout_ms); |
564 | |
565 | /* If it's an R1B response we need some more preparations. */ |
566 | busy_timeout_ms = idata->ic.cmd_timeout_ms ? : MMC_BLK_TIMEOUT_MS; |
567 | r1b_resp = (cmd.flags & MMC_RSP_R1B) == MMC_RSP_R1B; |
568 | if (r1b_resp) |
569 | mmc_prepare_busy_cmd(host: card->host, cmd: &cmd, timeout_ms: busy_timeout_ms); |
570 | |
571 | mmc_wait_for_req(host: card->host, mrq: &mrq); |
572 | memcpy(&idata->ic.response, cmd.resp, sizeof(cmd.resp)); |
573 | |
574 | if (prev_idata) { |
575 | memcpy(&prev_idata->ic.response, sbc.resp, sizeof(sbc.resp)); |
576 | if (sbc.error) { |
577 | dev_err(mmc_dev(card->host), "%s: sbc error %d\n" , |
578 | __func__, sbc.error); |
579 | return sbc.error; |
580 | } |
581 | } |
582 | |
583 | if (cmd.error) { |
584 | dev_err(mmc_dev(card->host), "%s: cmd error %d\n" , |
585 | __func__, cmd.error); |
586 | return cmd.error; |
587 | } |
588 | if (data.error) { |
589 | dev_err(mmc_dev(card->host), "%s: data error %d\n" , |
590 | __func__, data.error); |
591 | return data.error; |
592 | } |
593 | |
594 | /* |
595 | * Make sure the cache of the PARTITION_CONFIG register and |
596 | * PARTITION_ACCESS bits is updated in case the ioctl ext_csd write |
597 | * changed it successfully. |
598 | */ |
599 | if ((MMC_EXTRACT_INDEX_FROM_ARG(cmd.arg) == EXT_CSD_PART_CONFIG) && |
600 | (cmd.opcode == MMC_SWITCH)) { |
601 | struct mmc_blk_data *main_md = dev_get_drvdata(dev: &card->dev); |
602 | u8 value = MMC_EXTRACT_VALUE_FROM_ARG(cmd.arg); |
603 | |
604 | /* |
605 | * Update cache so the next mmc_blk_part_switch call operates |
606 | * on up-to-date data. |
607 | */ |
608 | card->ext_csd.part_config = value; |
609 | main_md->part_curr = value & EXT_CSD_PART_CONFIG_ACC_MASK; |
610 | } |
611 | |
612 | /* |
613 | * Make sure to update CACHE_CTRL in case it was changed. The cache |
614 | * will get turned back on if the card is re-initialized, e.g. |
615 | * suspend/resume or hw reset in recovery. |
616 | */ |
617 | if ((MMC_EXTRACT_INDEX_FROM_ARG(cmd.arg) == EXT_CSD_CACHE_CTRL) && |
618 | (cmd.opcode == MMC_SWITCH)) { |
619 | u8 value = MMC_EXTRACT_VALUE_FROM_ARG(cmd.arg) & 1; |
620 | |
621 | card->ext_csd.cache_ctrl = value; |
622 | } |
623 | |
624 | /* |
625 | * According to the SD specs, some commands require a delay after |
626 | * issuing the command. |
627 | */ |
628 | if (idata->ic.postsleep_min_us) |
629 | usleep_range(min: idata->ic.postsleep_min_us, max: idata->ic.postsleep_max_us); |
630 | |
631 | if (mmc_host_is_spi(card->host)) { |
632 | if (idata->ic.write_flag || r1b_resp || cmd.flags & MMC_RSP_SPI_BUSY) |
633 | return mmc_spi_err_check(card); |
634 | return err; |
635 | } |
636 | |
637 | /* |
638 | * Ensure RPMB, writes and R1B responses are completed by polling with |
639 | * CMD13. Note that, usually we don't need to poll when using HW busy |
640 | * detection, but here it's needed since some commands may indicate the |
641 | * error through the R1 status bits. |
642 | */ |
643 | if (idata->rpmb || idata->ic.write_flag || r1b_resp) { |
644 | struct mmc_blk_busy_data cb_data = { |
645 | .card = card, |
646 | }; |
647 | |
648 | err = __mmc_poll_for_busy(host: card->host, period_us: 0, timeout_ms: busy_timeout_ms, |
649 | busy_cb: &mmc_blk_busy_cb, cb_data: &cb_data); |
650 | |
651 | idata->ic.response[0] = cb_data.status; |
652 | } |
653 | |
654 | return err; |
655 | } |
656 | |
657 | static int mmc_blk_ioctl_cmd(struct mmc_blk_data *md, |
658 | struct mmc_ioc_cmd __user *ic_ptr, |
659 | struct mmc_rpmb_data *rpmb) |
660 | { |
661 | struct mmc_blk_ioc_data *idata; |
662 | struct mmc_blk_ioc_data *idatas[1]; |
663 | struct mmc_queue *mq; |
664 | struct mmc_card *card; |
665 | int err = 0, ioc_err = 0; |
666 | struct request *req; |
667 | |
668 | idata = mmc_blk_ioctl_copy_from_user(user: ic_ptr); |
669 | if (IS_ERR(ptr: idata)) |
670 | return PTR_ERR(ptr: idata); |
671 | /* This will be NULL on non-RPMB ioctl():s */ |
672 | idata->rpmb = rpmb; |
673 | |
674 | card = md->queue.card; |
675 | if (IS_ERR(ptr: card)) { |
676 | err = PTR_ERR(ptr: card); |
677 | goto cmd_done; |
678 | } |
679 | |
680 | /* |
681 | * Dispatch the ioctl() into the block request queue. |
682 | */ |
683 | mq = &md->queue; |
684 | req = blk_mq_alloc_request(q: mq->queue, |
685 | opf: idata->ic.write_flag ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN, flags: 0); |
686 | if (IS_ERR(ptr: req)) { |
687 | err = PTR_ERR(ptr: req); |
688 | goto cmd_done; |
689 | } |
690 | idatas[0] = idata; |
691 | req_to_mmc_queue_req(rq: req)->drv_op = |
692 | rpmb ? MMC_DRV_OP_IOCTL_RPMB : MMC_DRV_OP_IOCTL; |
693 | req_to_mmc_queue_req(rq: req)->drv_op_result = -EIO; |
694 | req_to_mmc_queue_req(rq: req)->drv_op_data = idatas; |
695 | req_to_mmc_queue_req(rq: req)->ioc_count = 1; |
696 | blk_execute_rq(rq: req, at_head: false); |
697 | ioc_err = req_to_mmc_queue_req(rq: req)->drv_op_result; |
698 | err = mmc_blk_ioctl_copy_to_user(ic_ptr, idata); |
699 | blk_mq_free_request(rq: req); |
700 | |
701 | cmd_done: |
702 | kfree(objp: idata->buf); |
703 | kfree(objp: idata); |
704 | return ioc_err ? ioc_err : err; |
705 | } |
706 | |
707 | static int mmc_blk_ioctl_multi_cmd(struct mmc_blk_data *md, |
708 | struct mmc_ioc_multi_cmd __user *user, |
709 | struct mmc_rpmb_data *rpmb) |
710 | { |
711 | struct mmc_blk_ioc_data **idata = NULL; |
712 | struct mmc_ioc_cmd __user *cmds = user->cmds; |
713 | struct mmc_card *card; |
714 | struct mmc_queue *mq; |
715 | int err = 0, ioc_err = 0; |
716 | __u64 num_of_cmds; |
717 | unsigned int i, n; |
718 | struct request *req; |
719 | |
720 | if (copy_from_user(to: &num_of_cmds, from: &user->num_of_cmds, |
721 | n: sizeof(num_of_cmds))) |
722 | return -EFAULT; |
723 | |
724 | if (!num_of_cmds) |
725 | return 0; |
726 | |
727 | if (num_of_cmds > MMC_IOC_MAX_CMDS) |
728 | return -EINVAL; |
729 | |
730 | n = num_of_cmds; |
731 | idata = kcalloc(n, size: sizeof(*idata), GFP_KERNEL); |
732 | if (!idata) |
733 | return -ENOMEM; |
734 | |
735 | for (i = 0; i < n; i++) { |
736 | idata[i] = mmc_blk_ioctl_copy_from_user(user: &cmds[i]); |
737 | if (IS_ERR(ptr: idata[i])) { |
738 | err = PTR_ERR(ptr: idata[i]); |
739 | n = i; |
740 | goto cmd_err; |
741 | } |
742 | /* This will be NULL on non-RPMB ioctl():s */ |
743 | idata[i]->rpmb = rpmb; |
744 | } |
745 | |
746 | card = md->queue.card; |
747 | if (IS_ERR(ptr: card)) { |
748 | err = PTR_ERR(ptr: card); |
749 | goto cmd_err; |
750 | } |
751 | |
752 | |
753 | /* |
754 | * Dispatch the ioctl()s into the block request queue. |
755 | */ |
756 | mq = &md->queue; |
757 | req = blk_mq_alloc_request(q: mq->queue, |
758 | opf: idata[0]->ic.write_flag ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN, flags: 0); |
759 | if (IS_ERR(ptr: req)) { |
760 | err = PTR_ERR(ptr: req); |
761 | goto cmd_err; |
762 | } |
763 | req_to_mmc_queue_req(rq: req)->drv_op = |
764 | rpmb ? MMC_DRV_OP_IOCTL_RPMB : MMC_DRV_OP_IOCTL; |
765 | req_to_mmc_queue_req(rq: req)->drv_op_result = -EIO; |
766 | req_to_mmc_queue_req(rq: req)->drv_op_data = idata; |
767 | req_to_mmc_queue_req(rq: req)->ioc_count = n; |
768 | blk_execute_rq(rq: req, at_head: false); |
769 | ioc_err = req_to_mmc_queue_req(rq: req)->drv_op_result; |
770 | |
771 | /* copy to user if data and response */ |
772 | for (i = 0; i < n && !err; i++) |
773 | err = mmc_blk_ioctl_copy_to_user(ic_ptr: &cmds[i], idata: idata[i]); |
774 | |
775 | blk_mq_free_request(rq: req); |
776 | |
777 | cmd_err: |
778 | for (i = 0; i < n; i++) { |
779 | kfree(objp: idata[i]->buf); |
780 | kfree(objp: idata[i]); |
781 | } |
782 | kfree(objp: idata); |
783 | return ioc_err ? ioc_err : err; |
784 | } |
785 | |
786 | static int mmc_blk_check_blkdev(struct block_device *bdev) |
787 | { |
788 | /* |
789 | * The caller must have CAP_SYS_RAWIO, and must be calling this on the |
790 | * whole block device, not on a partition. This prevents overspray |
791 | * between sibling partitions. |
792 | */ |
793 | if (!capable(CAP_SYS_RAWIO) || bdev_is_partition(bdev)) |
794 | return -EPERM; |
795 | return 0; |
796 | } |
797 | |
798 | static int mmc_blk_ioctl(struct block_device *bdev, blk_mode_t mode, |
799 | unsigned int cmd, unsigned long arg) |
800 | { |
801 | struct mmc_blk_data *md; |
802 | int ret; |
803 | |
804 | switch (cmd) { |
805 | case MMC_IOC_CMD: |
806 | ret = mmc_blk_check_blkdev(bdev); |
807 | if (ret) |
808 | return ret; |
809 | md = mmc_blk_get(disk: bdev->bd_disk); |
810 | if (!md) |
811 | return -EINVAL; |
812 | ret = mmc_blk_ioctl_cmd(md, |
813 | ic_ptr: (struct mmc_ioc_cmd __user *)arg, |
814 | NULL); |
815 | mmc_blk_put(md); |
816 | return ret; |
817 | case MMC_IOC_MULTI_CMD: |
818 | ret = mmc_blk_check_blkdev(bdev); |
819 | if (ret) |
820 | return ret; |
821 | md = mmc_blk_get(disk: bdev->bd_disk); |
822 | if (!md) |
823 | return -EINVAL; |
824 | ret = mmc_blk_ioctl_multi_cmd(md, |
825 | user: (struct mmc_ioc_multi_cmd __user *)arg, |
826 | NULL); |
827 | mmc_blk_put(md); |
828 | return ret; |
829 | default: |
830 | return -EINVAL; |
831 | } |
832 | } |
833 | |
834 | #ifdef CONFIG_COMPAT |
835 | static int mmc_blk_compat_ioctl(struct block_device *bdev, blk_mode_t mode, |
836 | unsigned int cmd, unsigned long arg) |
837 | { |
838 | return mmc_blk_ioctl(bdev, mode, cmd, arg: (unsigned long) compat_ptr(uptr: arg)); |
839 | } |
840 | #endif |
841 | |
842 | static int mmc_blk_alternative_gpt_sector(struct gendisk *disk, |
843 | sector_t *sector) |
844 | { |
845 | struct mmc_blk_data *md; |
846 | int ret; |
847 | |
848 | md = mmc_blk_get(disk); |
849 | if (!md) |
850 | return -EINVAL; |
851 | |
852 | if (md->queue.card) |
853 | ret = mmc_card_alternative_gpt_sector(card: md->queue.card, sector); |
854 | else |
855 | ret = -ENODEV; |
856 | |
857 | mmc_blk_put(md); |
858 | |
859 | return ret; |
860 | } |
861 | |
862 | static const struct block_device_operations mmc_bdops = { |
863 | .open = mmc_blk_open, |
864 | .release = mmc_blk_release, |
865 | .getgeo = mmc_blk_getgeo, |
866 | .owner = THIS_MODULE, |
867 | .ioctl = mmc_blk_ioctl, |
868 | #ifdef CONFIG_COMPAT |
869 | .compat_ioctl = mmc_blk_compat_ioctl, |
870 | #endif |
871 | .alternative_gpt_sector = mmc_blk_alternative_gpt_sector, |
872 | }; |
873 | |
874 | static int mmc_blk_part_switch_pre(struct mmc_card *card, |
875 | unsigned int part_type) |
876 | { |
877 | const unsigned int mask = EXT_CSD_PART_CONFIG_ACC_MASK; |
878 | const unsigned int rpmb = EXT_CSD_PART_CONFIG_ACC_RPMB; |
879 | int ret = 0; |
880 | |
881 | if ((part_type & mask) == rpmb) { |
882 | if (card->ext_csd.cmdq_en) { |
883 | ret = mmc_cmdq_disable(card); |
884 | if (ret) |
885 | return ret; |
886 | } |
887 | mmc_retune_pause(host: card->host); |
888 | } |
889 | |
890 | return ret; |
891 | } |
892 | |
893 | static int mmc_blk_part_switch_post(struct mmc_card *card, |
894 | unsigned int part_type) |
895 | { |
896 | const unsigned int mask = EXT_CSD_PART_CONFIG_ACC_MASK; |
897 | const unsigned int rpmb = EXT_CSD_PART_CONFIG_ACC_RPMB; |
898 | int ret = 0; |
899 | |
900 | if ((part_type & mask) == rpmb) { |
901 | mmc_retune_unpause(host: card->host); |
902 | if (card->reenable_cmdq && !card->ext_csd.cmdq_en) |
903 | ret = mmc_cmdq_enable(card); |
904 | } |
905 | |
906 | return ret; |
907 | } |
908 | |
909 | static inline int mmc_blk_part_switch(struct mmc_card *card, |
910 | unsigned int part_type) |
911 | { |
912 | int ret = 0; |
913 | struct mmc_blk_data *main_md = dev_get_drvdata(dev: &card->dev); |
914 | |
915 | if (main_md->part_curr == part_type) |
916 | return 0; |
917 | |
918 | if (mmc_card_mmc(card)) { |
919 | u8 part_config = card->ext_csd.part_config; |
920 | |
921 | ret = mmc_blk_part_switch_pre(card, part_type); |
922 | if (ret) |
923 | return ret; |
924 | |
925 | part_config &= ~EXT_CSD_PART_CONFIG_ACC_MASK; |
926 | part_config |= part_type; |
927 | |
928 | ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, |
929 | EXT_CSD_PART_CONFIG, value: part_config, |
930 | timeout_ms: card->ext_csd.part_time); |
931 | if (ret) { |
932 | mmc_blk_part_switch_post(card, part_type); |
933 | return ret; |
934 | } |
935 | |
936 | card->ext_csd.part_config = part_config; |
937 | |
938 | ret = mmc_blk_part_switch_post(card, part_type: main_md->part_curr); |
939 | } |
940 | |
941 | main_md->part_curr = part_type; |
942 | return ret; |
943 | } |
944 | |
945 | static int mmc_sd_num_wr_blocks(struct mmc_card *card, u32 *written_blocks) |
946 | { |
947 | int err; |
948 | u32 result; |
949 | __be32 *blocks; |
950 | |
951 | struct mmc_request mrq = {}; |
952 | struct mmc_command cmd = {}; |
953 | struct mmc_data data = {}; |
954 | |
955 | struct scatterlist sg; |
956 | |
957 | err = mmc_app_cmd(host: card->host, card); |
958 | if (err) |
959 | return err; |
960 | |
961 | cmd.opcode = SD_APP_SEND_NUM_WR_BLKS; |
962 | cmd.arg = 0; |
963 | cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC; |
964 | |
965 | data.blksz = 4; |
966 | data.blocks = 1; |
967 | data.flags = MMC_DATA_READ; |
968 | data.sg = &sg; |
969 | data.sg_len = 1; |
970 | mmc_set_data_timeout(data: &data, card); |
971 | |
972 | mrq.cmd = &cmd; |
973 | mrq.data = &data; |
974 | |
975 | blocks = kmalloc(size: 4, GFP_KERNEL); |
976 | if (!blocks) |
977 | return -ENOMEM; |
978 | |
979 | sg_init_one(&sg, blocks, 4); |
980 | |
981 | mmc_wait_for_req(host: card->host, mrq: &mrq); |
982 | |
983 | result = ntohl(*blocks); |
984 | kfree(objp: blocks); |
985 | |
986 | if (cmd.error || data.error) |
987 | return -EIO; |
988 | |
989 | *written_blocks = result; |
990 | |
991 | return 0; |
992 | } |
993 | |
994 | static unsigned int mmc_blk_clock_khz(struct mmc_host *host) |
995 | { |
996 | if (host->actual_clock) |
997 | return host->actual_clock / 1000; |
998 | |
999 | /* Clock may be subject to a divisor, fudge it by a factor of 2. */ |
1000 | if (host->ios.clock) |
1001 | return host->ios.clock / 2000; |
1002 | |
1003 | /* How can there be no clock */ |
1004 | WARN_ON_ONCE(1); |
1005 | return 100; /* 100 kHz is minimum possible value */ |
1006 | } |
1007 | |
1008 | static unsigned int mmc_blk_data_timeout_ms(struct mmc_host *host, |
1009 | struct mmc_data *data) |
1010 | { |
1011 | unsigned int ms = DIV_ROUND_UP(data->timeout_ns, 1000000); |
1012 | unsigned int khz; |
1013 | |
1014 | if (data->timeout_clks) { |
1015 | khz = mmc_blk_clock_khz(host); |
1016 | ms += DIV_ROUND_UP(data->timeout_clks, khz); |
1017 | } |
1018 | |
1019 | return ms; |
1020 | } |
1021 | |
1022 | /* |
1023 | * Attempts to reset the card and get back to the requested partition. |
1024 | * Therefore any error here must result in cancelling the block layer |
1025 | * request, it must not be reattempted without going through the mmc_blk |
1026 | * partition sanity checks. |
1027 | */ |
1028 | static int mmc_blk_reset(struct mmc_blk_data *md, struct mmc_host *host, |
1029 | int type) |
1030 | { |
1031 | int err; |
1032 | struct mmc_blk_data *main_md = dev_get_drvdata(dev: &host->card->dev); |
1033 | |
1034 | if (md->reset_done & type) |
1035 | return -EEXIST; |
1036 | |
1037 | md->reset_done |= type; |
1038 | err = mmc_hw_reset(card: host->card); |
1039 | /* |
1040 | * A successful reset will leave the card in the main partition, but |
1041 | * upon failure it might not be, so set it to MMC_BLK_PART_INVALID |
1042 | * in that case. |
1043 | */ |
1044 | main_md->part_curr = err ? MMC_BLK_PART_INVALID : main_md->part_type; |
1045 | if (err) |
1046 | return err; |
1047 | /* Ensure we switch back to the correct partition */ |
1048 | if (mmc_blk_part_switch(card: host->card, part_type: md->part_type)) |
1049 | /* |
1050 | * We have failed to get back into the correct |
1051 | * partition, so we need to abort the whole request. |
1052 | */ |
1053 | return -ENODEV; |
1054 | return 0; |
1055 | } |
1056 | |
1057 | static inline void mmc_blk_reset_success(struct mmc_blk_data *md, int type) |
1058 | { |
1059 | md->reset_done &= ~type; |
1060 | } |
1061 | |
1062 | static void mmc_blk_check_sbc(struct mmc_queue_req *mq_rq) |
1063 | { |
1064 | struct mmc_blk_ioc_data **idata = mq_rq->drv_op_data; |
1065 | int i; |
1066 | |
1067 | for (i = 1; i < mq_rq->ioc_count; i++) { |
1068 | if (idata[i - 1]->ic.opcode == MMC_SET_BLOCK_COUNT && |
1069 | mmc_op_multi(opcode: idata[i]->ic.opcode)) { |
1070 | idata[i - 1]->flags |= MMC_BLK_IOC_DROP; |
1071 | idata[i]->flags |= MMC_BLK_IOC_SBC; |
1072 | } |
1073 | } |
1074 | } |
1075 | |
1076 | /* |
1077 | * The non-block commands come back from the block layer after it queued it and |
1078 | * processed it with all other requests and then they get issued in this |
1079 | * function. |
1080 | */ |
1081 | static void mmc_blk_issue_drv_op(struct mmc_queue *mq, struct request *req) |
1082 | { |
1083 | struct mmc_queue_req *mq_rq; |
1084 | struct mmc_card *card = mq->card; |
1085 | struct mmc_blk_data *md = mq->blkdata; |
1086 | struct mmc_blk_ioc_data **idata; |
1087 | bool rpmb_ioctl; |
1088 | u8 **ext_csd; |
1089 | u32 status; |
1090 | int ret; |
1091 | int i; |
1092 | |
1093 | mq_rq = req_to_mmc_queue_req(rq: req); |
1094 | rpmb_ioctl = (mq_rq->drv_op == MMC_DRV_OP_IOCTL_RPMB); |
1095 | |
1096 | switch (mq_rq->drv_op) { |
1097 | case MMC_DRV_OP_IOCTL: |
1098 | if (card->ext_csd.cmdq_en) { |
1099 | ret = mmc_cmdq_disable(card); |
1100 | if (ret) |
1101 | break; |
1102 | } |
1103 | |
1104 | mmc_blk_check_sbc(mq_rq); |
1105 | |
1106 | fallthrough; |
1107 | case MMC_DRV_OP_IOCTL_RPMB: |
1108 | idata = mq_rq->drv_op_data; |
1109 | for (i = 0, ret = 0; i < mq_rq->ioc_count; i++) { |
1110 | ret = __mmc_blk_ioctl_cmd(card, md, idatas: idata, i); |
1111 | if (ret) |
1112 | break; |
1113 | } |
1114 | /* Always switch back to main area after RPMB access */ |
1115 | if (rpmb_ioctl) |
1116 | mmc_blk_part_switch(card, part_type: 0); |
1117 | else if (card->reenable_cmdq && !card->ext_csd.cmdq_en) |
1118 | mmc_cmdq_enable(card); |
1119 | break; |
1120 | case MMC_DRV_OP_BOOT_WP: |
1121 | ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BOOT_WP, |
1122 | value: card->ext_csd.boot_ro_lock | |
1123 | EXT_CSD_BOOT_WP_B_PWR_WP_EN, |
1124 | timeout_ms: card->ext_csd.part_time); |
1125 | if (ret) |
1126 | pr_err("%s: Locking boot partition ro until next power on failed: %d\n" , |
1127 | md->disk->disk_name, ret); |
1128 | else |
1129 | card->ext_csd.boot_ro_lock |= |
1130 | EXT_CSD_BOOT_WP_B_PWR_WP_EN; |
1131 | break; |
1132 | case MMC_DRV_OP_GET_CARD_STATUS: |
1133 | ret = mmc_send_status(card, status: &status); |
1134 | if (!ret) |
1135 | ret = status; |
1136 | break; |
1137 | case MMC_DRV_OP_GET_EXT_CSD: |
1138 | ext_csd = mq_rq->drv_op_data; |
1139 | ret = mmc_get_ext_csd(card, new_ext_csd: ext_csd); |
1140 | break; |
1141 | default: |
1142 | pr_err("%s: unknown driver specific operation\n" , |
1143 | md->disk->disk_name); |
1144 | ret = -EINVAL; |
1145 | break; |
1146 | } |
1147 | mq_rq->drv_op_result = ret; |
1148 | blk_mq_end_request(rq: req, error: ret ? BLK_STS_IOERR : BLK_STS_OK); |
1149 | } |
1150 | |
1151 | static void mmc_blk_issue_erase_rq(struct mmc_queue *mq, struct request *req, |
1152 | int type, unsigned int erase_arg) |
1153 | { |
1154 | struct mmc_blk_data *md = mq->blkdata; |
1155 | struct mmc_card *card = md->queue.card; |
1156 | unsigned int from, nr; |
1157 | int err = 0; |
1158 | blk_status_t status = BLK_STS_OK; |
1159 | |
1160 | if (!mmc_can_erase(card)) { |
1161 | status = BLK_STS_NOTSUPP; |
1162 | goto fail; |
1163 | } |
1164 | |
1165 | from = blk_rq_pos(rq: req); |
1166 | nr = blk_rq_sectors(rq: req); |
1167 | |
1168 | do { |
1169 | err = 0; |
1170 | if (card->quirks & MMC_QUIRK_INAND_CMD38) { |
1171 | err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, |
1172 | INAND_CMD38_ARG_EXT_CSD, |
1173 | value: erase_arg == MMC_TRIM_ARG ? |
1174 | INAND_CMD38_ARG_TRIM : |
1175 | INAND_CMD38_ARG_ERASE, |
1176 | timeout_ms: card->ext_csd.generic_cmd6_time); |
1177 | } |
1178 | if (!err) |
1179 | err = mmc_erase(card, from, nr, arg: erase_arg); |
1180 | } while (err == -EIO && !mmc_blk_reset(md, host: card->host, type)); |
1181 | if (err) |
1182 | status = BLK_STS_IOERR; |
1183 | else |
1184 | mmc_blk_reset_success(md, type); |
1185 | fail: |
1186 | blk_mq_end_request(rq: req, error: status); |
1187 | } |
1188 | |
1189 | static void mmc_blk_issue_trim_rq(struct mmc_queue *mq, struct request *req) |
1190 | { |
1191 | mmc_blk_issue_erase_rq(mq, req, MMC_BLK_TRIM, MMC_TRIM_ARG); |
1192 | } |
1193 | |
1194 | static void mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req) |
1195 | { |
1196 | struct mmc_blk_data *md = mq->blkdata; |
1197 | struct mmc_card *card = md->queue.card; |
1198 | unsigned int arg = card->erase_arg; |
1199 | |
1200 | if (mmc_card_broken_sd_discard(c: card)) |
1201 | arg = SD_ERASE_ARG; |
1202 | |
1203 | mmc_blk_issue_erase_rq(mq, req, MMC_BLK_DISCARD, erase_arg: arg); |
1204 | } |
1205 | |
1206 | static void mmc_blk_issue_secdiscard_rq(struct mmc_queue *mq, |
1207 | struct request *req) |
1208 | { |
1209 | struct mmc_blk_data *md = mq->blkdata; |
1210 | struct mmc_card *card = md->queue.card; |
1211 | unsigned int from, nr, arg; |
1212 | int err = 0, type = MMC_BLK_SECDISCARD; |
1213 | blk_status_t status = BLK_STS_OK; |
1214 | |
1215 | if (!(mmc_can_secure_erase_trim(card))) { |
1216 | status = BLK_STS_NOTSUPP; |
1217 | goto out; |
1218 | } |
1219 | |
1220 | from = blk_rq_pos(rq: req); |
1221 | nr = blk_rq_sectors(rq: req); |
1222 | |
1223 | if (mmc_can_trim(card) && !mmc_erase_group_aligned(card, from, nr)) |
1224 | arg = MMC_SECURE_TRIM1_ARG; |
1225 | else |
1226 | arg = MMC_SECURE_ERASE_ARG; |
1227 | |
1228 | retry: |
1229 | if (card->quirks & MMC_QUIRK_INAND_CMD38) { |
1230 | err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, |
1231 | INAND_CMD38_ARG_EXT_CSD, |
1232 | value: arg == MMC_SECURE_TRIM1_ARG ? |
1233 | INAND_CMD38_ARG_SECTRIM1 : |
1234 | INAND_CMD38_ARG_SECERASE, |
1235 | timeout_ms: card->ext_csd.generic_cmd6_time); |
1236 | if (err) |
1237 | goto out_retry; |
1238 | } |
1239 | |
1240 | err = mmc_erase(card, from, nr, arg); |
1241 | if (err == -EIO) |
1242 | goto out_retry; |
1243 | if (err) { |
1244 | status = BLK_STS_IOERR; |
1245 | goto out; |
1246 | } |
1247 | |
1248 | if (arg == MMC_SECURE_TRIM1_ARG) { |
1249 | if (card->quirks & MMC_QUIRK_INAND_CMD38) { |
1250 | err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, |
1251 | INAND_CMD38_ARG_EXT_CSD, |
1252 | INAND_CMD38_ARG_SECTRIM2, |
1253 | timeout_ms: card->ext_csd.generic_cmd6_time); |
1254 | if (err) |
1255 | goto out_retry; |
1256 | } |
1257 | |
1258 | err = mmc_erase(card, from, nr, MMC_SECURE_TRIM2_ARG); |
1259 | if (err == -EIO) |
1260 | goto out_retry; |
1261 | if (err) { |
1262 | status = BLK_STS_IOERR; |
1263 | goto out; |
1264 | } |
1265 | } |
1266 | |
1267 | out_retry: |
1268 | if (err && !mmc_blk_reset(md, host: card->host, type)) |
1269 | goto retry; |
1270 | if (!err) |
1271 | mmc_blk_reset_success(md, type); |
1272 | out: |
1273 | blk_mq_end_request(rq: req, error: status); |
1274 | } |
1275 | |
1276 | static void mmc_blk_issue_flush(struct mmc_queue *mq, struct request *req) |
1277 | { |
1278 | struct mmc_blk_data *md = mq->blkdata; |
1279 | struct mmc_card *card = md->queue.card; |
1280 | int ret = 0; |
1281 | |
1282 | ret = mmc_flush_cache(host: card->host); |
1283 | blk_mq_end_request(rq: req, error: ret ? BLK_STS_IOERR : BLK_STS_OK); |
1284 | } |
1285 | |
1286 | /* |
1287 | * Reformat current write as a reliable write, supporting |
1288 | * both legacy and the enhanced reliable write MMC cards. |
1289 | * In each transfer we'll handle only as much as a single |
1290 | * reliable write can handle, thus finish the request in |
1291 | * partial completions. |
1292 | */ |
1293 | static inline void mmc_apply_rel_rw(struct mmc_blk_request *brq, |
1294 | struct mmc_card *card, |
1295 | struct request *req) |
1296 | { |
1297 | if (!(card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN)) { |
1298 | /* Legacy mode imposes restrictions on transfers. */ |
1299 | if (!IS_ALIGNED(blk_rq_pos(req), card->ext_csd.rel_sectors)) |
1300 | brq->data.blocks = 1; |
1301 | |
1302 | if (brq->data.blocks > card->ext_csd.rel_sectors) |
1303 | brq->data.blocks = card->ext_csd.rel_sectors; |
1304 | else if (brq->data.blocks < card->ext_csd.rel_sectors) |
1305 | brq->data.blocks = 1; |
1306 | } |
1307 | } |
1308 | |
1309 | #define CMD_ERRORS_EXCL_OOR \ |
1310 | (R1_ADDRESS_ERROR | /* Misaligned address */ \ |
1311 | R1_BLOCK_LEN_ERROR | /* Transferred block length incorrect */\ |
1312 | R1_WP_VIOLATION | /* Tried to write to protected block */ \ |
1313 | R1_CARD_ECC_FAILED | /* Card ECC failed */ \ |
1314 | R1_CC_ERROR | /* Card controller error */ \ |
1315 | R1_ERROR) /* General/unknown error */ |
1316 | |
1317 | #define CMD_ERRORS \ |
1318 | (CMD_ERRORS_EXCL_OOR | \ |
1319 | R1_OUT_OF_RANGE) /* Command argument out of range */ \ |
1320 | |
1321 | static void mmc_blk_eval_resp_error(struct mmc_blk_request *brq) |
1322 | { |
1323 | u32 val; |
1324 | |
1325 | /* |
1326 | * Per the SD specification(physical layer version 4.10)[1], |
1327 | * section 4.3.3, it explicitly states that "When the last |
1328 | * block of user area is read using CMD18, the host should |
1329 | * ignore OUT_OF_RANGE error that may occur even the sequence |
1330 | * is correct". And JESD84-B51 for eMMC also has a similar |
1331 | * statement on section 6.8.3. |
1332 | * |
1333 | * Multiple block read/write could be done by either predefined |
1334 | * method, namely CMD23, or open-ending mode. For open-ending mode, |
1335 | * we should ignore the OUT_OF_RANGE error as it's normal behaviour. |
1336 | * |
1337 | * However the spec[1] doesn't tell us whether we should also |
1338 | * ignore that for predefined method. But per the spec[1], section |
1339 | * 4.15 Set Block Count Command, it says"If illegal block count |
1340 | * is set, out of range error will be indicated during read/write |
1341 | * operation (For example, data transfer is stopped at user area |
1342 | * boundary)." In another word, we could expect a out of range error |
1343 | * in the response for the following CMD18/25. And if argument of |
1344 | * CMD23 + the argument of CMD18/25 exceed the max number of blocks, |
1345 | * we could also expect to get a -ETIMEDOUT or any error number from |
1346 | * the host drivers due to missing data response(for write)/data(for |
1347 | * read), as the cards will stop the data transfer by itself per the |
1348 | * spec. So we only need to check R1_OUT_OF_RANGE for open-ending mode. |
1349 | */ |
1350 | |
1351 | if (!brq->stop.error) { |
1352 | bool oor_with_open_end; |
1353 | /* If there is no error yet, check R1 response */ |
1354 | |
1355 | val = brq->stop.resp[0] & CMD_ERRORS; |
1356 | oor_with_open_end = val & R1_OUT_OF_RANGE && !brq->mrq.sbc; |
1357 | |
1358 | if (val && !oor_with_open_end) |
1359 | brq->stop.error = -EIO; |
1360 | } |
1361 | } |
1362 | |
1363 | static void mmc_blk_data_prep(struct mmc_queue *mq, struct mmc_queue_req *mqrq, |
1364 | int recovery_mode, bool *do_rel_wr_p, |
1365 | bool *do_data_tag_p) |
1366 | { |
1367 | struct mmc_blk_data *md = mq->blkdata; |
1368 | struct mmc_card *card = md->queue.card; |
1369 | struct mmc_blk_request *brq = &mqrq->brq; |
1370 | struct request *req = mmc_queue_req_to_req(mqr: mqrq); |
1371 | bool do_rel_wr, do_data_tag; |
1372 | |
1373 | /* |
1374 | * Reliable writes are used to implement Forced Unit Access and |
1375 | * are supported only on MMCs. |
1376 | */ |
1377 | do_rel_wr = (req->cmd_flags & REQ_FUA) && |
1378 | rq_data_dir(req) == WRITE && |
1379 | (md->flags & MMC_BLK_REL_WR); |
1380 | |
1381 | memset(brq, 0, sizeof(struct mmc_blk_request)); |
1382 | |
1383 | mmc_crypto_prepare_req(mqrq); |
1384 | |
1385 | brq->mrq.data = &brq->data; |
1386 | brq->mrq.tag = req->tag; |
1387 | |
1388 | brq->stop.opcode = MMC_STOP_TRANSMISSION; |
1389 | brq->stop.arg = 0; |
1390 | |
1391 | if (rq_data_dir(req) == READ) { |
1392 | brq->data.flags = MMC_DATA_READ; |
1393 | brq->stop.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC; |
1394 | } else { |
1395 | brq->data.flags = MMC_DATA_WRITE; |
1396 | brq->stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC; |
1397 | } |
1398 | |
1399 | brq->data.blksz = 512; |
1400 | brq->data.blocks = blk_rq_sectors(rq: req); |
1401 | brq->data.blk_addr = blk_rq_pos(rq: req); |
1402 | |
1403 | /* |
1404 | * The command queue supports 2 priorities: "high" (1) and "simple" (0). |
1405 | * The eMMC will give "high" priority tasks priority over "simple" |
1406 | * priority tasks. Here we always set "simple" priority by not setting |
1407 | * MMC_DATA_PRIO. |
1408 | */ |
1409 | |
1410 | /* |
1411 | * The block layer doesn't support all sector count |
1412 | * restrictions, so we need to be prepared for too big |
1413 | * requests. |
1414 | */ |
1415 | if (brq->data.blocks > card->host->max_blk_count) |
1416 | brq->data.blocks = card->host->max_blk_count; |
1417 | |
1418 | if (brq->data.blocks > 1) { |
1419 | /* |
1420 | * Some SD cards in SPI mode return a CRC error or even lock up |
1421 | * completely when trying to read the last block using a |
1422 | * multiblock read command. |
1423 | */ |
1424 | if (mmc_host_is_spi(card->host) && (rq_data_dir(req) == READ) && |
1425 | (blk_rq_pos(rq: req) + blk_rq_sectors(rq: req) == |
1426 | get_capacity(disk: md->disk))) |
1427 | brq->data.blocks--; |
1428 | |
1429 | /* |
1430 | * After a read error, we redo the request one (native) sector |
1431 | * at a time in order to accurately determine which |
1432 | * sectors can be read successfully. |
1433 | */ |
1434 | if (recovery_mode) |
1435 | brq->data.blocks = queue_physical_block_size(q: mq->queue) >> 9; |
1436 | |
1437 | /* |
1438 | * Some controllers have HW issues while operating |
1439 | * in multiple I/O mode |
1440 | */ |
1441 | if (card->host->ops->multi_io_quirk) |
1442 | brq->data.blocks = card->host->ops->multi_io_quirk(card, |
1443 | (rq_data_dir(req) == READ) ? |
1444 | MMC_DATA_READ : MMC_DATA_WRITE, |
1445 | brq->data.blocks); |
1446 | } |
1447 | |
1448 | if (do_rel_wr) { |
1449 | mmc_apply_rel_rw(brq, card, req); |
1450 | brq->data.flags |= MMC_DATA_REL_WR; |
1451 | } |
1452 | |
1453 | /* |
1454 | * Data tag is used only during writing meta data to speed |
1455 | * up write and any subsequent read of this meta data |
1456 | */ |
1457 | do_data_tag = card->ext_csd.data_tag_unit_size && |
1458 | (req->cmd_flags & REQ_META) && |
1459 | (rq_data_dir(req) == WRITE) && |
1460 | ((brq->data.blocks * brq->data.blksz) >= |
1461 | card->ext_csd.data_tag_unit_size); |
1462 | |
1463 | if (do_data_tag) |
1464 | brq->data.flags |= MMC_DATA_DAT_TAG; |
1465 | |
1466 | mmc_set_data_timeout(data: &brq->data, card); |
1467 | |
1468 | brq->data.sg = mqrq->sg; |
1469 | brq->data.sg_len = mmc_queue_map_sg(mq, mqrq); |
1470 | |
1471 | /* |
1472 | * Adjust the sg list so it is the same size as the |
1473 | * request. |
1474 | */ |
1475 | if (brq->data.blocks != blk_rq_sectors(rq: req)) { |
1476 | int i, data_size = brq->data.blocks << 9; |
1477 | struct scatterlist *sg; |
1478 | |
1479 | for_each_sg(brq->data.sg, sg, brq->data.sg_len, i) { |
1480 | data_size -= sg->length; |
1481 | if (data_size <= 0) { |
1482 | sg->length += data_size; |
1483 | i++; |
1484 | break; |
1485 | } |
1486 | } |
1487 | brq->data.sg_len = i; |
1488 | } |
1489 | |
1490 | if (do_rel_wr_p) |
1491 | *do_rel_wr_p = do_rel_wr; |
1492 | |
1493 | if (do_data_tag_p) |
1494 | *do_data_tag_p = do_data_tag; |
1495 | } |
1496 | |
1497 | #define MMC_CQE_RETRIES 2 |
1498 | |
1499 | static void mmc_blk_cqe_complete_rq(struct mmc_queue *mq, struct request *req) |
1500 | { |
1501 | struct mmc_queue_req *mqrq = req_to_mmc_queue_req(rq: req); |
1502 | struct mmc_request *mrq = &mqrq->brq.mrq; |
1503 | struct request_queue *q = req->q; |
1504 | struct mmc_host *host = mq->card->host; |
1505 | enum mmc_issue_type issue_type = mmc_issue_type(mq, req); |
1506 | unsigned long flags; |
1507 | bool put_card; |
1508 | int err; |
1509 | |
1510 | mmc_cqe_post_req(host, mrq); |
1511 | |
1512 | if (mrq->cmd && mrq->cmd->error) |
1513 | err = mrq->cmd->error; |
1514 | else if (mrq->data && mrq->data->error) |
1515 | err = mrq->data->error; |
1516 | else |
1517 | err = 0; |
1518 | |
1519 | if (err) { |
1520 | if (mqrq->retries++ < MMC_CQE_RETRIES) |
1521 | blk_mq_requeue_request(rq: req, kick_requeue_list: true); |
1522 | else |
1523 | blk_mq_end_request(rq: req, BLK_STS_IOERR); |
1524 | } else if (mrq->data) { |
1525 | if (blk_update_request(rq: req, BLK_STS_OK, nr_bytes: mrq->data->bytes_xfered)) |
1526 | blk_mq_requeue_request(rq: req, kick_requeue_list: true); |
1527 | else |
1528 | __blk_mq_end_request(rq: req, BLK_STS_OK); |
1529 | } else if (mq->in_recovery) { |
1530 | blk_mq_requeue_request(rq: req, kick_requeue_list: true); |
1531 | } else { |
1532 | blk_mq_end_request(rq: req, BLK_STS_OK); |
1533 | } |
1534 | |
1535 | spin_lock_irqsave(&mq->lock, flags); |
1536 | |
1537 | mq->in_flight[issue_type] -= 1; |
1538 | |
1539 | put_card = (mmc_tot_in_flight(mq) == 0); |
1540 | |
1541 | mmc_cqe_check_busy(mq); |
1542 | |
1543 | spin_unlock_irqrestore(lock: &mq->lock, flags); |
1544 | |
1545 | if (!mq->cqe_busy) |
1546 | blk_mq_run_hw_queues(q, async: true); |
1547 | |
1548 | if (put_card) |
1549 | mmc_put_card(card: mq->card, ctx: &mq->ctx); |
1550 | } |
1551 | |
1552 | void mmc_blk_cqe_recovery(struct mmc_queue *mq) |
1553 | { |
1554 | struct mmc_card *card = mq->card; |
1555 | struct mmc_host *host = card->host; |
1556 | int err; |
1557 | |
1558 | pr_debug("%s: CQE recovery start\n" , mmc_hostname(host)); |
1559 | |
1560 | err = mmc_cqe_recovery(host); |
1561 | if (err) |
1562 | mmc_blk_reset(md: mq->blkdata, host, MMC_BLK_CQE_RECOVERY); |
1563 | mmc_blk_reset_success(md: mq->blkdata, MMC_BLK_CQE_RECOVERY); |
1564 | |
1565 | pr_debug("%s: CQE recovery done\n" , mmc_hostname(host)); |
1566 | } |
1567 | |
1568 | static void mmc_blk_cqe_req_done(struct mmc_request *mrq) |
1569 | { |
1570 | struct mmc_queue_req *mqrq = container_of(mrq, struct mmc_queue_req, |
1571 | brq.mrq); |
1572 | struct request *req = mmc_queue_req_to_req(mqr: mqrq); |
1573 | struct request_queue *q = req->q; |
1574 | struct mmc_queue *mq = q->queuedata; |
1575 | |
1576 | /* |
1577 | * Block layer timeouts race with completions which means the normal |
1578 | * completion path cannot be used during recovery. |
1579 | */ |
1580 | if (mq->in_recovery) |
1581 | mmc_blk_cqe_complete_rq(mq, req); |
1582 | else if (likely(!blk_should_fake_timeout(req->q))) |
1583 | blk_mq_complete_request(rq: req); |
1584 | } |
1585 | |
1586 | static int mmc_blk_cqe_start_req(struct mmc_host *host, struct mmc_request *mrq) |
1587 | { |
1588 | mrq->done = mmc_blk_cqe_req_done; |
1589 | mrq->recovery_notifier = mmc_cqe_recovery_notifier; |
1590 | |
1591 | return mmc_cqe_start_req(host, mrq); |
1592 | } |
1593 | |
1594 | static struct mmc_request *mmc_blk_cqe_prep_dcmd(struct mmc_queue_req *mqrq, |
1595 | struct request *req) |
1596 | { |
1597 | struct mmc_blk_request *brq = &mqrq->brq; |
1598 | |
1599 | memset(brq, 0, sizeof(*brq)); |
1600 | |
1601 | brq->mrq.cmd = &brq->cmd; |
1602 | brq->mrq.tag = req->tag; |
1603 | |
1604 | return &brq->mrq; |
1605 | } |
1606 | |
1607 | static int mmc_blk_cqe_issue_flush(struct mmc_queue *mq, struct request *req) |
1608 | { |
1609 | struct mmc_queue_req *mqrq = req_to_mmc_queue_req(rq: req); |
1610 | struct mmc_request *mrq = mmc_blk_cqe_prep_dcmd(mqrq, req); |
1611 | |
1612 | mrq->cmd->opcode = MMC_SWITCH; |
1613 | mrq->cmd->arg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) | |
1614 | (EXT_CSD_FLUSH_CACHE << 16) | |
1615 | (1 << 8) | |
1616 | EXT_CSD_CMD_SET_NORMAL; |
1617 | mrq->cmd->flags = MMC_CMD_AC | MMC_RSP_R1B; |
1618 | |
1619 | return mmc_blk_cqe_start_req(host: mq->card->host, mrq); |
1620 | } |
1621 | |
1622 | static int mmc_blk_hsq_issue_rw_rq(struct mmc_queue *mq, struct request *req) |
1623 | { |
1624 | struct mmc_queue_req *mqrq = req_to_mmc_queue_req(rq: req); |
1625 | struct mmc_host *host = mq->card->host; |
1626 | int err; |
1627 | |
1628 | mmc_blk_rw_rq_prep(mqrq, card: mq->card, recovery_mode: 0, mq); |
1629 | mqrq->brq.mrq.done = mmc_blk_hsq_req_done; |
1630 | mmc_pre_req(host, mrq: &mqrq->brq.mrq); |
1631 | |
1632 | err = mmc_cqe_start_req(host, mrq: &mqrq->brq.mrq); |
1633 | if (err) |
1634 | mmc_post_req(host, mrq: &mqrq->brq.mrq, err); |
1635 | |
1636 | return err; |
1637 | } |
1638 | |
1639 | static int mmc_blk_cqe_issue_rw_rq(struct mmc_queue *mq, struct request *req) |
1640 | { |
1641 | struct mmc_queue_req *mqrq = req_to_mmc_queue_req(rq: req); |
1642 | struct mmc_host *host = mq->card->host; |
1643 | |
1644 | if (host->hsq_enabled) |
1645 | return mmc_blk_hsq_issue_rw_rq(mq, req); |
1646 | |
1647 | mmc_blk_data_prep(mq, mqrq, recovery_mode: 0, NULL, NULL); |
1648 | |
1649 | return mmc_blk_cqe_start_req(host: mq->card->host, mrq: &mqrq->brq.mrq); |
1650 | } |
1651 | |
1652 | static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq, |
1653 | struct mmc_card *card, |
1654 | int recovery_mode, |
1655 | struct mmc_queue *mq) |
1656 | { |
1657 | u32 readcmd, writecmd; |
1658 | struct mmc_blk_request *brq = &mqrq->brq; |
1659 | struct request *req = mmc_queue_req_to_req(mqr: mqrq); |
1660 | struct mmc_blk_data *md = mq->blkdata; |
1661 | bool do_rel_wr, do_data_tag; |
1662 | |
1663 | mmc_blk_data_prep(mq, mqrq, recovery_mode, do_rel_wr_p: &do_rel_wr, do_data_tag_p: &do_data_tag); |
1664 | |
1665 | brq->mrq.cmd = &brq->cmd; |
1666 | |
1667 | brq->cmd.arg = blk_rq_pos(rq: req); |
1668 | if (!mmc_card_blockaddr(card)) |
1669 | brq->cmd.arg <<= 9; |
1670 | brq->cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC; |
1671 | |
1672 | if (brq->data.blocks > 1 || do_rel_wr) { |
1673 | /* SPI multiblock writes terminate using a special |
1674 | * token, not a STOP_TRANSMISSION request. |
1675 | */ |
1676 | if (!mmc_host_is_spi(card->host) || |
1677 | rq_data_dir(req) == READ) |
1678 | brq->mrq.stop = &brq->stop; |
1679 | readcmd = MMC_READ_MULTIPLE_BLOCK; |
1680 | writecmd = MMC_WRITE_MULTIPLE_BLOCK; |
1681 | } else { |
1682 | brq->mrq.stop = NULL; |
1683 | readcmd = MMC_READ_SINGLE_BLOCK; |
1684 | writecmd = MMC_WRITE_BLOCK; |
1685 | } |
1686 | brq->cmd.opcode = rq_data_dir(req) == READ ? readcmd : writecmd; |
1687 | |
1688 | /* |
1689 | * Pre-defined multi-block transfers are preferable to |
1690 | * open ended-ones (and necessary for reliable writes). |
1691 | * However, it is not sufficient to just send CMD23, |
1692 | * and avoid the final CMD12, as on an error condition |
1693 | * CMD12 (stop) needs to be sent anyway. This, coupled |
1694 | * with Auto-CMD23 enhancements provided by some |
1695 | * hosts, means that the complexity of dealing |
1696 | * with this is best left to the host. If CMD23 is |
1697 | * supported by card and host, we'll fill sbc in and let |
1698 | * the host deal with handling it correctly. This means |
1699 | * that for hosts that don't expose MMC_CAP_CMD23, no |
1700 | * change of behavior will be observed. |
1701 | * |
1702 | * N.B: Some MMC cards experience perf degradation. |
1703 | * We'll avoid using CMD23-bounded multiblock writes for |
1704 | * these, while retaining features like reliable writes. |
1705 | */ |
1706 | if ((md->flags & MMC_BLK_CMD23) && mmc_op_multi(opcode: brq->cmd.opcode) && |
1707 | (do_rel_wr || !(card->quirks & MMC_QUIRK_BLK_NO_CMD23) || |
1708 | do_data_tag)) { |
1709 | brq->sbc.opcode = MMC_SET_BLOCK_COUNT; |
1710 | brq->sbc.arg = brq->data.blocks | |
1711 | (do_rel_wr ? (1 << 31) : 0) | |
1712 | (do_data_tag ? (1 << 29) : 0); |
1713 | brq->sbc.flags = MMC_RSP_R1 | MMC_CMD_AC; |
1714 | brq->mrq.sbc = &brq->sbc; |
1715 | } |
1716 | } |
1717 | |
1718 | #define MMC_MAX_RETRIES 5 |
1719 | #define MMC_DATA_RETRIES 2 |
1720 | #define MMC_NO_RETRIES (MMC_MAX_RETRIES + 1) |
1721 | |
1722 | static int mmc_blk_send_stop(struct mmc_card *card, unsigned int timeout) |
1723 | { |
1724 | struct mmc_command cmd = { |
1725 | .opcode = MMC_STOP_TRANSMISSION, |
1726 | .flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC, |
1727 | /* Some hosts wait for busy anyway, so provide a busy timeout */ |
1728 | .busy_timeout = timeout, |
1729 | }; |
1730 | |
1731 | return mmc_wait_for_cmd(host: card->host, cmd: &cmd, retries: 5); |
1732 | } |
1733 | |
1734 | static int mmc_blk_fix_state(struct mmc_card *card, struct request *req) |
1735 | { |
1736 | struct mmc_queue_req *mqrq = req_to_mmc_queue_req(rq: req); |
1737 | struct mmc_blk_request *brq = &mqrq->brq; |
1738 | unsigned int timeout = mmc_blk_data_timeout_ms(host: card->host, data: &brq->data); |
1739 | int err; |
1740 | |
1741 | mmc_retune_hold_now(host: card->host); |
1742 | |
1743 | mmc_blk_send_stop(card, timeout); |
1744 | |
1745 | err = mmc_poll_for_busy(card, timeout_ms: timeout, retry_crc_err: false, busy_cmd: MMC_BUSY_IO); |
1746 | |
1747 | mmc_retune_release(host: card->host); |
1748 | |
1749 | return err; |
1750 | } |
1751 | |
1752 | #define MMC_READ_SINGLE_RETRIES 2 |
1753 | |
1754 | /* Single (native) sector read during recovery */ |
1755 | static void mmc_blk_read_single(struct mmc_queue *mq, struct request *req) |
1756 | { |
1757 | struct mmc_queue_req *mqrq = req_to_mmc_queue_req(rq: req); |
1758 | struct mmc_request *mrq = &mqrq->brq.mrq; |
1759 | struct mmc_card *card = mq->card; |
1760 | struct mmc_host *host = card->host; |
1761 | blk_status_t error = BLK_STS_OK; |
1762 | size_t bytes_per_read = queue_physical_block_size(q: mq->queue); |
1763 | |
1764 | do { |
1765 | u32 status; |
1766 | int err; |
1767 | int retries = 0; |
1768 | |
1769 | while (retries++ <= MMC_READ_SINGLE_RETRIES) { |
1770 | mmc_blk_rw_rq_prep(mqrq, card, recovery_mode: 1, mq); |
1771 | |
1772 | mmc_wait_for_req(host, mrq); |
1773 | |
1774 | err = mmc_send_status(card, status: &status); |
1775 | if (err) |
1776 | goto error_exit; |
1777 | |
1778 | if (!mmc_host_is_spi(host) && |
1779 | !mmc_ready_for_data(status)) { |
1780 | err = mmc_blk_fix_state(card, req); |
1781 | if (err) |
1782 | goto error_exit; |
1783 | } |
1784 | |
1785 | if (!mrq->cmd->error) |
1786 | break; |
1787 | } |
1788 | |
1789 | if (mrq->cmd->error || |
1790 | mrq->data->error || |
1791 | (!mmc_host_is_spi(host) && |
1792 | (mrq->cmd->resp[0] & CMD_ERRORS || status & CMD_ERRORS))) |
1793 | error = BLK_STS_IOERR; |
1794 | else |
1795 | error = BLK_STS_OK; |
1796 | |
1797 | } while (blk_update_request(rq: req, error, nr_bytes: bytes_per_read)); |
1798 | |
1799 | return; |
1800 | |
1801 | error_exit: |
1802 | mrq->data->bytes_xfered = 0; |
1803 | blk_update_request(rq: req, BLK_STS_IOERR, nr_bytes: bytes_per_read); |
1804 | /* Let it try the remaining request again */ |
1805 | if (mqrq->retries > MMC_MAX_RETRIES - 1) |
1806 | mqrq->retries = MMC_MAX_RETRIES - 1; |
1807 | } |
1808 | |
1809 | static inline bool mmc_blk_oor_valid(struct mmc_blk_request *brq) |
1810 | { |
1811 | return !!brq->mrq.sbc; |
1812 | } |
1813 | |
1814 | static inline u32 mmc_blk_stop_err_bits(struct mmc_blk_request *brq) |
1815 | { |
1816 | return mmc_blk_oor_valid(brq) ? CMD_ERRORS : CMD_ERRORS_EXCL_OOR; |
1817 | } |
1818 | |
1819 | /* |
1820 | * Check for errors the host controller driver might not have seen such as |
1821 | * response mode errors or invalid card state. |
1822 | */ |
1823 | static bool mmc_blk_status_error(struct request *req, u32 status) |
1824 | { |
1825 | struct mmc_queue_req *mqrq = req_to_mmc_queue_req(rq: req); |
1826 | struct mmc_blk_request *brq = &mqrq->brq; |
1827 | struct mmc_queue *mq = req->q->queuedata; |
1828 | u32 stop_err_bits; |
1829 | |
1830 | if (mmc_host_is_spi(mq->card->host)) |
1831 | return false; |
1832 | |
1833 | stop_err_bits = mmc_blk_stop_err_bits(brq); |
1834 | |
1835 | return brq->cmd.resp[0] & CMD_ERRORS || |
1836 | brq->stop.resp[0] & stop_err_bits || |
1837 | status & stop_err_bits || |
1838 | (rq_data_dir(req) == WRITE && !mmc_ready_for_data(status)); |
1839 | } |
1840 | |
1841 | static inline bool mmc_blk_cmd_started(struct mmc_blk_request *brq) |
1842 | { |
1843 | return !brq->sbc.error && !brq->cmd.error && |
1844 | !(brq->cmd.resp[0] & CMD_ERRORS); |
1845 | } |
1846 | |
1847 | /* |
1848 | * Requests are completed by mmc_blk_mq_complete_rq() which sets simple |
1849 | * policy: |
1850 | * 1. A request that has transferred at least some data is considered |
1851 | * successful and will be requeued if there is remaining data to |
1852 | * transfer. |
1853 | * 2. Otherwise the number of retries is incremented and the request |
1854 | * will be requeued if there are remaining retries. |
1855 | * 3. Otherwise the request will be errored out. |
1856 | * That means mmc_blk_mq_complete_rq() is controlled by bytes_xfered and |
1857 | * mqrq->retries. So there are only 4 possible actions here: |
1858 | * 1. do not accept the bytes_xfered value i.e. set it to zero |
1859 | * 2. change mqrq->retries to determine the number of retries |
1860 | * 3. try to reset the card |
1861 | * 4. read one sector at a time |
1862 | */ |
1863 | static void mmc_blk_mq_rw_recovery(struct mmc_queue *mq, struct request *req) |
1864 | { |
1865 | int type = rq_data_dir(req) == READ ? MMC_BLK_READ : MMC_BLK_WRITE; |
1866 | struct mmc_queue_req *mqrq = req_to_mmc_queue_req(rq: req); |
1867 | struct mmc_blk_request *brq = &mqrq->brq; |
1868 | struct mmc_blk_data *md = mq->blkdata; |
1869 | struct mmc_card *card = mq->card; |
1870 | u32 status; |
1871 | u32 blocks; |
1872 | int err; |
1873 | |
1874 | /* |
1875 | * Some errors the host driver might not have seen. Set the number of |
1876 | * bytes transferred to zero in that case. |
1877 | */ |
1878 | err = __mmc_send_status(card, status: &status, retries: 0); |
1879 | if (err || mmc_blk_status_error(req, status)) |
1880 | brq->data.bytes_xfered = 0; |
1881 | |
1882 | mmc_retune_release(host: card->host); |
1883 | |
1884 | /* |
1885 | * Try again to get the status. This also provides an opportunity for |
1886 | * re-tuning. |
1887 | */ |
1888 | if (err) |
1889 | err = __mmc_send_status(card, status: &status, retries: 0); |
1890 | |
1891 | /* |
1892 | * Nothing more to do after the number of bytes transferred has been |
1893 | * updated and there is no card. |
1894 | */ |
1895 | if (err && mmc_detect_card_removed(host: card->host)) |
1896 | return; |
1897 | |
1898 | /* Try to get back to "tran" state */ |
1899 | if (!mmc_host_is_spi(mq->card->host) && |
1900 | (err || !mmc_ready_for_data(status))) |
1901 | err = mmc_blk_fix_state(card: mq->card, req); |
1902 | |
1903 | /* |
1904 | * Special case for SD cards where the card might record the number of |
1905 | * blocks written. |
1906 | */ |
1907 | if (!err && mmc_blk_cmd_started(brq) && mmc_card_sd(card) && |
1908 | rq_data_dir(req) == WRITE) { |
1909 | if (mmc_sd_num_wr_blocks(card, written_blocks: &blocks)) |
1910 | brq->data.bytes_xfered = 0; |
1911 | else |
1912 | brq->data.bytes_xfered = blocks << 9; |
1913 | } |
1914 | |
1915 | /* Reset if the card is in a bad state */ |
1916 | if (!mmc_host_is_spi(mq->card->host) && |
1917 | err && mmc_blk_reset(md, host: card->host, type)) { |
1918 | pr_err("%s: recovery failed!\n" , req->q->disk->disk_name); |
1919 | mqrq->retries = MMC_NO_RETRIES; |
1920 | return; |
1921 | } |
1922 | |
1923 | /* |
1924 | * If anything was done, just return and if there is anything remaining |
1925 | * on the request it will get requeued. |
1926 | */ |
1927 | if (brq->data.bytes_xfered) |
1928 | return; |
1929 | |
1930 | /* Reset before last retry */ |
1931 | if (mqrq->retries + 1 == MMC_MAX_RETRIES && |
1932 | mmc_blk_reset(md, host: card->host, type)) |
1933 | return; |
1934 | |
1935 | /* Command errors fail fast, so use all MMC_MAX_RETRIES */ |
1936 | if (brq->sbc.error || brq->cmd.error) |
1937 | return; |
1938 | |
1939 | /* Reduce the remaining retries for data errors */ |
1940 | if (mqrq->retries < MMC_MAX_RETRIES - MMC_DATA_RETRIES) { |
1941 | mqrq->retries = MMC_MAX_RETRIES - MMC_DATA_RETRIES; |
1942 | return; |
1943 | } |
1944 | |
1945 | if (rq_data_dir(req) == READ && brq->data.blocks > |
1946 | queue_physical_block_size(q: mq->queue) >> 9) { |
1947 | /* Read one (native) sector at a time */ |
1948 | mmc_blk_read_single(mq, req); |
1949 | return; |
1950 | } |
1951 | } |
1952 | |
1953 | static inline bool mmc_blk_rq_error(struct mmc_blk_request *brq) |
1954 | { |
1955 | mmc_blk_eval_resp_error(brq); |
1956 | |
1957 | return brq->sbc.error || brq->cmd.error || brq->stop.error || |
1958 | brq->data.error || brq->cmd.resp[0] & CMD_ERRORS; |
1959 | } |
1960 | |
1961 | static int mmc_spi_err_check(struct mmc_card *card) |
1962 | { |
1963 | u32 status = 0; |
1964 | int err; |
1965 | |
1966 | /* |
1967 | * SPI does not have a TRAN state we have to wait on, instead the |
1968 | * card is ready again when it no longer holds the line LOW. |
1969 | * We still have to ensure two things here before we know the write |
1970 | * was successful: |
1971 | * 1. The card has not disconnected during busy and we actually read our |
1972 | * own pull-up, thinking it was still connected, so ensure it |
1973 | * still responds. |
1974 | * 2. Check for any error bits, in particular R1_SPI_IDLE to catch a |
1975 | * just reconnected card after being disconnected during busy. |
1976 | */ |
1977 | err = __mmc_send_status(card, status: &status, retries: 0); |
1978 | if (err) |
1979 | return err; |
1980 | /* All R1 and R2 bits of SPI are errors in our case */ |
1981 | if (status) |
1982 | return -EIO; |
1983 | return 0; |
1984 | } |
1985 | |
1986 | static int mmc_blk_busy_cb(void *cb_data, bool *busy) |
1987 | { |
1988 | struct mmc_blk_busy_data *data = cb_data; |
1989 | u32 status = 0; |
1990 | int err; |
1991 | |
1992 | err = mmc_send_status(card: data->card, status: &status); |
1993 | if (err) |
1994 | return err; |
1995 | |
1996 | /* Accumulate response error bits. */ |
1997 | data->status |= status; |
1998 | |
1999 | *busy = !mmc_ready_for_data(status); |
2000 | return 0; |
2001 | } |
2002 | |
2003 | static int mmc_blk_card_busy(struct mmc_card *card, struct request *req) |
2004 | { |
2005 | struct mmc_queue_req *mqrq = req_to_mmc_queue_req(rq: req); |
2006 | struct mmc_blk_busy_data cb_data; |
2007 | int err; |
2008 | |
2009 | if (rq_data_dir(req) == READ) |
2010 | return 0; |
2011 | |
2012 | if (mmc_host_is_spi(card->host)) { |
2013 | err = mmc_spi_err_check(card); |
2014 | if (err) |
2015 | mqrq->brq.data.bytes_xfered = 0; |
2016 | return err; |
2017 | } |
2018 | |
2019 | cb_data.card = card; |
2020 | cb_data.status = 0; |
2021 | err = __mmc_poll_for_busy(host: card->host, period_us: 0, MMC_BLK_TIMEOUT_MS, |
2022 | busy_cb: &mmc_blk_busy_cb, cb_data: &cb_data); |
2023 | |
2024 | /* |
2025 | * Do not assume data transferred correctly if there are any error bits |
2026 | * set. |
2027 | */ |
2028 | if (cb_data.status & mmc_blk_stop_err_bits(brq: &mqrq->brq)) { |
2029 | mqrq->brq.data.bytes_xfered = 0; |
2030 | err = err ? err : -EIO; |
2031 | } |
2032 | |
2033 | /* Copy the exception bit so it will be seen later on */ |
2034 | if (mmc_card_mmc(card) && cb_data.status & R1_EXCEPTION_EVENT) |
2035 | mqrq->brq.cmd.resp[0] |= R1_EXCEPTION_EVENT; |
2036 | |
2037 | return err; |
2038 | } |
2039 | |
2040 | static inline void mmc_blk_rw_reset_success(struct mmc_queue *mq, |
2041 | struct request *req) |
2042 | { |
2043 | int type = rq_data_dir(req) == READ ? MMC_BLK_READ : MMC_BLK_WRITE; |
2044 | |
2045 | mmc_blk_reset_success(md: mq->blkdata, type); |
2046 | } |
2047 | |
2048 | static void mmc_blk_mq_complete_rq(struct mmc_queue *mq, struct request *req) |
2049 | { |
2050 | struct mmc_queue_req *mqrq = req_to_mmc_queue_req(rq: req); |
2051 | unsigned int nr_bytes = mqrq->brq.data.bytes_xfered; |
2052 | |
2053 | if (nr_bytes) { |
2054 | if (blk_update_request(rq: req, BLK_STS_OK, nr_bytes)) |
2055 | blk_mq_requeue_request(rq: req, kick_requeue_list: true); |
2056 | else |
2057 | __blk_mq_end_request(rq: req, BLK_STS_OK); |
2058 | } else if (!blk_rq_bytes(rq: req)) { |
2059 | __blk_mq_end_request(rq: req, BLK_STS_IOERR); |
2060 | } else if (mqrq->retries++ < MMC_MAX_RETRIES) { |
2061 | blk_mq_requeue_request(rq: req, kick_requeue_list: true); |
2062 | } else { |
2063 | if (mmc_card_removed(mq->card)) |
2064 | req->rq_flags |= RQF_QUIET; |
2065 | blk_mq_end_request(rq: req, BLK_STS_IOERR); |
2066 | } |
2067 | } |
2068 | |
2069 | static bool mmc_blk_urgent_bkops_needed(struct mmc_queue *mq, |
2070 | struct mmc_queue_req *mqrq) |
2071 | { |
2072 | return mmc_card_mmc(mq->card) && !mmc_host_is_spi(mq->card->host) && |
2073 | (mqrq->brq.cmd.resp[0] & R1_EXCEPTION_EVENT || |
2074 | mqrq->brq.stop.resp[0] & R1_EXCEPTION_EVENT); |
2075 | } |
2076 | |
2077 | static void mmc_blk_urgent_bkops(struct mmc_queue *mq, |
2078 | struct mmc_queue_req *mqrq) |
2079 | { |
2080 | if (mmc_blk_urgent_bkops_needed(mq, mqrq)) |
2081 | mmc_run_bkops(card: mq->card); |
2082 | } |
2083 | |
2084 | static void mmc_blk_hsq_req_done(struct mmc_request *mrq) |
2085 | { |
2086 | struct mmc_queue_req *mqrq = |
2087 | container_of(mrq, struct mmc_queue_req, brq.mrq); |
2088 | struct request *req = mmc_queue_req_to_req(mqr: mqrq); |
2089 | struct request_queue *q = req->q; |
2090 | struct mmc_queue *mq = q->queuedata; |
2091 | struct mmc_host *host = mq->card->host; |
2092 | unsigned long flags; |
2093 | |
2094 | if (mmc_blk_rq_error(brq: &mqrq->brq) || |
2095 | mmc_blk_urgent_bkops_needed(mq, mqrq)) { |
2096 | spin_lock_irqsave(&mq->lock, flags); |
2097 | mq->recovery_needed = true; |
2098 | mq->recovery_req = req; |
2099 | spin_unlock_irqrestore(lock: &mq->lock, flags); |
2100 | |
2101 | host->cqe_ops->cqe_recovery_start(host); |
2102 | |
2103 | schedule_work(work: &mq->recovery_work); |
2104 | return; |
2105 | } |
2106 | |
2107 | mmc_blk_rw_reset_success(mq, req); |
2108 | |
2109 | /* |
2110 | * Block layer timeouts race with completions which means the normal |
2111 | * completion path cannot be used during recovery. |
2112 | */ |
2113 | if (mq->in_recovery) |
2114 | mmc_blk_cqe_complete_rq(mq, req); |
2115 | else if (likely(!blk_should_fake_timeout(req->q))) |
2116 | blk_mq_complete_request(rq: req); |
2117 | } |
2118 | |
2119 | void mmc_blk_mq_complete(struct request *req) |
2120 | { |
2121 | struct mmc_queue *mq = req->q->queuedata; |
2122 | struct mmc_host *host = mq->card->host; |
2123 | |
2124 | if (host->cqe_enabled) |
2125 | mmc_blk_cqe_complete_rq(mq, req); |
2126 | else if (likely(!blk_should_fake_timeout(req->q))) |
2127 | mmc_blk_mq_complete_rq(mq, req); |
2128 | } |
2129 | |
2130 | static void mmc_blk_mq_poll_completion(struct mmc_queue *mq, |
2131 | struct request *req) |
2132 | { |
2133 | struct mmc_queue_req *mqrq = req_to_mmc_queue_req(rq: req); |
2134 | struct mmc_host *host = mq->card->host; |
2135 | |
2136 | if (mmc_blk_rq_error(brq: &mqrq->brq) || |
2137 | mmc_blk_card_busy(card: mq->card, req)) { |
2138 | mmc_blk_mq_rw_recovery(mq, req); |
2139 | } else { |
2140 | mmc_blk_rw_reset_success(mq, req); |
2141 | mmc_retune_release(host); |
2142 | } |
2143 | |
2144 | mmc_blk_urgent_bkops(mq, mqrq); |
2145 | } |
2146 | |
2147 | static void mmc_blk_mq_dec_in_flight(struct mmc_queue *mq, enum mmc_issue_type issue_type) |
2148 | { |
2149 | unsigned long flags; |
2150 | bool put_card; |
2151 | |
2152 | spin_lock_irqsave(&mq->lock, flags); |
2153 | |
2154 | mq->in_flight[issue_type] -= 1; |
2155 | |
2156 | put_card = (mmc_tot_in_flight(mq) == 0); |
2157 | |
2158 | spin_unlock_irqrestore(lock: &mq->lock, flags); |
2159 | |
2160 | if (put_card) |
2161 | mmc_put_card(card: mq->card, ctx: &mq->ctx); |
2162 | } |
2163 | |
2164 | static void mmc_blk_mq_post_req(struct mmc_queue *mq, struct request *req, |
2165 | bool can_sleep) |
2166 | { |
2167 | enum mmc_issue_type issue_type = mmc_issue_type(mq, req); |
2168 | struct mmc_queue_req *mqrq = req_to_mmc_queue_req(rq: req); |
2169 | struct mmc_request *mrq = &mqrq->brq.mrq; |
2170 | struct mmc_host *host = mq->card->host; |
2171 | |
2172 | mmc_post_req(host, mrq, err: 0); |
2173 | |
2174 | /* |
2175 | * Block layer timeouts race with completions which means the normal |
2176 | * completion path cannot be used during recovery. |
2177 | */ |
2178 | if (mq->in_recovery) { |
2179 | mmc_blk_mq_complete_rq(mq, req); |
2180 | } else if (likely(!blk_should_fake_timeout(req->q))) { |
2181 | if (can_sleep) |
2182 | blk_mq_complete_request_direct(rq: req, complete: mmc_blk_mq_complete); |
2183 | else |
2184 | blk_mq_complete_request(rq: req); |
2185 | } |
2186 | |
2187 | mmc_blk_mq_dec_in_flight(mq, issue_type); |
2188 | } |
2189 | |
2190 | void mmc_blk_mq_recovery(struct mmc_queue *mq) |
2191 | { |
2192 | struct request *req = mq->recovery_req; |
2193 | struct mmc_host *host = mq->card->host; |
2194 | struct mmc_queue_req *mqrq = req_to_mmc_queue_req(rq: req); |
2195 | |
2196 | mq->recovery_req = NULL; |
2197 | mq->rw_wait = false; |
2198 | |
2199 | if (mmc_blk_rq_error(brq: &mqrq->brq)) { |
2200 | mmc_retune_hold_now(host); |
2201 | mmc_blk_mq_rw_recovery(mq, req); |
2202 | } |
2203 | |
2204 | mmc_blk_urgent_bkops(mq, mqrq); |
2205 | |
2206 | mmc_blk_mq_post_req(mq, req, can_sleep: true); |
2207 | } |
2208 | |
2209 | static void mmc_blk_mq_complete_prev_req(struct mmc_queue *mq, |
2210 | struct request **prev_req) |
2211 | { |
2212 | if (mmc_host_done_complete(host: mq->card->host)) |
2213 | return; |
2214 | |
2215 | mutex_lock(&mq->complete_lock); |
2216 | |
2217 | if (!mq->complete_req) |
2218 | goto out_unlock; |
2219 | |
2220 | mmc_blk_mq_poll_completion(mq, req: mq->complete_req); |
2221 | |
2222 | if (prev_req) |
2223 | *prev_req = mq->complete_req; |
2224 | else |
2225 | mmc_blk_mq_post_req(mq, req: mq->complete_req, can_sleep: true); |
2226 | |
2227 | mq->complete_req = NULL; |
2228 | |
2229 | out_unlock: |
2230 | mutex_unlock(lock: &mq->complete_lock); |
2231 | } |
2232 | |
2233 | void mmc_blk_mq_complete_work(struct work_struct *work) |
2234 | { |
2235 | struct mmc_queue *mq = container_of(work, struct mmc_queue, |
2236 | complete_work); |
2237 | |
2238 | mmc_blk_mq_complete_prev_req(mq, NULL); |
2239 | } |
2240 | |
2241 | static void mmc_blk_mq_req_done(struct mmc_request *mrq) |
2242 | { |
2243 | struct mmc_queue_req *mqrq = container_of(mrq, struct mmc_queue_req, |
2244 | brq.mrq); |
2245 | struct request *req = mmc_queue_req_to_req(mqr: mqrq); |
2246 | struct request_queue *q = req->q; |
2247 | struct mmc_queue *mq = q->queuedata; |
2248 | struct mmc_host *host = mq->card->host; |
2249 | unsigned long flags; |
2250 | |
2251 | if (!mmc_host_done_complete(host)) { |
2252 | bool waiting; |
2253 | |
2254 | /* |
2255 | * We cannot complete the request in this context, so record |
2256 | * that there is a request to complete, and that a following |
2257 | * request does not need to wait (although it does need to |
2258 | * complete complete_req first). |
2259 | */ |
2260 | spin_lock_irqsave(&mq->lock, flags); |
2261 | mq->complete_req = req; |
2262 | mq->rw_wait = false; |
2263 | waiting = mq->waiting; |
2264 | spin_unlock_irqrestore(lock: &mq->lock, flags); |
2265 | |
2266 | /* |
2267 | * If 'waiting' then the waiting task will complete this |
2268 | * request, otherwise queue a work to do it. Note that |
2269 | * complete_work may still race with the dispatch of a following |
2270 | * request. |
2271 | */ |
2272 | if (waiting) |
2273 | wake_up(&mq->wait); |
2274 | else |
2275 | queue_work(wq: mq->card->complete_wq, work: &mq->complete_work); |
2276 | |
2277 | return; |
2278 | } |
2279 | |
2280 | /* Take the recovery path for errors or urgent background operations */ |
2281 | if (mmc_blk_rq_error(brq: &mqrq->brq) || |
2282 | mmc_blk_urgent_bkops_needed(mq, mqrq)) { |
2283 | spin_lock_irqsave(&mq->lock, flags); |
2284 | mq->recovery_needed = true; |
2285 | mq->recovery_req = req; |
2286 | spin_unlock_irqrestore(lock: &mq->lock, flags); |
2287 | wake_up(&mq->wait); |
2288 | schedule_work(work: &mq->recovery_work); |
2289 | return; |
2290 | } |
2291 | |
2292 | mmc_blk_rw_reset_success(mq, req); |
2293 | |
2294 | mq->rw_wait = false; |
2295 | wake_up(&mq->wait); |
2296 | |
2297 | /* context unknown */ |
2298 | mmc_blk_mq_post_req(mq, req, can_sleep: false); |
2299 | } |
2300 | |
2301 | static bool mmc_blk_rw_wait_cond(struct mmc_queue *mq, int *err) |
2302 | { |
2303 | unsigned long flags; |
2304 | bool done; |
2305 | |
2306 | /* |
2307 | * Wait while there is another request in progress, but not if recovery |
2308 | * is needed. Also indicate whether there is a request waiting to start. |
2309 | */ |
2310 | spin_lock_irqsave(&mq->lock, flags); |
2311 | if (mq->recovery_needed) { |
2312 | *err = -EBUSY; |
2313 | done = true; |
2314 | } else { |
2315 | done = !mq->rw_wait; |
2316 | } |
2317 | mq->waiting = !done; |
2318 | spin_unlock_irqrestore(lock: &mq->lock, flags); |
2319 | |
2320 | return done; |
2321 | } |
2322 | |
2323 | static int mmc_blk_rw_wait(struct mmc_queue *mq, struct request **prev_req) |
2324 | { |
2325 | int err = 0; |
2326 | |
2327 | wait_event(mq->wait, mmc_blk_rw_wait_cond(mq, &err)); |
2328 | |
2329 | /* Always complete the previous request if there is one */ |
2330 | mmc_blk_mq_complete_prev_req(mq, prev_req); |
2331 | |
2332 | return err; |
2333 | } |
2334 | |
2335 | static int mmc_blk_mq_issue_rw_rq(struct mmc_queue *mq, |
2336 | struct request *req) |
2337 | { |
2338 | struct mmc_queue_req *mqrq = req_to_mmc_queue_req(rq: req); |
2339 | struct mmc_host *host = mq->card->host; |
2340 | struct request *prev_req = NULL; |
2341 | int err = 0; |
2342 | |
2343 | mmc_blk_rw_rq_prep(mqrq, card: mq->card, recovery_mode: 0, mq); |
2344 | |
2345 | mqrq->brq.mrq.done = mmc_blk_mq_req_done; |
2346 | |
2347 | mmc_pre_req(host, mrq: &mqrq->brq.mrq); |
2348 | |
2349 | err = mmc_blk_rw_wait(mq, prev_req: &prev_req); |
2350 | if (err) |
2351 | goto out_post_req; |
2352 | |
2353 | mq->rw_wait = true; |
2354 | |
2355 | err = mmc_start_request(host, mrq: &mqrq->brq.mrq); |
2356 | |
2357 | if (prev_req) |
2358 | mmc_blk_mq_post_req(mq, req: prev_req, can_sleep: true); |
2359 | |
2360 | if (err) |
2361 | mq->rw_wait = false; |
2362 | |
2363 | /* Release re-tuning here where there is no synchronization required */ |
2364 | if (err || mmc_host_done_complete(host)) |
2365 | mmc_retune_release(host); |
2366 | |
2367 | out_post_req: |
2368 | if (err) |
2369 | mmc_post_req(host, mrq: &mqrq->brq.mrq, err); |
2370 | |
2371 | return err; |
2372 | } |
2373 | |
2374 | static int mmc_blk_wait_for_idle(struct mmc_queue *mq, struct mmc_host *host) |
2375 | { |
2376 | if (host->cqe_enabled) |
2377 | return host->cqe_ops->cqe_wait_for_idle(host); |
2378 | |
2379 | return mmc_blk_rw_wait(mq, NULL); |
2380 | } |
2381 | |
2382 | enum mmc_issued mmc_blk_mq_issue_rq(struct mmc_queue *mq, struct request *req) |
2383 | { |
2384 | struct mmc_blk_data *md = mq->blkdata; |
2385 | struct mmc_card *card = md->queue.card; |
2386 | struct mmc_host *host = card->host; |
2387 | int ret; |
2388 | |
2389 | ret = mmc_blk_part_switch(card, part_type: md->part_type); |
2390 | if (ret) |
2391 | return MMC_REQ_FAILED_TO_START; |
2392 | |
2393 | switch (mmc_issue_type(mq, req)) { |
2394 | case MMC_ISSUE_SYNC: |
2395 | ret = mmc_blk_wait_for_idle(mq, host); |
2396 | if (ret) |
2397 | return MMC_REQ_BUSY; |
2398 | switch (req_op(req)) { |
2399 | case REQ_OP_DRV_IN: |
2400 | case REQ_OP_DRV_OUT: |
2401 | mmc_blk_issue_drv_op(mq, req); |
2402 | break; |
2403 | case REQ_OP_DISCARD: |
2404 | mmc_blk_issue_discard_rq(mq, req); |
2405 | break; |
2406 | case REQ_OP_SECURE_ERASE: |
2407 | mmc_blk_issue_secdiscard_rq(mq, req); |
2408 | break; |
2409 | case REQ_OP_WRITE_ZEROES: |
2410 | mmc_blk_issue_trim_rq(mq, req); |
2411 | break; |
2412 | case REQ_OP_FLUSH: |
2413 | mmc_blk_issue_flush(mq, req); |
2414 | break; |
2415 | default: |
2416 | WARN_ON_ONCE(1); |
2417 | return MMC_REQ_FAILED_TO_START; |
2418 | } |
2419 | return MMC_REQ_FINISHED; |
2420 | case MMC_ISSUE_DCMD: |
2421 | case MMC_ISSUE_ASYNC: |
2422 | switch (req_op(req)) { |
2423 | case REQ_OP_FLUSH: |
2424 | if (!mmc_cache_enabled(host)) { |
2425 | blk_mq_end_request(rq: req, BLK_STS_OK); |
2426 | return MMC_REQ_FINISHED; |
2427 | } |
2428 | ret = mmc_blk_cqe_issue_flush(mq, req); |
2429 | break; |
2430 | case REQ_OP_WRITE: |
2431 | card->written_flag = true; |
2432 | fallthrough; |
2433 | case REQ_OP_READ: |
2434 | if (host->cqe_enabled) |
2435 | ret = mmc_blk_cqe_issue_rw_rq(mq, req); |
2436 | else |
2437 | ret = mmc_blk_mq_issue_rw_rq(mq, req); |
2438 | break; |
2439 | default: |
2440 | WARN_ON_ONCE(1); |
2441 | ret = -EINVAL; |
2442 | } |
2443 | if (!ret) |
2444 | return MMC_REQ_STARTED; |
2445 | return ret == -EBUSY ? MMC_REQ_BUSY : MMC_REQ_FAILED_TO_START; |
2446 | default: |
2447 | WARN_ON_ONCE(1); |
2448 | return MMC_REQ_FAILED_TO_START; |
2449 | } |
2450 | } |
2451 | |
2452 | static inline int mmc_blk_readonly(struct mmc_card *card) |
2453 | { |
2454 | return mmc_card_readonly(card) || |
2455 | !(card->csd.cmdclass & CCC_BLOCK_WRITE); |
2456 | } |
2457 | |
2458 | static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card, |
2459 | struct device *parent, |
2460 | sector_t size, |
2461 | bool default_ro, |
2462 | const char *subname, |
2463 | int area_type, |
2464 | unsigned int part_type) |
2465 | { |
2466 | struct mmc_blk_data *md; |
2467 | int devidx, ret; |
2468 | char cap_str[10]; |
2469 | bool cache_enabled = false; |
2470 | bool fua_enabled = false; |
2471 | |
2472 | devidx = ida_alloc_max(ida: &mmc_blk_ida, max: max_devices - 1, GFP_KERNEL); |
2473 | if (devidx < 0) { |
2474 | /* |
2475 | * We get -ENOSPC because there are no more any available |
2476 | * devidx. The reason may be that, either userspace haven't yet |
2477 | * unmounted the partitions, which postpones mmc_blk_release() |
2478 | * from being called, or the device has more partitions than |
2479 | * what we support. |
2480 | */ |
2481 | if (devidx == -ENOSPC) |
2482 | dev_err(mmc_dev(card->host), |
2483 | "no more device IDs available\n" ); |
2484 | |
2485 | return ERR_PTR(error: devidx); |
2486 | } |
2487 | |
2488 | md = kzalloc(size: sizeof(struct mmc_blk_data), GFP_KERNEL); |
2489 | if (!md) { |
2490 | ret = -ENOMEM; |
2491 | goto out; |
2492 | } |
2493 | |
2494 | md->area_type = area_type; |
2495 | |
2496 | /* |
2497 | * Set the read-only status based on the supported commands |
2498 | * and the write protect switch. |
2499 | */ |
2500 | md->read_only = mmc_blk_readonly(card); |
2501 | |
2502 | md->disk = mmc_init_queue(mq: &md->queue, card); |
2503 | if (IS_ERR(ptr: md->disk)) { |
2504 | ret = PTR_ERR(ptr: md->disk); |
2505 | goto err_kfree; |
2506 | } |
2507 | |
2508 | INIT_LIST_HEAD(list: &md->part); |
2509 | INIT_LIST_HEAD(list: &md->rpmbs); |
2510 | kref_init(kref: &md->kref); |
2511 | |
2512 | md->queue.blkdata = md; |
2513 | md->part_type = part_type; |
2514 | |
2515 | md->disk->major = MMC_BLOCK_MAJOR; |
2516 | md->disk->minors = perdev_minors; |
2517 | md->disk->first_minor = devidx * perdev_minors; |
2518 | md->disk->fops = &mmc_bdops; |
2519 | md->disk->private_data = md; |
2520 | md->parent = parent; |
2521 | set_disk_ro(disk: md->disk, read_only: md->read_only || default_ro); |
2522 | if (area_type & (MMC_BLK_DATA_AREA_RPMB | MMC_BLK_DATA_AREA_BOOT)) |
2523 | md->disk->flags |= GENHD_FL_NO_PART; |
2524 | |
2525 | /* |
2526 | * As discussed on lkml, GENHD_FL_REMOVABLE should: |
2527 | * |
2528 | * - be set for removable media with permanent block devices |
2529 | * - be unset for removable block devices with permanent media |
2530 | * |
2531 | * Since MMC block devices clearly fall under the second |
2532 | * case, we do not set GENHD_FL_REMOVABLE. Userspace |
2533 | * should use the block device creation/destruction hotplug |
2534 | * messages to tell when the card is present. |
2535 | */ |
2536 | |
2537 | snprintf(buf: md->disk->disk_name, size: sizeof(md->disk->disk_name), |
2538 | fmt: "mmcblk%u%s" , card->host->index, subname ? subname : "" ); |
2539 | |
2540 | set_capacity(disk: md->disk, size); |
2541 | |
2542 | if (mmc_host_cmd23(host: card->host)) { |
2543 | if ((mmc_card_mmc(card) && |
2544 | card->csd.mmca_vsn >= CSD_SPEC_VER_3) || |
2545 | (mmc_card_sd(card) && |
2546 | card->scr.cmds & SD_SCR_CMD23_SUPPORT)) |
2547 | md->flags |= MMC_BLK_CMD23; |
2548 | } |
2549 | |
2550 | if (md->flags & MMC_BLK_CMD23 && |
2551 | ((card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN) || |
2552 | card->ext_csd.rel_sectors)) { |
2553 | md->flags |= MMC_BLK_REL_WR; |
2554 | fua_enabled = true; |
2555 | cache_enabled = true; |
2556 | } |
2557 | if (mmc_cache_enabled(host: card->host)) |
2558 | cache_enabled = true; |
2559 | |
2560 | blk_queue_write_cache(q: md->queue.queue, enabled: cache_enabled, fua: fua_enabled); |
2561 | |
2562 | string_get_size(size: (u64)size, blk_size: 512, units: STRING_UNITS_2, |
2563 | buf: cap_str, len: sizeof(cap_str)); |
2564 | pr_info("%s: %s %s %s%s\n" , |
2565 | md->disk->disk_name, mmc_card_id(card), mmc_card_name(card), |
2566 | cap_str, md->read_only ? " (ro)" : "" ); |
2567 | |
2568 | /* used in ->open, must be set before add_disk: */ |
2569 | if (area_type == MMC_BLK_DATA_AREA_MAIN) |
2570 | dev_set_drvdata(dev: &card->dev, data: md); |
2571 | ret = device_add_disk(parent: md->parent, disk: md->disk, groups: mmc_disk_attr_groups); |
2572 | if (ret) |
2573 | goto err_put_disk; |
2574 | return md; |
2575 | |
2576 | err_put_disk: |
2577 | put_disk(disk: md->disk); |
2578 | blk_mq_free_tag_set(set: &md->queue.tag_set); |
2579 | err_kfree: |
2580 | kfree(objp: md); |
2581 | out: |
2582 | ida_free(&mmc_blk_ida, id: devidx); |
2583 | return ERR_PTR(error: ret); |
2584 | } |
2585 | |
2586 | static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card) |
2587 | { |
2588 | sector_t size; |
2589 | |
2590 | if (!mmc_card_sd(card) && mmc_card_blockaddr(card)) { |
2591 | /* |
2592 | * The EXT_CSD sector count is in number or 512 byte |
2593 | * sectors. |
2594 | */ |
2595 | size = card->ext_csd.sectors; |
2596 | } else { |
2597 | /* |
2598 | * The CSD capacity field is in units of read_blkbits. |
2599 | * set_capacity takes units of 512 bytes. |
2600 | */ |
2601 | size = (typeof(sector_t))card->csd.capacity |
2602 | << (card->csd.read_blkbits - 9); |
2603 | } |
2604 | |
2605 | return mmc_blk_alloc_req(card, parent: &card->dev, size, default_ro: false, NULL, |
2606 | MMC_BLK_DATA_AREA_MAIN, part_type: 0); |
2607 | } |
2608 | |
2609 | static int mmc_blk_alloc_part(struct mmc_card *card, |
2610 | struct mmc_blk_data *md, |
2611 | unsigned int part_type, |
2612 | sector_t size, |
2613 | bool default_ro, |
2614 | const char *subname, |
2615 | int area_type) |
2616 | { |
2617 | struct mmc_blk_data *part_md; |
2618 | |
2619 | part_md = mmc_blk_alloc_req(card, disk_to_dev(md->disk), size, default_ro, |
2620 | subname, area_type, part_type); |
2621 | if (IS_ERR(ptr: part_md)) |
2622 | return PTR_ERR(ptr: part_md); |
2623 | list_add(new: &part_md->part, head: &md->part); |
2624 | |
2625 | return 0; |
2626 | } |
2627 | |
2628 | /** |
2629 | * mmc_rpmb_ioctl() - ioctl handler for the RPMB chardev |
2630 | * @filp: the character device file |
2631 | * @cmd: the ioctl() command |
2632 | * @arg: the argument from userspace |
2633 | * |
2634 | * This will essentially just redirect the ioctl()s coming in over to |
2635 | * the main block device spawning the RPMB character device. |
2636 | */ |
2637 | static long mmc_rpmb_ioctl(struct file *filp, unsigned int cmd, |
2638 | unsigned long arg) |
2639 | { |
2640 | struct mmc_rpmb_data *rpmb = filp->private_data; |
2641 | int ret; |
2642 | |
2643 | switch (cmd) { |
2644 | case MMC_IOC_CMD: |
2645 | ret = mmc_blk_ioctl_cmd(md: rpmb->md, |
2646 | ic_ptr: (struct mmc_ioc_cmd __user *)arg, |
2647 | rpmb); |
2648 | break; |
2649 | case MMC_IOC_MULTI_CMD: |
2650 | ret = mmc_blk_ioctl_multi_cmd(md: rpmb->md, |
2651 | user: (struct mmc_ioc_multi_cmd __user *)arg, |
2652 | rpmb); |
2653 | break; |
2654 | default: |
2655 | ret = -EINVAL; |
2656 | break; |
2657 | } |
2658 | |
2659 | return ret; |
2660 | } |
2661 | |
2662 | #ifdef CONFIG_COMPAT |
2663 | static long mmc_rpmb_ioctl_compat(struct file *filp, unsigned int cmd, |
2664 | unsigned long arg) |
2665 | { |
2666 | return mmc_rpmb_ioctl(filp, cmd, arg: (unsigned long)compat_ptr(uptr: arg)); |
2667 | } |
2668 | #endif |
2669 | |
2670 | static int mmc_rpmb_chrdev_open(struct inode *inode, struct file *filp) |
2671 | { |
2672 | struct mmc_rpmb_data *rpmb = container_of(inode->i_cdev, |
2673 | struct mmc_rpmb_data, chrdev); |
2674 | |
2675 | get_device(dev: &rpmb->dev); |
2676 | filp->private_data = rpmb; |
2677 | mmc_blk_get(disk: rpmb->md->disk); |
2678 | |
2679 | return nonseekable_open(inode, filp); |
2680 | } |
2681 | |
2682 | static int mmc_rpmb_chrdev_release(struct inode *inode, struct file *filp) |
2683 | { |
2684 | struct mmc_rpmb_data *rpmb = container_of(inode->i_cdev, |
2685 | struct mmc_rpmb_data, chrdev); |
2686 | |
2687 | mmc_blk_put(md: rpmb->md); |
2688 | put_device(dev: &rpmb->dev); |
2689 | |
2690 | return 0; |
2691 | } |
2692 | |
2693 | static const struct file_operations mmc_rpmb_fileops = { |
2694 | .release = mmc_rpmb_chrdev_release, |
2695 | .open = mmc_rpmb_chrdev_open, |
2696 | .owner = THIS_MODULE, |
2697 | .llseek = no_llseek, |
2698 | .unlocked_ioctl = mmc_rpmb_ioctl, |
2699 | #ifdef CONFIG_COMPAT |
2700 | .compat_ioctl = mmc_rpmb_ioctl_compat, |
2701 | #endif |
2702 | }; |
2703 | |
2704 | static void mmc_blk_rpmb_device_release(struct device *dev) |
2705 | { |
2706 | struct mmc_rpmb_data *rpmb = dev_get_drvdata(dev); |
2707 | |
2708 | ida_free(&mmc_rpmb_ida, id: rpmb->id); |
2709 | kfree(objp: rpmb); |
2710 | } |
2711 | |
2712 | static int mmc_blk_alloc_rpmb_part(struct mmc_card *card, |
2713 | struct mmc_blk_data *md, |
2714 | unsigned int part_index, |
2715 | sector_t size, |
2716 | const char *subname) |
2717 | { |
2718 | int devidx, ret; |
2719 | char rpmb_name[DISK_NAME_LEN]; |
2720 | char cap_str[10]; |
2721 | struct mmc_rpmb_data *rpmb; |
2722 | |
2723 | /* This creates the minor number for the RPMB char device */ |
2724 | devidx = ida_alloc_max(ida: &mmc_rpmb_ida, max: max_devices - 1, GFP_KERNEL); |
2725 | if (devidx < 0) |
2726 | return devidx; |
2727 | |
2728 | rpmb = kzalloc(size: sizeof(*rpmb), GFP_KERNEL); |
2729 | if (!rpmb) { |
2730 | ida_free(&mmc_rpmb_ida, id: devidx); |
2731 | return -ENOMEM; |
2732 | } |
2733 | |
2734 | snprintf(buf: rpmb_name, size: sizeof(rpmb_name), |
2735 | fmt: "mmcblk%u%s" , card->host->index, subname ? subname : "" ); |
2736 | |
2737 | rpmb->id = devidx; |
2738 | rpmb->part_index = part_index; |
2739 | rpmb->dev.init_name = rpmb_name; |
2740 | rpmb->dev.bus = &mmc_rpmb_bus_type; |
2741 | rpmb->dev.devt = MKDEV(MAJOR(mmc_rpmb_devt), rpmb->id); |
2742 | rpmb->dev.parent = &card->dev; |
2743 | rpmb->dev.release = mmc_blk_rpmb_device_release; |
2744 | device_initialize(dev: &rpmb->dev); |
2745 | dev_set_drvdata(dev: &rpmb->dev, data: rpmb); |
2746 | rpmb->md = md; |
2747 | |
2748 | cdev_init(&rpmb->chrdev, &mmc_rpmb_fileops); |
2749 | rpmb->chrdev.owner = THIS_MODULE; |
2750 | ret = cdev_device_add(cdev: &rpmb->chrdev, dev: &rpmb->dev); |
2751 | if (ret) { |
2752 | pr_err("%s: could not add character device\n" , rpmb_name); |
2753 | goto out_put_device; |
2754 | } |
2755 | |
2756 | list_add(new: &rpmb->node, head: &md->rpmbs); |
2757 | |
2758 | string_get_size(size: (u64)size, blk_size: 512, units: STRING_UNITS_2, |
2759 | buf: cap_str, len: sizeof(cap_str)); |
2760 | |
2761 | pr_info("%s: %s %s %s, chardev (%d:%d)\n" , |
2762 | rpmb_name, mmc_card_id(card), mmc_card_name(card), cap_str, |
2763 | MAJOR(mmc_rpmb_devt), rpmb->id); |
2764 | |
2765 | return 0; |
2766 | |
2767 | out_put_device: |
2768 | put_device(dev: &rpmb->dev); |
2769 | return ret; |
2770 | } |
2771 | |
2772 | static void mmc_blk_remove_rpmb_part(struct mmc_rpmb_data *rpmb) |
2773 | |
2774 | { |
2775 | cdev_device_del(cdev: &rpmb->chrdev, dev: &rpmb->dev); |
2776 | put_device(dev: &rpmb->dev); |
2777 | } |
2778 | |
2779 | /* MMC Physical partitions consist of two boot partitions and |
2780 | * up to four general purpose partitions. |
2781 | * For each partition enabled in EXT_CSD a block device will be allocatedi |
2782 | * to provide access to the partition. |
2783 | */ |
2784 | |
2785 | static int mmc_blk_alloc_parts(struct mmc_card *card, struct mmc_blk_data *md) |
2786 | { |
2787 | int idx, ret; |
2788 | |
2789 | if (!mmc_card_mmc(card)) |
2790 | return 0; |
2791 | |
2792 | for (idx = 0; idx < card->nr_parts; idx++) { |
2793 | if (card->part[idx].area_type & MMC_BLK_DATA_AREA_RPMB) { |
2794 | /* |
2795 | * RPMB partitions does not provide block access, they |
2796 | * are only accessed using ioctl():s. Thus create |
2797 | * special RPMB block devices that do not have a |
2798 | * backing block queue for these. |
2799 | */ |
2800 | ret = mmc_blk_alloc_rpmb_part(card, md, |
2801 | part_index: card->part[idx].part_cfg, |
2802 | size: card->part[idx].size >> 9, |
2803 | subname: card->part[idx].name); |
2804 | if (ret) |
2805 | return ret; |
2806 | } else if (card->part[idx].size) { |
2807 | ret = mmc_blk_alloc_part(card, md, |
2808 | part_type: card->part[idx].part_cfg, |
2809 | size: card->part[idx].size >> 9, |
2810 | default_ro: card->part[idx].force_ro, |
2811 | subname: card->part[idx].name, |
2812 | area_type: card->part[idx].area_type); |
2813 | if (ret) |
2814 | return ret; |
2815 | } |
2816 | } |
2817 | |
2818 | return 0; |
2819 | } |
2820 | |
2821 | static void mmc_blk_remove_req(struct mmc_blk_data *md) |
2822 | { |
2823 | /* |
2824 | * Flush remaining requests and free queues. It is freeing the queue |
2825 | * that stops new requests from being accepted. |
2826 | */ |
2827 | del_gendisk(gp: md->disk); |
2828 | mmc_cleanup_queue(&md->queue); |
2829 | mmc_blk_put(md); |
2830 | } |
2831 | |
2832 | static void mmc_blk_remove_parts(struct mmc_card *card, |
2833 | struct mmc_blk_data *md) |
2834 | { |
2835 | struct list_head *pos, *q; |
2836 | struct mmc_blk_data *part_md; |
2837 | struct mmc_rpmb_data *rpmb; |
2838 | |
2839 | /* Remove RPMB partitions */ |
2840 | list_for_each_safe(pos, q, &md->rpmbs) { |
2841 | rpmb = list_entry(pos, struct mmc_rpmb_data, node); |
2842 | list_del(entry: pos); |
2843 | mmc_blk_remove_rpmb_part(rpmb); |
2844 | } |
2845 | /* Remove block partitions */ |
2846 | list_for_each_safe(pos, q, &md->part) { |
2847 | part_md = list_entry(pos, struct mmc_blk_data, part); |
2848 | list_del(entry: pos); |
2849 | mmc_blk_remove_req(md: part_md); |
2850 | } |
2851 | } |
2852 | |
2853 | #ifdef CONFIG_DEBUG_FS |
2854 | |
2855 | static int mmc_dbg_card_status_get(void *data, u64 *val) |
2856 | { |
2857 | struct mmc_card *card = data; |
2858 | struct mmc_blk_data *md = dev_get_drvdata(dev: &card->dev); |
2859 | struct mmc_queue *mq = &md->queue; |
2860 | struct request *req; |
2861 | int ret; |
2862 | |
2863 | /* Ask the block layer about the card status */ |
2864 | req = blk_mq_alloc_request(q: mq->queue, opf: REQ_OP_DRV_IN, flags: 0); |
2865 | if (IS_ERR(ptr: req)) |
2866 | return PTR_ERR(ptr: req); |
2867 | req_to_mmc_queue_req(rq: req)->drv_op = MMC_DRV_OP_GET_CARD_STATUS; |
2868 | req_to_mmc_queue_req(rq: req)->drv_op_result = -EIO; |
2869 | blk_execute_rq(rq: req, at_head: false); |
2870 | ret = req_to_mmc_queue_req(rq: req)->drv_op_result; |
2871 | if (ret >= 0) { |
2872 | *val = ret; |
2873 | ret = 0; |
2874 | } |
2875 | blk_mq_free_request(rq: req); |
2876 | |
2877 | return ret; |
2878 | } |
2879 | DEFINE_DEBUGFS_ATTRIBUTE(mmc_dbg_card_status_fops, mmc_dbg_card_status_get, |
2880 | NULL, "%08llx\n" ); |
2881 | |
2882 | /* That is two digits * 512 + 1 for newline */ |
2883 | #define EXT_CSD_STR_LEN 1025 |
2884 | |
2885 | static int mmc_ext_csd_open(struct inode *inode, struct file *filp) |
2886 | { |
2887 | struct mmc_card *card = inode->i_private; |
2888 | struct mmc_blk_data *md = dev_get_drvdata(dev: &card->dev); |
2889 | struct mmc_queue *mq = &md->queue; |
2890 | struct request *req; |
2891 | char *buf; |
2892 | ssize_t n = 0; |
2893 | u8 *ext_csd; |
2894 | int err, i; |
2895 | |
2896 | buf = kmalloc(EXT_CSD_STR_LEN + 1, GFP_KERNEL); |
2897 | if (!buf) |
2898 | return -ENOMEM; |
2899 | |
2900 | /* Ask the block layer for the EXT CSD */ |
2901 | req = blk_mq_alloc_request(q: mq->queue, opf: REQ_OP_DRV_IN, flags: 0); |
2902 | if (IS_ERR(ptr: req)) { |
2903 | err = PTR_ERR(ptr: req); |
2904 | goto out_free; |
2905 | } |
2906 | req_to_mmc_queue_req(rq: req)->drv_op = MMC_DRV_OP_GET_EXT_CSD; |
2907 | req_to_mmc_queue_req(rq: req)->drv_op_result = -EIO; |
2908 | req_to_mmc_queue_req(rq: req)->drv_op_data = &ext_csd; |
2909 | blk_execute_rq(rq: req, at_head: false); |
2910 | err = req_to_mmc_queue_req(rq: req)->drv_op_result; |
2911 | blk_mq_free_request(rq: req); |
2912 | if (err) { |
2913 | pr_err("FAILED %d\n" , err); |
2914 | goto out_free; |
2915 | } |
2916 | |
2917 | for (i = 0; i < 512; i++) |
2918 | n += sprintf(buf: buf + n, fmt: "%02x" , ext_csd[i]); |
2919 | n += sprintf(buf: buf + n, fmt: "\n" ); |
2920 | |
2921 | if (n != EXT_CSD_STR_LEN) { |
2922 | err = -EINVAL; |
2923 | kfree(objp: ext_csd); |
2924 | goto out_free; |
2925 | } |
2926 | |
2927 | filp->private_data = buf; |
2928 | kfree(objp: ext_csd); |
2929 | return 0; |
2930 | |
2931 | out_free: |
2932 | kfree(objp: buf); |
2933 | return err; |
2934 | } |
2935 | |
2936 | static ssize_t mmc_ext_csd_read(struct file *filp, char __user *ubuf, |
2937 | size_t cnt, loff_t *ppos) |
2938 | { |
2939 | char *buf = filp->private_data; |
2940 | |
2941 | return simple_read_from_buffer(to: ubuf, count: cnt, ppos, |
2942 | from: buf, EXT_CSD_STR_LEN); |
2943 | } |
2944 | |
2945 | static int mmc_ext_csd_release(struct inode *inode, struct file *file) |
2946 | { |
2947 | kfree(objp: file->private_data); |
2948 | return 0; |
2949 | } |
2950 | |
2951 | static const struct file_operations mmc_dbg_ext_csd_fops = { |
2952 | .open = mmc_ext_csd_open, |
2953 | .read = mmc_ext_csd_read, |
2954 | .release = mmc_ext_csd_release, |
2955 | .llseek = default_llseek, |
2956 | }; |
2957 | |
2958 | static void mmc_blk_add_debugfs(struct mmc_card *card, struct mmc_blk_data *md) |
2959 | { |
2960 | struct dentry *root; |
2961 | |
2962 | if (!card->debugfs_root) |
2963 | return; |
2964 | |
2965 | root = card->debugfs_root; |
2966 | |
2967 | if (mmc_card_mmc(card) || mmc_card_sd(card)) { |
2968 | md->status_dentry = |
2969 | debugfs_create_file_unsafe(name: "status" , mode: 0400, parent: root, |
2970 | data: card, |
2971 | fops: &mmc_dbg_card_status_fops); |
2972 | } |
2973 | |
2974 | if (mmc_card_mmc(card)) { |
2975 | md->ext_csd_dentry = |
2976 | debugfs_create_file(name: "ext_csd" , S_IRUSR, parent: root, data: card, |
2977 | fops: &mmc_dbg_ext_csd_fops); |
2978 | } |
2979 | } |
2980 | |
2981 | static void mmc_blk_remove_debugfs(struct mmc_card *card, |
2982 | struct mmc_blk_data *md) |
2983 | { |
2984 | if (!card->debugfs_root) |
2985 | return; |
2986 | |
2987 | debugfs_remove(dentry: md->status_dentry); |
2988 | md->status_dentry = NULL; |
2989 | |
2990 | debugfs_remove(dentry: md->ext_csd_dentry); |
2991 | md->ext_csd_dentry = NULL; |
2992 | } |
2993 | |
2994 | #else |
2995 | |
2996 | static void mmc_blk_add_debugfs(struct mmc_card *card, struct mmc_blk_data *md) |
2997 | { |
2998 | } |
2999 | |
3000 | static void mmc_blk_remove_debugfs(struct mmc_card *card, |
3001 | struct mmc_blk_data *md) |
3002 | { |
3003 | } |
3004 | |
3005 | #endif /* CONFIG_DEBUG_FS */ |
3006 | |
3007 | static int mmc_blk_probe(struct mmc_card *card) |
3008 | { |
3009 | struct mmc_blk_data *md; |
3010 | int ret = 0; |
3011 | |
3012 | /* |
3013 | * Check that the card supports the command class(es) we need. |
3014 | */ |
3015 | if (!(card->csd.cmdclass & CCC_BLOCK_READ)) |
3016 | return -ENODEV; |
3017 | |
3018 | mmc_fixup_device(card, table: mmc_blk_fixups); |
3019 | |
3020 | card->complete_wq = alloc_workqueue(fmt: "mmc_complete" , |
3021 | flags: WQ_MEM_RECLAIM | WQ_HIGHPRI, max_active: 0); |
3022 | if (!card->complete_wq) { |
3023 | pr_err("Failed to create mmc completion workqueue" ); |
3024 | return -ENOMEM; |
3025 | } |
3026 | |
3027 | md = mmc_blk_alloc(card); |
3028 | if (IS_ERR(ptr: md)) { |
3029 | ret = PTR_ERR(ptr: md); |
3030 | goto out_free; |
3031 | } |
3032 | |
3033 | ret = mmc_blk_alloc_parts(card, md); |
3034 | if (ret) |
3035 | goto out; |
3036 | |
3037 | /* Add two debugfs entries */ |
3038 | mmc_blk_add_debugfs(card, md); |
3039 | |
3040 | pm_runtime_set_autosuspend_delay(dev: &card->dev, delay: 3000); |
3041 | pm_runtime_use_autosuspend(dev: &card->dev); |
3042 | |
3043 | /* |
3044 | * Don't enable runtime PM for SD-combo cards here. Leave that |
3045 | * decision to be taken during the SDIO init sequence instead. |
3046 | */ |
3047 | if (!mmc_card_sd_combo(card)) { |
3048 | pm_runtime_set_active(dev: &card->dev); |
3049 | pm_runtime_enable(dev: &card->dev); |
3050 | } |
3051 | |
3052 | return 0; |
3053 | |
3054 | out: |
3055 | mmc_blk_remove_parts(card, md); |
3056 | mmc_blk_remove_req(md); |
3057 | out_free: |
3058 | destroy_workqueue(wq: card->complete_wq); |
3059 | return ret; |
3060 | } |
3061 | |
3062 | static void mmc_blk_remove(struct mmc_card *card) |
3063 | { |
3064 | struct mmc_blk_data *md = dev_get_drvdata(dev: &card->dev); |
3065 | |
3066 | mmc_blk_remove_debugfs(card, md); |
3067 | mmc_blk_remove_parts(card, md); |
3068 | pm_runtime_get_sync(dev: &card->dev); |
3069 | if (md->part_curr != md->part_type) { |
3070 | mmc_claim_host(host: card->host); |
3071 | mmc_blk_part_switch(card, part_type: md->part_type); |
3072 | mmc_release_host(host: card->host); |
3073 | } |
3074 | if (!mmc_card_sd_combo(card)) |
3075 | pm_runtime_disable(dev: &card->dev); |
3076 | pm_runtime_put_noidle(dev: &card->dev); |
3077 | mmc_blk_remove_req(md); |
3078 | destroy_workqueue(wq: card->complete_wq); |
3079 | } |
3080 | |
3081 | static int _mmc_blk_suspend(struct mmc_card *card) |
3082 | { |
3083 | struct mmc_blk_data *part_md; |
3084 | struct mmc_blk_data *md = dev_get_drvdata(dev: &card->dev); |
3085 | |
3086 | if (md) { |
3087 | mmc_queue_suspend(&md->queue); |
3088 | list_for_each_entry(part_md, &md->part, part) { |
3089 | mmc_queue_suspend(&part_md->queue); |
3090 | } |
3091 | } |
3092 | return 0; |
3093 | } |
3094 | |
3095 | static void mmc_blk_shutdown(struct mmc_card *card) |
3096 | { |
3097 | _mmc_blk_suspend(card); |
3098 | } |
3099 | |
3100 | #ifdef CONFIG_PM_SLEEP |
3101 | static int mmc_blk_suspend(struct device *dev) |
3102 | { |
3103 | struct mmc_card *card = mmc_dev_to_card(dev); |
3104 | |
3105 | return _mmc_blk_suspend(card); |
3106 | } |
3107 | |
3108 | static int mmc_blk_resume(struct device *dev) |
3109 | { |
3110 | struct mmc_blk_data *part_md; |
3111 | struct mmc_blk_data *md = dev_get_drvdata(dev); |
3112 | |
3113 | if (md) { |
3114 | /* |
3115 | * Resume involves the card going into idle state, |
3116 | * so current partition is always the main one. |
3117 | */ |
3118 | md->part_curr = md->part_type; |
3119 | mmc_queue_resume(&md->queue); |
3120 | list_for_each_entry(part_md, &md->part, part) { |
3121 | mmc_queue_resume(&part_md->queue); |
3122 | } |
3123 | } |
3124 | return 0; |
3125 | } |
3126 | #endif |
3127 | |
3128 | static SIMPLE_DEV_PM_OPS(mmc_blk_pm_ops, mmc_blk_suspend, mmc_blk_resume); |
3129 | |
3130 | static struct mmc_driver mmc_driver = { |
3131 | .drv = { |
3132 | .name = "mmcblk" , |
3133 | .pm = &mmc_blk_pm_ops, |
3134 | }, |
3135 | .probe = mmc_blk_probe, |
3136 | .remove = mmc_blk_remove, |
3137 | .shutdown = mmc_blk_shutdown, |
3138 | }; |
3139 | |
3140 | static int __init mmc_blk_init(void) |
3141 | { |
3142 | int res; |
3143 | |
3144 | res = bus_register(bus: &mmc_rpmb_bus_type); |
3145 | if (res < 0) { |
3146 | pr_err("mmcblk: could not register RPMB bus type\n" ); |
3147 | return res; |
3148 | } |
3149 | res = alloc_chrdev_region(&mmc_rpmb_devt, 0, MAX_DEVICES, "rpmb" ); |
3150 | if (res < 0) { |
3151 | pr_err("mmcblk: failed to allocate rpmb chrdev region\n" ); |
3152 | goto out_bus_unreg; |
3153 | } |
3154 | |
3155 | if (perdev_minors != CONFIG_MMC_BLOCK_MINORS) |
3156 | pr_info("mmcblk: using %d minors per device\n" , perdev_minors); |
3157 | |
3158 | max_devices = min(MAX_DEVICES, (1 << MINORBITS) / perdev_minors); |
3159 | |
3160 | res = register_blkdev(MMC_BLOCK_MAJOR, "mmc" ); |
3161 | if (res) |
3162 | goto out_chrdev_unreg; |
3163 | |
3164 | res = mmc_register_driver(drv: &mmc_driver); |
3165 | if (res) |
3166 | goto out_blkdev_unreg; |
3167 | |
3168 | return 0; |
3169 | |
3170 | out_blkdev_unreg: |
3171 | unregister_blkdev(MMC_BLOCK_MAJOR, name: "mmc" ); |
3172 | out_chrdev_unreg: |
3173 | unregister_chrdev_region(mmc_rpmb_devt, MAX_DEVICES); |
3174 | out_bus_unreg: |
3175 | bus_unregister(bus: &mmc_rpmb_bus_type); |
3176 | return res; |
3177 | } |
3178 | |
3179 | static void __exit mmc_blk_exit(void) |
3180 | { |
3181 | mmc_unregister_driver(drv: &mmc_driver); |
3182 | unregister_blkdev(MMC_BLOCK_MAJOR, name: "mmc" ); |
3183 | unregister_chrdev_region(mmc_rpmb_devt, MAX_DEVICES); |
3184 | bus_unregister(bus: &mmc_rpmb_bus_type); |
3185 | } |
3186 | |
3187 | module_init(mmc_blk_init); |
3188 | module_exit(mmc_blk_exit); |
3189 | |
3190 | MODULE_LICENSE("GPL" ); |
3191 | MODULE_DESCRIPTION("Multimedia Card (MMC) block device driver" ); |
3192 | |