aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/mmc/core
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/mmc/core')
-rw-r--r--drivers/mmc/core/block.c92
-rw-r--r--drivers/mmc/core/bus.c2
-rw-r--r--drivers/mmc/core/card.h7
-rw-r--r--drivers/mmc/core/core.c20
-rw-r--r--drivers/mmc/core/host.c11
-rw-r--r--drivers/mmc/core/mmc.c6
-rw-r--r--drivers/mmc/core/queue.c9
-rw-r--r--drivers/mmc/core/queue.h1
-rw-r--r--drivers/mmc/core/quirks.h116
-rw-r--r--drivers/mmc/core/sd.c213
-rw-r--r--drivers/mmc/core/sd_ops.c134
-rw-r--r--drivers/mmc/core/sd_ops.h6
12 files changed, 454 insertions, 163 deletions
diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
index c0ffe0817fd4..eda8e848615a 100644
--- a/drivers/mmc/core/block.c
+++ b/drivers/mmc/core/block.c
@@ -180,6 +180,13 @@ static DEFINE_MUTEX(open_lock);
module_param(perdev_minors, int, 0444);
MODULE_PARM_DESC(perdev_minors, "Minors numbers to allocate per device");
+/*
+ * Allow quirks to be overridden for the current card
+ */
+static char *card_quirks;
+module_param(card_quirks, charp, 0644);
+MODULE_PARM_DESC(card_quirks, "Force the use of the indicated quirks (a bitfield)");
+
static inline int mmc_blk_part_switch(struct mmc_card *card,
unsigned int part_type);
static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
@@ -889,7 +896,10 @@ static int mmc_blk_part_switch_pre(struct mmc_card *card,
if ((part_type & mask) == rpmb) {
if (card->ext_csd.cmdq_en) {
- ret = mmc_cmdq_disable(card);
+ if (mmc_card_sd(card))
+ ret = mmc_sd_cmdq_disable(card);
+ else
+ ret = mmc_cmdq_disable(card);
if (ret)
return ret;
}
@@ -908,8 +918,12 @@ static int mmc_blk_part_switch_post(struct mmc_card *card,
if ((part_type & mask) == rpmb) {
mmc_retune_unpause(card->host);
- if (card->reenable_cmdq && !card->ext_csd.cmdq_en)
- ret = mmc_cmdq_enable(card);
+ if (card->reenable_cmdq && !card->ext_csd.cmdq_en) {
+ if (mmc_card_sd(card))
+ ret = mmc_sd_cmdq_enable(card);
+ else
+ ret = mmc_cmdq_enable(card);
+ }
}
return ret;
@@ -1120,7 +1134,10 @@ static void mmc_blk_issue_drv_op(struct mmc_queue *mq, struct request *req)
switch (mq_rq->drv_op) {
case MMC_DRV_OP_IOCTL:
if (card->ext_csd.cmdq_en) {
- ret = mmc_cmdq_disable(card);
+ if (mmc_card_sd(card))
+ ret = mmc_sd_cmdq_disable(card);
+ else
+ ret = mmc_cmdq_disable(card);
if (ret)
break;
}
@@ -1138,8 +1155,12 @@ static void mmc_blk_issue_drv_op(struct mmc_queue *mq, struct request *req)
/* Always switch back to main area after RPMB access */
if (rpmb_ioctl)
mmc_blk_part_switch(card, 0);
- else if (card->reenable_cmdq && !card->ext_csd.cmdq_en)
- mmc_cmdq_enable(card);
+ else if (card->reenable_cmdq && !card->ext_csd.cmdq_en) {
+ if (mmc_card_sd(card))
+ mmc_sd_cmdq_enable(card);
+ else
+ mmc_cmdq_enable(card);
+ }
break;
case MMC_DRV_OP_BOOT_WP:
ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BOOT_WP,
@@ -1181,12 +1202,26 @@ static void mmc_blk_issue_erase_rq(struct mmc_queue *mq, struct request *req,
sector_t from;
int err = 0;
blk_status_t status = BLK_STS_OK;
+ bool restart_cmdq = false;
if (!mmc_card_can_erase(card)) {
status = BLK_STS_NOTSUPP;
goto fail;
}
+ /*
+ * Only Discard ops are supported with SD cards in CQ mode
+ * (SD Physical Spec v9.00 4.19.2)
+ */
+ if (mmc_card_sd(card) && card->ext_csd.cmdq_en && erase_arg != SD_DISCARD_ARG) {
+ restart_cmdq = true;
+ err = mmc_sd_cmdq_disable(card);
+ if (err) {
+ status = BLK_STS_IOERR;
+ goto fail;
+ }
+ }
+
from = blk_rq_pos(req);
nr = blk_rq_sectors(req);
@@ -1207,6 +1242,11 @@ static void mmc_blk_issue_erase_rq(struct mmc_queue *mq, struct request *req,
status = BLK_STS_IOERR;
else
mmc_blk_reset_success(md, type);
+
+ if (restart_cmdq)
+ err = mmc_sd_cmdq_enable(card);
+ if (err)
+ status = BLK_STS_IOERR;
fail:
blk_mq_end_request(req, status);
}
@@ -1529,6 +1569,7 @@ static void mmc_blk_cqe_complete_rq(struct mmc_queue *mq, struct request *req)
struct request_queue *q = req->q;
struct mmc_host *host = mq->card->host;
enum mmc_issue_type issue_type = mmc_issue_type(mq, req);
+ bool write = req_op(req) == REQ_OP_WRITE;
unsigned long flags;
bool put_card;
int err;
@@ -1560,6 +1601,8 @@ static void mmc_blk_cqe_complete_rq(struct mmc_queue *mq, struct request *req)
spin_lock_irqsave(&mq->lock, flags);
+ if (write)
+ mq->pending_writes--;
mq->in_flight[issue_type] -= 1;
put_card = (mmc_tot_in_flight(mq) == 0);
@@ -1972,7 +2015,7 @@ static void mmc_blk_mq_rw_recovery(struct mmc_queue *mq, struct request *req)
return;
}
- if (rq_data_dir(req) == READ && brq->data.blocks >
+ if (0 && rq_data_dir(req) == READ && brq->data.blocks >
queue_physical_block_size(mq->queue) >> 9) {
/* Read one (native) sector at a time */
mmc_blk_read_single(mq, req);
@@ -2080,6 +2123,8 @@ static void mmc_blk_mq_complete_rq(struct mmc_queue *mq, struct request *req)
struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
unsigned int nr_bytes = mqrq->brq.data.bytes_xfered;
+ if (req_op(req) == REQ_OP_WRITE)
+ mq->pending_writes--;
if (nr_bytes) {
if (blk_update_request(req, BLK_STS_OK, nr_bytes))
blk_mq_requeue_request(req, true);
@@ -2174,13 +2219,17 @@ static void mmc_blk_mq_poll_completion(struct mmc_queue *mq,
mmc_blk_urgent_bkops(mq, mqrq);
}
-static void mmc_blk_mq_dec_in_flight(struct mmc_queue *mq, enum mmc_issue_type issue_type)
+static void mmc_blk_mq_dec_in_flight(struct mmc_queue *mq, enum mmc_issue_type issue_type,
+ bool write)
{
unsigned long flags;
bool put_card;
spin_lock_irqsave(&mq->lock, flags);
+ if (write)
+ mq->pending_writes--;
+
mq->in_flight[issue_type] -= 1;
put_card = (mmc_tot_in_flight(mq) == 0);
@@ -2195,6 +2244,7 @@ static void mmc_blk_mq_post_req(struct mmc_queue *mq, struct request *req,
bool can_sleep)
{
enum mmc_issue_type issue_type = mmc_issue_type(mq, req);
+ bool write = req_op(req) == REQ_OP_WRITE;
struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
struct mmc_request *mrq = &mqrq->brq.mrq;
struct mmc_host *host = mq->card->host;
@@ -2214,7 +2264,7 @@ static void mmc_blk_mq_post_req(struct mmc_queue *mq, struct request *req,
blk_mq_complete_request(req);
}
- mmc_blk_mq_dec_in_flight(mq, issue_type);
+ mmc_blk_mq_dec_in_flight(mq, issue_type, write);
}
void mmc_blk_mq_recovery(struct mmc_queue *mq)
@@ -3265,6 +3315,8 @@ static int mmc_blk_probe(struct mmc_card *card)
{
struct mmc_blk_data *md;
int ret = 0;
+ char quirk_str[24];
+ char cap_str[10];
/*
* Check that the card supports the command class(es) we need.
@@ -3272,7 +3324,16 @@ static int mmc_blk_probe(struct mmc_card *card)
if (!(card->csd.cmdclass & CCC_BLOCK_READ))
return -ENODEV;
- mmc_fixup_device(card, mmc_blk_fixups);
+ if (card_quirks) {
+ unsigned long quirks;
+ if (kstrtoul(card_quirks, 0, &quirks) == 0)
+ card->quirks = (unsigned int)quirks;
+ else
+ pr_err("mmc_block: Invalid card_quirks parameter '%s'\n",
+ card_quirks);
+ }
+ else
+ mmc_fixup_device(card, mmc_blk_fixups);
card->complete_wq = alloc_workqueue("mmc_complete",
WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
@@ -3287,6 +3348,17 @@ static int mmc_blk_probe(struct mmc_card *card)
goto out_free;
}
+ string_get_size((u64)get_capacity(md->disk), 512, STRING_UNITS_2,
+ cap_str, sizeof(cap_str));
+ if (card->quirks)
+ snprintf(quirk_str, sizeof(quirk_str),
+ " (quirks 0x%08x)", card->quirks);
+ else
+ quirk_str[0] = '\0';
+ pr_info("%s: %s %s %s%s%s\n",
+ md->disk->disk_name, mmc_card_id(card), mmc_card_name(card),
+ cap_str, md->read_only ? " (ro)" : "", quirk_str);
+
ret = mmc_blk_alloc_parts(card, md);
if (ret)
goto out;
diff --git a/drivers/mmc/core/bus.c b/drivers/mmc/core/bus.c
index ec4f3462bf80..c7cadebe3ad0 100644
--- a/drivers/mmc/core/bus.c
+++ b/drivers/mmc/core/bus.c
@@ -267,6 +267,8 @@ static void mmc_release_card(struct device *dev)
sdio_free_common_cis(card);
+ kfree(card->ext_reg_buf);
+
kfree(card->info);
kfree(card);
diff --git a/drivers/mmc/core/card.h b/drivers/mmc/core/card.h
index 1200951bab08..5c94d8eaf8e1 100644
--- a/drivers/mmc/core/card.h
+++ b/drivers/mmc/core/card.h
@@ -88,11 +88,13 @@ struct mmc_fixup {
#define CID_MANFID_GIGASTONE 0x12
#define CID_MANFID_MICRON 0x13
#define CID_MANFID_SAMSUNG 0x15
+#define CID_MANFID_SAMSUNG_SD 0x1b
#define CID_MANFID_APACER 0x27
#define CID_MANFID_SWISSBIT 0x5D
#define CID_MANFID_KINGSTON 0x70
#define CID_MANFID_HYNIX 0x90
#define CID_MANFID_KINGSTON_SD 0x9F
+#define CID_MANFID_LONGSYS_SD 0xAD
#define CID_MANFID_NUMONYX 0xFE
#define END_FIXUP { NULL }
@@ -300,6 +302,11 @@ static inline int mmc_card_broken_sd_poweroff_notify(const struct mmc_card *c)
return c->quirks & MMC_QUIRK_BROKEN_SD_POWEROFF_NOTIFY;
}
+static inline int mmc_card_working_sd_cq(const struct mmc_card *c)
+{
+ return c->quirks & MMC_QUIRK_WORKING_SD_CQ;
+}
+
static inline int mmc_card_no_uhs_ddr50_tuning(const struct mmc_card *c)
{
return c->quirks & MMC_QUIRK_NO_UHS_DDR50_TUNING;
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index 860378bea557..764611873cc6 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -463,6 +463,7 @@ int mmc_cqe_start_req(struct mmc_host *host, struct mmc_request *mrq)
goto out_err;
trace_mmc_request_start(host, mrq);
+ led_trigger_event(host->led, LED_FULL);
return 0;
@@ -550,10 +551,18 @@ int mmc_cqe_recovery(struct mmc_host *host)
* Recovery is expected seldom, if at all, but it reduces performance,
* so make sure it is not completely silent.
*/
- pr_warn("%s: running CQE recovery\n", mmc_hostname(host));
+ pr_warn_ratelimited("%s: running CQE recovery\n", mmc_hostname(host));
host->cqe_ops->cqe_recovery_start(host);
+ err = mmc_detect_card_removed(host);
+ if (err) {
+ host->cqe_ops->cqe_recovery_finish(host);
+ host->cqe_ops->cqe_off(host);
+ mmc_retune_release(host);
+ return err;
+ }
+
memset(&cmd, 0, sizeof(cmd));
cmd.opcode = MMC_STOP_TRANSMISSION;
cmd.flags = MMC_RSP_R1B_NO_CRC | MMC_CMD_AC; /* Ignore CRC */
@@ -563,7 +572,11 @@ int mmc_cqe_recovery(struct mmc_host *host)
mmc_poll_for_busy(host->card, MMC_CQE_RECOVERY_TIMEOUT, true, MMC_BUSY_IO);
memset(&cmd, 0, sizeof(cmd));
- cmd.opcode = MMC_CMDQ_TASK_MGMT;
+ if (mmc_card_sd(host->card))
+ cmd.opcode = SD_CMDQ_TASK_MGMT;
+ else
+ cmd.opcode = MMC_CMDQ_TASK_MGMT;
+
cmd.arg = 1; /* Discard entire queue */
cmd.flags = MMC_RSP_R1B_NO_CRC | MMC_CMD_AC; /* Ignore CRC */
cmd.busy_timeout = MMC_CQE_RECOVERY_TIMEOUT;
@@ -1861,7 +1874,8 @@ EXPORT_SYMBOL(mmc_erase);
bool mmc_card_can_erase(struct mmc_card *card)
{
- return (card->csd.cmdclass & CCC_ERASE && card->erase_size);
+ return (card->csd.cmdclass & CCC_ERASE && card->erase_size) &&
+ !(card->quirks & MMC_QUIRK_ERASE_BROKEN);
}
EXPORT_SYMBOL(mmc_card_can_erase);
diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c
index 88c95dbfd9cf..049ec4867268 100644
--- a/drivers/mmc/core/host.c
+++ b/drivers/mmc/core/host.c
@@ -271,7 +271,7 @@ EXPORT_SYMBOL(mmc_of_parse_clk_phase);
int mmc_of_parse(struct mmc_host *host)
{
struct device *dev = host->parent;
- u32 bus_width, drv_type, cd_debounce_delay_ms;
+ u32 bus_width, drv_type, cd_debounce_delay_ms, cq_allow;
int ret;
if (!dev || !dev_fwnode(dev))
@@ -408,6 +408,15 @@ int mmc_of_parse(struct mmc_host *host)
host->caps2 &= ~(MMC_CAP2_HS400_1_8V | MMC_CAP2_HS400_1_2V |
MMC_CAP2_HS400_ES);
+ cq_allow = 0;
+ /*
+	 * Downstream property - if set to the u32 value 2 rather than a
+	 * plain bool, trust most A2 SD cards that claim CQ support.
+ */
+ device_property_read_u32(dev, "supports-cqe", &cq_allow);
+ if (cq_allow == 2)
+ host->caps2 |= MMC_CAP2_SD_CQE_PERMISSIVE;
+
/* Must be after "non-removable" check */
if (device_property_read_u32(dev, "fixed-emmc-driver-type", &drv_type) == 0) {
if (host->caps & MMC_CAP_NONREMOVABLE)
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index 3e7d9437477c..afe172583cb0 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -1668,6 +1668,7 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
card->ocr = ocr;
card->type = MMC_TYPE_MMC;
card->rca = 1;
+ card->max_posted_writes = 1;
memcpy(card->raw_cid, cid, sizeof(card->raw_cid));
}
@@ -1926,13 +1927,14 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
host->cqe_enabled = true;
if (card->ext_csd.cmdq_en) {
- pr_info("%s: Command Queue Engine enabled\n",
- mmc_hostname(host));
+ pr_info("%s: Command Queue Engine enabled, %u tags\n",
+ mmc_hostname(host), card->ext_csd.cmdq_depth);
} else {
host->hsq_enabled = true;
pr_info("%s: Host Software Queue enabled\n",
mmc_hostname(host));
}
+ card->max_posted_writes = card->ext_csd.cmdq_depth;
}
}
diff --git a/drivers/mmc/core/queue.c b/drivers/mmc/core/queue.c
index 284856c8f655..feda8ddbd8b6 100644
--- a/drivers/mmc/core/queue.c
+++ b/drivers/mmc/core/queue.c
@@ -266,6 +266,11 @@ static blk_status_t mmc_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
spin_unlock_irq(&mq->lock);
return BLK_STS_RESOURCE;
}
+ if (!host->hsq_enabled && host->cqe_enabled && req_op(req) == REQ_OP_WRITE &&
+ mq->pending_writes >= card->max_posted_writes) {
+ spin_unlock_irq(&mq->lock);
+ return BLK_STS_RESOURCE;
+ }
break;
default:
/*
@@ -282,6 +287,8 @@ static blk_status_t mmc_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
/* Parallel dispatch of requests is not supported at the moment */
mq->busy = true;
+ if (req_op(req) == REQ_OP_WRITE)
+ mq->pending_writes++;
mq->in_flight[issue_type] += 1;
get_card = (mmc_tot_in_flight(mq) == 1);
cqe_retune_ok = (mmc_cqe_qcnt(mq) == 1);
@@ -321,6 +328,8 @@ static blk_status_t mmc_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
bool put_card = false;
spin_lock_irq(&mq->lock);
+ if (req_op(req) == REQ_OP_WRITE)
+ mq->pending_writes--;
mq->in_flight[issue_type] -= 1;
if (mmc_tot_in_flight(mq) == 0)
put_card = true;
diff --git a/drivers/mmc/core/queue.h b/drivers/mmc/core/queue.h
index 1498840a4ea0..39180d97911b 100644
--- a/drivers/mmc/core/queue.h
+++ b/drivers/mmc/core/queue.h
@@ -79,6 +79,7 @@ struct mmc_queue {
struct request_queue *queue;
spinlock_t lock;
int in_flight[MMC_ISSUE_MAX];
+ int pending_writes;
unsigned int cqe_busy;
#define MMC_CQE_DCMD_BUSY BIT(0)
bool busy;
diff --git a/drivers/mmc/core/quirks.h b/drivers/mmc/core/quirks.h
index c417ed34c057..d975d62b3734 100644
--- a/drivers/mmc/core/quirks.h
+++ b/drivers/mmc/core/quirks.h
@@ -18,20 +18,33 @@
static const struct mmc_fixup __maybe_unused mmc_sd_fixups[] = {
/*
* Kingston Canvas Go! Plus microSD cards never finish SD cache flush.
- * This has so far only been observed on cards from 11/2019, while new
- * cards from 2023/05 do not exhibit this behavior.
+ * This has been observed on cards from 2019/11 and 2021/11, while new
+ * cards from 2023/05 and 2024/08 do not exhibit this behavior.
*/
- _FIXUP_EXT("SD64G", CID_MANFID_KINGSTON_SD, 0x5449, 2019, 11,
- 0, -1ull, SDIO_ANY_ID, SDIO_ANY_ID, add_quirk_sd,
- MMC_QUIRK_BROKEN_SD_CACHE, EXT_CSD_REV_ANY),
+ _FIXUP_EXT(CID_NAME_ANY, CID_MANFID_KINGSTON_SD, 0x5449, 2019,
+ CID_MONTH_ANY, 0, -1ull, SDIO_ANY_ID, SDIO_ANY_ID,
+ add_quirk_sd, MMC_QUIRK_BROKEN_SD_CACHE, EXT_CSD_REV_ANY),
+
+ _FIXUP_EXT(CID_NAME_ANY, CID_MANFID_KINGSTON_SD, 0x5449, 2020,
+ CID_MONTH_ANY, 0, -1ull, SDIO_ANY_ID, SDIO_ANY_ID,
+ add_quirk_sd, MMC_QUIRK_BROKEN_SD_CACHE, EXT_CSD_REV_ANY),
+
+ _FIXUP_EXT(CID_NAME_ANY, CID_MANFID_KINGSTON_SD, 0x5449, 2021,
+ CID_MONTH_ANY, 0, -1ull, SDIO_ANY_ID, SDIO_ANY_ID,
+ add_quirk_sd, MMC_QUIRK_BROKEN_SD_CACHE, EXT_CSD_REV_ANY),
+
+ _FIXUP_EXT(CID_NAME_ANY, CID_MANFID_KINGSTON_SD, 0x5449, 2022,
+ CID_MONTH_ANY, 0, -1ull, SDIO_ANY_ID, SDIO_ANY_ID,
+ add_quirk_sd, MMC_QUIRK_BROKEN_SD_CACHE, EXT_CSD_REV_ANY),
/*
* GIGASTONE Gaming Plus microSD cards manufactured on 02/2022 never
* clear Flush Cache bit and set Poweroff Notification Ready bit.
*/
- _FIXUP_EXT("ASTC", CID_MANFID_GIGASTONE, 0x3456, 2022, 2,
- 0, -1ull, SDIO_ANY_ID, SDIO_ANY_ID, add_quirk_sd,
- MMC_QUIRK_BROKEN_SD_CACHE | MMC_QUIRK_BROKEN_SD_POWEROFF_NOTIFY,
+ _FIXUP_EXT("ASTC", CID_MANFID_GIGASTONE, 0x3456, 2022, 2, 0, -1ull,
+ SDIO_ANY_ID, SDIO_ANY_ID, add_quirk_sd,
+ MMC_QUIRK_BROKEN_SD_CACHE |
+ MMC_QUIRK_BROKEN_SD_POWEROFF_NOTIFY,
EXT_CSD_REV_ANY),
/*
@@ -40,8 +53,8 @@ static const struct mmc_fixup __maybe_unused mmc_sd_fixups[] = {
* only been observed on cards manufactured on 01/2019 that are using
* Bay Trail host controllers.
*/
- _FIXUP_EXT("0016G", CID_MANFID_SWISSBIT, 0x5342, 2019, 1,
- 0, -1ull, SDIO_ANY_ID, SDIO_ANY_ID, add_quirk_sd,
+ _FIXUP_EXT("0016G", CID_MANFID_SWISSBIT, 0x5342, 2019, 1, 0, -1ull,
+ SDIO_ANY_ID, SDIO_ANY_ID, add_quirk_sd,
MMC_QUIRK_NO_UHS_DDR50_TUNING, EXT_CSD_REV_ANY),
/*
@@ -50,13 +63,39 @@ static const struct mmc_fixup __maybe_unused mmc_sd_fixups[] = {
MMC_FIXUP(CID_NAME_ANY, CID_MANFID_SANDISK_SD, 0x5344, add_quirk_sd,
MMC_QUIRK_BROKEN_SD_DISCARD),
+ /*
+ * Samsung Pro Plus/EVO Plus/Pro Ultimate SD cards (2023) claim to cache
+ * flush OK, but become unresponsive afterwards.
+ */
+ _FIXUP_EXT(CID_NAME_ANY, CID_MANFID_SAMSUNG_SD, 0x534d, 2023, CID_MONTH_ANY,
+ 0, -1ull, SDIO_ANY_ID, SDIO_ANY_ID, add_quirk_sd,
+ MMC_QUIRK_BROKEN_SD_CACHE, EXT_CSD_REV_ANY),
+
+ /*
+ * Early Sandisk Extreme and Extreme Pro A2 cards never finish SD cache
+ * flush in CQ mode. Latest card date this was seen on is 10/2020.
+ */
+ _FIXUP_EXT(CID_NAME_ANY, CID_MANFID_SANDISK_SD, 0x5344, 2019, CID_MONTH_ANY,
+ 0, -1ull, SDIO_ANY_ID, SDIO_ANY_ID, add_quirk_sd,
+ MMC_QUIRK_BROKEN_SD_CACHE, EXT_CSD_REV_ANY),
+
+ _FIXUP_EXT(CID_NAME_ANY, CID_MANFID_SANDISK_SD, 0x5344, 2020, CID_MONTH_ANY,
+ 0, -1ull, SDIO_ANY_ID, SDIO_ANY_ID, add_quirk_sd,
+ MMC_QUIRK_BROKEN_SD_CACHE, EXT_CSD_REV_ANY),
+
+ /* SD A2 allow-list - only trust CQ on these cards */
+ /* Raspberry Pi A2 cards */
+ _FIXUP_EXT(CID_NAME_ANY, CID_MANFID_LONGSYS_SD, 0x4c53, CID_YEAR_ANY, CID_MONTH_ANY,
+ cid_rev(1, 0, 0, 0), -1ull, SDIO_ANY_ID, SDIO_ANY_ID, add_quirk_sd,
+ MMC_QUIRK_WORKING_SD_CQ, EXT_CSD_REV_ANY),
+
END_FIXUP
};
static const struct mmc_fixup __maybe_unused mmc_blk_fixups[] = {
-#define INAND_CMD38_ARG_EXT_CSD 113
-#define INAND_CMD38_ARG_ERASE 0x00
-#define INAND_CMD38_ARG_TRIM 0x01
+#define INAND_CMD38_ARG_EXT_CSD 113
+#define INAND_CMD38_ARG_ERASE 0x00
+#define INAND_CMD38_ARG_TRIM 0x01
#define INAND_CMD38_ARG_SECERASE 0x80
#define INAND_CMD38_ARG_SECTRIM1 0x81
#define INAND_CMD38_ARG_SECTRIM2 0x88
@@ -153,6 +192,29 @@ static const struct mmc_fixup __maybe_unused mmc_blk_fixups[] = {
MMC_FIXUP("M62704", CID_MANFID_KINGSTON, 0x0100, add_quirk_mmc,
MMC_QUIRK_TRIM_BROKEN),
+ /*
+	 * Some SD cards report discard support even though they don't
+ */
+ MMC_FIXUP(CID_NAME_ANY, CID_MANFID_SANDISK_SD, 0x5344, add_quirk_sd,
+ MMC_QUIRK_BROKEN_SD_DISCARD),
+
+ /*
+ * On some Kingston SD cards, multiple erases of less than 64
+ * sectors can cause corruption.
+ */
+ MMC_FIXUP("SD16G", 0x41, 0x3432, add_quirk, MMC_QUIRK_ERASE_BROKEN),
+ MMC_FIXUP("SD32G", 0x41, 0x3432, add_quirk, MMC_QUIRK_ERASE_BROKEN),
+ MMC_FIXUP("SD64G", 0x41, 0x3432, add_quirk, MMC_QUIRK_ERASE_BROKEN),
+
+ /*
+ * Larger Integral SD cards using rebranded Phison controllers trash
+ * nearby flash blocks after erases.
+ */
+ MMC_FIXUP("SD64G", 0x27, 0x5048, add_quirk, MMC_QUIRK_ERASE_BROKEN),
+ MMC_FIXUP("SD128", 0x27, 0x5048, add_quirk, MMC_QUIRK_ERASE_BROKEN),
+ MMC_FIXUP("SD256", 0x27, 0x5048, add_quirk, MMC_QUIRK_ERASE_BROKEN),
+ MMC_FIXUP("SD512", 0x27, 0x5048, add_quirk, MMC_QUIRK_ERASE_BROKEN),
+
END_FIXUP
};
@@ -161,19 +223,18 @@ static const struct mmc_fixup __maybe_unused mmc_ext_csd_fixups[] = {
* Certain Hynix eMMC 4.41 cards might get broken when HPI feature
* is used so disable the HPI feature for such buggy cards.
*/
- MMC_FIXUP_EXT_CSD_REV(CID_NAME_ANY, CID_MANFID_HYNIX,
- 0x014a, add_quirk, MMC_QUIRK_BROKEN_HPI, 5),
+ MMC_FIXUP_EXT_CSD_REV(CID_NAME_ANY, CID_MANFID_HYNIX, 0x014a, add_quirk,
+ MMC_QUIRK_BROKEN_HPI, 5),
/*
* Certain Micron (Numonyx) eMMC 4.5 cards might get broken when HPI
* feature is used so disable the HPI feature for such buggy cards.
*/
- MMC_FIXUP_EXT_CSD_REV(CID_NAME_ANY, CID_MANFID_NUMONYX,
- 0x014e, add_quirk, MMC_QUIRK_BROKEN_HPI, 6),
+ MMC_FIXUP_EXT_CSD_REV(CID_NAME_ANY, CID_MANFID_NUMONYX, 0x014e,
+ add_quirk, MMC_QUIRK_BROKEN_HPI, 6),
END_FIXUP
};
-
static const struct mmc_fixup __maybe_unused sdio_fixup_methods[] = {
SDIO_FIXUP(SDIO_VENDOR_ID_TI_WL1251, SDIO_DEVICE_ID_TI_WL1251,
add_quirk, MMC_QUIRK_NONSTD_FUNC_IF),
@@ -181,14 +242,14 @@ static const struct mmc_fixup __maybe_unused sdio_fixup_methods[] = {
SDIO_FIXUP(SDIO_VENDOR_ID_TI_WL1251, SDIO_DEVICE_ID_TI_WL1251,
add_quirk, MMC_QUIRK_DISABLE_CD),
- SDIO_FIXUP(SDIO_VENDOR_ID_TI, SDIO_DEVICE_ID_TI_WL1271,
- add_quirk, MMC_QUIRK_NONSTD_FUNC_IF),
+ SDIO_FIXUP(SDIO_VENDOR_ID_TI, SDIO_DEVICE_ID_TI_WL1271, add_quirk,
+ MMC_QUIRK_NONSTD_FUNC_IF),
- SDIO_FIXUP(SDIO_VENDOR_ID_TI, SDIO_DEVICE_ID_TI_WL1271,
- add_quirk, MMC_QUIRK_DISABLE_CD),
+ SDIO_FIXUP(SDIO_VENDOR_ID_TI, SDIO_DEVICE_ID_TI_WL1271, add_quirk,
+ MMC_QUIRK_DISABLE_CD),
- SDIO_FIXUP(SDIO_VENDOR_ID_STE, SDIO_DEVICE_ID_STE_CW1200,
- add_quirk, MMC_QUIRK_BROKEN_BYTE_MODE_512),
+ SDIO_FIXUP(SDIO_VENDOR_ID_STE, SDIO_DEVICE_ID_STE_CW1200, add_quirk,
+ MMC_QUIRK_BROKEN_BYTE_MODE_512),
SDIO_FIXUP(SDIO_VENDOR_ID_MARVELL, SDIO_DEVICE_ID_MARVELL_8797_F0,
add_quirk, MMC_QUIRK_BROKEN_IRQ_POLLING),
@@ -204,8 +265,8 @@ static const struct mmc_fixup __maybe_unused sdio_card_init_methods[] = {
SDIO_FIXUP_COMPATIBLE("silabs,wf200", add_quirk,
MMC_QUIRK_BROKEN_BYTE_MODE_512 |
- MMC_QUIRK_LENIENT_FN0 |
- MMC_QUIRK_BLKSZ_FOR_BYTE_MODE),
+ MMC_QUIRK_LENIENT_FN0 |
+ MMC_QUIRK_BLKSZ_FOR_BYTE_MODE),
END_FIXUP
};
@@ -235,8 +296,7 @@ static inline void mmc_fixup_device(struct mmc_card *card,
if (f->manfid != CID_MANFID_ANY &&
f->manfid != card->cid.manfid)
continue;
- if (f->oemid != CID_OEMID_ANY &&
- f->oemid != card->cid.oemid)
+ if (f->oemid != CID_OEMID_ANY && f->oemid != card->cid.oemid)
continue;
if (f->name != CID_NAME_ANY &&
strncmp(f->name, card->cid.prod_name,
diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c
index 67cd63004829..a6d43acd46ba 100644
--- a/drivers/mmc/core/sd.c
+++ b/drivers/mmc/core/sd.c
@@ -730,7 +730,8 @@ MMC_DEV_ATTR(oemid, "0x%04x\n", card->cid.oemid);
MMC_DEV_ATTR(serial, "0x%08x\n", card->cid.serial);
MMC_DEV_ATTR(ocr, "0x%08x\n", card->ocr);
MMC_DEV_ATTR(rca, "0x%04x\n", card->rca);
-
+MMC_DEV_ATTR(ext_perf, "%02x\n", card->ext_perf.feature_support);
+MMC_DEV_ATTR(ext_power, "%02x\n", card->ext_power.feature_support);
static ssize_t mmc_dsr_show(struct device *dev, struct device_attribute *attr,
char *buf)
@@ -792,6 +793,8 @@ static struct attribute *sd_std_attrs[] = {
&dev_attr_ocr.attr,
&dev_attr_rca.attr,
&dev_attr_dsr.attr,
+ &dev_attr_ext_perf.attr,
+ &dev_attr_ext_power.attr,
NULL,
};
@@ -1034,98 +1037,16 @@ static bool mmc_sd_card_using_v18(struct mmc_card *card)
(SD_MODE_UHS_SDR50 | SD_MODE_UHS_SDR104 | SD_MODE_UHS_DDR50);
}
-static int sd_write_ext_reg(struct mmc_card *card, u8 fno, u8 page, u16 offset,
- u8 reg_data)
-{
- struct mmc_host *host = card->host;
- struct mmc_request mrq = {};
- struct mmc_command cmd = {};
- struct mmc_data data = {};
- struct scatterlist sg;
- u8 *reg_buf;
-
- reg_buf = kzalloc(512, GFP_KERNEL);
- if (!reg_buf)
- return -ENOMEM;
-
- mrq.cmd = &cmd;
- mrq.data = &data;
-
- /*
- * Arguments of CMD49:
- * [31:31] MIO (0 = memory).
- * [30:27] FNO (function number).
- * [26:26] MW - mask write mode (0 = disable).
- * [25:18] page number.
- * [17:9] offset address.
- * [8:0] length (0 = 1 byte).
- */
- cmd.arg = fno << 27 | page << 18 | offset << 9;
-
- /* The first byte in the buffer is the data to be written. */
- reg_buf[0] = reg_data;
-
- data.flags = MMC_DATA_WRITE;
- data.blksz = 512;
- data.blocks = 1;
- data.sg = &sg;
- data.sg_len = 1;
- sg_init_one(&sg, reg_buf, 512);
-
- cmd.opcode = SD_WRITE_EXTR_SINGLE;
- cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
-
- mmc_set_data_timeout(&data, card);
- mmc_wait_for_req(host, &mrq);
-
- kfree(reg_buf);
-
- /*
- * Note that, the SD card is allowed to signal busy on DAT0 up to 1s
- * after the CMD49. Although, let's leave this to be managed by the
- * caller.
- */
-
- if (cmd.error)
- return cmd.error;
- if (data.error)
- return data.error;
-
- return 0;
-}
-
-static int sd_read_ext_reg(struct mmc_card *card, u8 fno, u8 page,
- u16 offset, u16 len, u8 *reg_buf)
-{
- u32 cmd_args;
-
- /*
- * Command arguments of CMD48:
- * [31:31] MIO (0 = memory).
- * [30:27] FNO (function number).
- * [26:26] reserved (0).
- * [25:18] page number.
- * [17:9] offset address.
- * [8:0] length (0 = 1 byte, 1ff = 512 bytes).
- */
- cmd_args = fno << 27 | page << 18 | offset << 9 | (len -1);
-
- return mmc_send_adtc_data(card, card->host, SD_READ_EXTR_SINGLE,
- cmd_args, reg_buf, 512);
-}
-
static int sd_parse_ext_reg_power(struct mmc_card *card, u8 fno, u8 page,
u16 offset)
{
int err;
u8 *reg_buf;
- reg_buf = kzalloc(512, GFP_KERNEL);
- if (!reg_buf)
- return -ENOMEM;
+ reg_buf = card->ext_reg_buf;
/* Read the extension register for power management function. */
- err = sd_read_ext_reg(card, fno, page, offset, 512, reg_buf);
+ err = mmc_sd_read_ext_reg(card, fno, page, offset, 512, reg_buf);
if (err) {
pr_warn("%s: error %d reading PM func of ext reg\n",
mmc_hostname(card->host), err);
@@ -1152,7 +1073,6 @@ static int sd_parse_ext_reg_power(struct mmc_card *card, u8 fno, u8 page,
card->ext_power.offset = offset;
out:
- kfree(reg_buf);
return err;
}
@@ -1162,11 +1082,9 @@ static int sd_parse_ext_reg_perf(struct mmc_card *card, u8 fno, u8 page,
int err;
u8 *reg_buf;
- reg_buf = kzalloc(512, GFP_KERNEL);
- if (!reg_buf)
- return -ENOMEM;
+ reg_buf = card->ext_reg_buf;
- err = sd_read_ext_reg(card, fno, page, offset, 512, reg_buf);
+ err = mmc_sd_read_ext_reg(card, fno, page, offset, 512, reg_buf);
if (err) {
pr_warn("%s: error %d reading PERF func of ext reg\n",
mmc_hostname(card->host), err);
@@ -1192,16 +1110,34 @@ static int sd_parse_ext_reg_perf(struct mmc_card *card, u8 fno, u8 page,
if ((reg_buf[4] & BIT(0)) && !mmc_card_broken_sd_cache(card))
card->ext_perf.feature_support |= SD_EXT_PERF_CACHE;
- /* Command queue support indicated via queue depth bits (0 to 4). */
- if (reg_buf[6] & 0x1f)
+ /*
+ * Command queue support indicated via queue depth bits (0 to 4).
+ * Qualify this with the other mandatory required features.
+ */
+ if (reg_buf[6] & 0x1f && card->ext_power.feature_support & SD_EXT_POWER_OFF_NOTIFY &&
+ card->ext_perf.feature_support & SD_EXT_PERF_CACHE) {
card->ext_perf.feature_support |= SD_EXT_PERF_CMD_QUEUE;
+ card->ext_csd.cmdq_depth = reg_buf[6] & 0x1f;
+ card->ext_csd.cmdq_support = true;
+ pr_debug("%s: Command Queue supported depth %u\n",
+ mmc_hostname(card->host),
+ card->ext_csd.cmdq_depth);
+ /*
+ * If CQ is enabled, there is a contract between host and card such that
+	 * VDD will be maintained, and only removed after a power off notification
+ * is provided. An SD card in an accessible slot means surprise removal
+ * is a possibility. As a middle ground, keep the default maximum of 1
+ * posted write unless the card is "hardwired".
+ */
+ if (!mmc_card_is_removable(card->host))
+ card->max_posted_writes = card->ext_csd.cmdq_depth;
+ }
card->ext_perf.fno = fno;
card->ext_perf.page = page;
card->ext_perf.offset = offset;
out:
- kfree(reg_buf);
return err;
}
@@ -1256,7 +1192,7 @@ static int sd_parse_ext_reg(struct mmc_card *card, u8 *gen_info_buf,
return 0;
}
-static int sd_read_ext_regs(struct mmc_card *card)
+static int mmc_sd_read_ext_regs(struct mmc_card *card)
{
int err, i;
u8 num_ext, *gen_info_buf;
@@ -1268,15 +1204,21 @@ static int sd_read_ext_regs(struct mmc_card *card)
if (!(card->scr.cmds & SD_SCR_CMD48_SUPPORT))
return 0;
- gen_info_buf = kzalloc(512, GFP_KERNEL);
+ gen_info_buf = kzalloc(1024, GFP_KERNEL);
if (!gen_info_buf)
return -ENOMEM;
+ card->ext_reg_buf = kzalloc(512, GFP_KERNEL);
+ if (!card->ext_reg_buf) {
+ err = -ENOMEM;
+ goto out;
+ }
+
/*
* Read 512 bytes of general info, which is found at function number 0,
* at page 0 and with no offset.
*/
- err = sd_read_ext_reg(card, 0, 0, 0, 512, gen_info_buf);
+ err = mmc_sd_read_ext_reg(card, 0, 0, 0, 512, gen_info_buf);
if (err) {
pr_err("%s: error %d reading general info of SD ext reg\n",
mmc_hostname(card->host), err);
@@ -1293,14 +1235,23 @@ static int sd_read_ext_regs(struct mmc_card *card)
num_ext = gen_info_buf[4];
/*
- * We only support revision 0 and limit it to 512 bytes for simplicity.
+ * We only support revision 0 and up to the spec-defined maximum of 1K.
* No matter what, let's return zero to allow us to continue using the
* card, even if we can't support the features from the SD function
* extensions registers.
*/
- if (rev != 0 || len > 512) {
- pr_warn("%s: non-supported SD ext reg layout\n",
- mmc_hostname(card->host));
+ if (rev != 0 || len > 1024) {
+ pr_warn("%s: non-supported SD ext reg layout rev %u length %u\n",
+ mmc_hostname(card->host), rev, len);
+ goto out;
+ }
+
+ /* If the General Information block spills into the next page, read the rest */
+ if (len > 512)
+ err = mmc_sd_read_ext_reg(card, 0, 1, 0, 512, &gen_info_buf[512]);
+ if (err) {
+ pr_err("%s: error %d reading page 1 of general info of SD ext reg\n",
+ mmc_hostname(card->host), err);
goto out;
}
@@ -1338,9 +1289,7 @@ static int sd_flush_cache(struct mmc_host *host)
if (!sd_cache_enabled(host))
return 0;
- reg_buf = kzalloc(512, GFP_KERNEL);
- if (!reg_buf)
- return -ENOMEM;
+ reg_buf = card->ext_reg_buf;
/*
* Set Flush Cache at bit 0 in the performance enhancement register at
@@ -1350,7 +1299,7 @@ static int sd_flush_cache(struct mmc_host *host)
page = card->ext_perf.page;
offset = card->ext_perf.offset + 261;
- err = sd_write_ext_reg(card, fno, page, offset, BIT(0));
+ err = mmc_sd_write_ext_reg(card, fno, page, offset, BIT(0));
if (err) {
pr_warn("%s: error %d writing Cache Flush bit\n",
mmc_hostname(host), err);
@@ -1366,7 +1315,7 @@ static int sd_flush_cache(struct mmc_host *host)
* Read the Flush Cache bit. The card shall reset it, to confirm that
* it's has completed the flushing of the cache.
*/
- err = sd_read_ext_reg(card, fno, page, offset, 1, reg_buf);
+ err = mmc_sd_read_ext_reg(card, fno, page, offset, 1, reg_buf);
if (err) {
pr_warn("%s: error %d reading Cache Flush bit\n",
mmc_hostname(host), err);
@@ -1376,26 +1325,20 @@ static int sd_flush_cache(struct mmc_host *host)
if (reg_buf[0] & BIT(0))
err = -ETIMEDOUT;
out:
- kfree(reg_buf);
return err;
}
static int sd_enable_cache(struct mmc_card *card)
{
- u8 *reg_buf;
int err;
card->ext_perf.feature_enabled &= ~SD_EXT_PERF_CACHE;
- reg_buf = kzalloc(512, GFP_KERNEL);
- if (!reg_buf)
- return -ENOMEM;
-
/*
* Set Cache Enable at bit 0 in the performance enhancement register at
* 260 bytes offset.
*/
- err = sd_write_ext_reg(card, card->ext_perf.fno, card->ext_perf.page,
+ err = mmc_sd_write_ext_reg(card, card->ext_perf.fno, card->ext_perf.page,
card->ext_perf.offset + 260, BIT(0));
if (err) {
pr_warn("%s: error %d writing Cache Enable bit\n",
@@ -1409,7 +1352,6 @@ static int sd_enable_cache(struct mmc_card *card)
card->ext_perf.feature_enabled |= SD_EXT_PERF_CACHE;
out:
- kfree(reg_buf);
return err;
}
@@ -1452,6 +1394,7 @@ retry:
card->ocr = ocr;
card->type = MMC_TYPE_SD;
+ card->max_posted_writes = 1;
memcpy(card->raw_cid, cid, sizeof(card->raw_cid));
}
@@ -1572,7 +1515,7 @@ retry:
cont:
if (!oldcard) {
/* Read/parse the extension registers. */
- err = sd_read_ext_regs(card);
+ err = mmc_sd_read_ext_regs(card);
if (err)
goto free_card;
}
@@ -1584,13 +1527,45 @@ cont:
goto free_card;
}
+ /* Disallow command queueing on unvetted cards unless overridden */
+ if (!(host->caps2 & MMC_CAP2_SD_CQE_PERMISSIVE) && !mmc_card_working_sd_cq(card))
+ card->ext_csd.cmdq_support = false;
+
+ /* Enable command queueing if supported */
+ if (card->ext_csd.cmdq_support && host->caps2 & MMC_CAP2_CQE) {
+ /*
+ * Right now the MMC block layer uses DCMDs to issue
+ * cache-flush commands specific to eMMC devices.
+ * Turning off DCMD support avoids generating Illegal Command
+ * errors on SD, and flushing is instead done synchronously
+ * by mmc_blk_issue_flush().
+ */
+ host->caps2 &= ~MMC_CAP2_CQE_DCMD;
+ err = mmc_sd_cmdq_enable(card);
+ if (err && err != -EBADMSG)
+ goto free_card;
+ if (err) {
+ pr_warn("%s: Enabling CMDQ failed\n",
+ mmc_hostname(card->host));
+ card->ext_csd.cmdq_support = false;
+ card->ext_csd.cmdq_depth = 0;
+ }
+ }
+ card->reenable_cmdq = card->ext_csd.cmdq_en;
+
if (!mmc_card_ult_capacity(card) && host->cqe_ops && !host->cqe_enabled) {
err = host->cqe_ops->cqe_enable(host, card);
if (!err) {
host->cqe_enabled = true;
- host->hsq_enabled = true;
- pr_info("%s: Host Software Queue enabled\n",
- mmc_hostname(host));
+
+ if (card->ext_csd.cmdq_en) {
+ pr_info("%s: Command Queue Engine enabled, %u tags\n",
+ mmc_hostname(host), card->ext_csd.cmdq_depth);
+ } else {
+ host->hsq_enabled = true;
+ pr_info("%s: Host Software Queue enabled\n",
+ mmc_hostname(host));
+ }
}
}
@@ -1663,7 +1638,7 @@ static int sd_busy_poweroff_notify_cb(void *cb_data, bool *busy)
* one byte offset and is one byte long. The Power Off Notification
* Ready is bit 0.
*/
- err = sd_read_ext_reg(card, card->ext_power.fno, card->ext_power.page,
+ err = mmc_sd_read_ext_reg(card, card->ext_power.fno, card->ext_power.page,
card->ext_power.offset + 1, 1, data->reg_buf);
if (err) {
pr_warn("%s: error %d reading status reg of PM func\n",
@@ -1689,7 +1664,7 @@ static int sd_poweroff_notify(struct mmc_card *card)
* Set the Power Off Notification bit in the power management settings
* register at 2 bytes offset.
*/
- err = sd_write_ext_reg(card, card->ext_power.fno, card->ext_power.page,
+ err = mmc_sd_write_ext_reg(card, card->ext_power.fno, card->ext_power.page,
card->ext_power.offset + 2, BIT(0));
if (err) {
pr_warn("%s: error %d writing Power Off Notify bit\n",
diff --git a/drivers/mmc/core/sd_ops.c b/drivers/mmc/core/sd_ops.c
index cd86463dd306..5d03b6b6e69b 100644
--- a/drivers/mmc/core/sd_ops.c
+++ b/drivers/mmc/core/sd_ops.c
@@ -8,6 +8,7 @@
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/export.h>
+#include <linux/ktime.h>
#include <linux/scatterlist.h>
#include <linux/mmc/host.h>
@@ -417,3 +418,136 @@ int mmc_app_sd_status(struct mmc_card *card, void *ssr)
return 0;
}
+
+
+int mmc_sd_write_ext_reg(struct mmc_card *card, u8 fno, u8 page, u16 offset,
+ u8 reg_data)
+{
+ struct mmc_host *host = card->host;
+ struct mmc_request mrq = {};
+ struct mmc_command cmd = {};
+ struct mmc_data data = {};
+ struct scatterlist sg;
+ u8 *reg_buf;
+
+ reg_buf = card->ext_reg_buf;
+ memset(reg_buf, 0, 512);
+
+ mrq.cmd = &cmd;
+ mrq.data = &data;
+
+ /*
+ * Arguments of CMD49:
+ * [31:31] MIO (0 = memory).
+ * [30:27] FNO (function number).
+ * [26:26] MW - mask write mode (0 = disable).
+ * [25:18] page number.
+ * [17:9] offset address.
+ * [8:0] length (0 = 1 byte).
+ */
+ cmd.arg = fno << 27 | page << 18 | offset << 9;
+
+ /* The first byte in the buffer is the data to be written. */
+ reg_buf[0] = reg_data;
+
+ data.flags = MMC_DATA_WRITE;
+ data.blksz = 512;
+ data.blocks = 1;
+ data.sg = &sg;
+ data.sg_len = 1;
+ sg_init_one(&sg, reg_buf, 512);
+
+ cmd.opcode = SD_WRITE_EXTR_SINGLE;
+ cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
+
+ mmc_set_data_timeout(&data, card);
+ mmc_wait_for_req(host, &mrq);
+
+ /*
+ * Note that, the SD card is allowed to signal busy on DAT0 up to 1s
+ * after the CMD49. However, let's leave this to be managed by the
+ * caller.
+ */
+
+ if (cmd.error)
+ return cmd.error;
+ if (data.error)
+ return data.error;
+
+ return 0;
+}
+
+int mmc_sd_read_ext_reg(struct mmc_card *card, u8 fno, u8 page,
+ u16 offset, u16 len, u8 *reg_buf)
+{
+ u32 cmd_args;
+
+ /*
+ * Command arguments of CMD48:
+ * [31:31] MIO (0 = memory).
+ * [30:27] FNO (function number).
+ * [26:26] reserved (0).
+ * [25:18] page number.
+ * [17:9] offset address.
+ * [8:0] length (0 = 1 byte, 1ff = 512 bytes).
+ */
+ cmd_args = fno << 27 | page << 18 | offset << 9 | (len - 1);
+
+ return mmc_send_adtc_data(card, card->host, SD_READ_EXTR_SINGLE,
+ cmd_args, reg_buf, 512);
+}
+
+static int mmc_sd_cmdq_switch(struct mmc_card *card, bool enable)
+{
+ int err;
+ u8 reg = 0;
+ u8 *reg_buf = card->ext_reg_buf;
+ ktime_t timeout;
+ /*
+ * SD offers two command queueing modes - sequential (in-order) and
+ * voluntary (out-of-order). Apps Class A2 performance is only
+ * guaranteed for voluntary CQ (bit 1 = 0), so use that in preference
+ * to sequential.
+ */
+ if (enable)
+ reg = BIT(0);
+
+ /* Performance enhancement register byte 262 controls command queueing */
+ err = mmc_sd_write_ext_reg(card, card->ext_perf.fno, card->ext_perf.page,
+ card->ext_perf.offset + 262, reg);
+ if (err)
+ goto out;
+
+ /* Poll the register - cards may have a lazy init/deinit sequence. */
+ timeout = ktime_add_ms(ktime_get(), 10);
+ while (1) {
+ err = mmc_sd_read_ext_reg(card, card->ext_perf.fno, card->ext_perf.page,
+ card->ext_perf.offset + 262, 1, reg_buf);
+ if (err)
+ break;
+ if ((reg_buf[0] & BIT(0)) == reg)
+ break;
+ if (ktime_after(ktime_get(), timeout)) {
+ err = -EBADMSG;
+ break;
+ }
+ usleep_range(100, 200);
+ }
+out:
+ if (!err)
+ card->ext_csd.cmdq_en = enable;
+
+ return err;
+}
+
+int mmc_sd_cmdq_enable(struct mmc_card *card)
+{
+ return mmc_sd_cmdq_switch(card, true);
+}
+EXPORT_SYMBOL_GPL(mmc_sd_cmdq_enable);
+
+int mmc_sd_cmdq_disable(struct mmc_card *card)
+{
+ return mmc_sd_cmdq_switch(card, false);
+}
+EXPORT_SYMBOL_GPL(mmc_sd_cmdq_disable);
diff --git a/drivers/mmc/core/sd_ops.h b/drivers/mmc/core/sd_ops.h
index 8fffc1b29757..db04885ecb0f 100644
--- a/drivers/mmc/core/sd_ops.h
+++ b/drivers/mmc/core/sd_ops.h
@@ -24,6 +24,12 @@ int mmc_app_sd_status(struct mmc_card *card, void *ssr);
int mmc_app_cmd(struct mmc_host *host, struct mmc_card *card);
int mmc_send_ext_addr(struct mmc_host *host, u32 addr);
void mmc_uhs2_prepare_cmd(struct mmc_host *host, struct mmc_request *mrq);
+int mmc_sd_cmdq_enable(struct mmc_card *card);
+int mmc_sd_cmdq_disable(struct mmc_card *card);
+int mmc_sd_write_ext_reg(struct mmc_card *card, u8 fno, u8 page, u16 offset,
+ u8 reg_data);
+int mmc_sd_read_ext_reg(struct mmc_card *card, u8 fno, u8 page,
+ u16 offset, u16 len, u8 *reg_buf);
#endif