[PATCH v6 7/7] ufs: core: Add error handling for MCQ mode

From: Bao D. Nguyen
Date: Tue May 23 2023 - 03:30:42 EST


Add support for error handling in MCQ mode. MCQ mode does not use the
legacy doorbell register, so the error handler can no longer find
pending transfers through hba->outstanding_reqs. Instead, detect
in-flight commands per tag, clear or abort them through the MCQ
submission/completion queues, and reset the SQ/CQ slot pointers in
ufshcd_host_reset_and_restore() before the host is re-initialized.

Signed-off-by: Bao D. Nguyen <quic_nguyenb@xxxxxxxxxxx>
---
drivers/ufs/core/ufshcd.c | 106 ++++++++++++++++++++++++++++++++++++++++------
1 file changed, 94 insertions(+), 12 deletions(-)
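
For review only (below the cut line, so not part of the commit): a
stand-alone, user-space sketch of the tag-walk difference this patch
builds on. In SDB mode the error handler discovers pending transfers
from the outstanding_reqs doorbell mirror; MCQ mode has no doorbell, so
every tag is inspected for an in-flight command instead. The model_*
structs and helpers and the NUTRS value are simplified stand-ins for the
driver's lrb array, is_mcq_enabled() and ufshcd_cmd_inflight(); it is
illustrative only and not meant to apply as part of the diff.

/*
 * Illustrative model only. It contrasts how pending transfers are found:
 * SDB mode reads the outstanding_reqs doorbell mirror, while MCQ mode
 * walks every tag and asks whether a command is still in flight, because
 * MCQ has no doorbell register to consult.
 */
#include <stdbool.h>
#include <stdio.h>

#define NUTRS 32                                /* arbitrary queue depth for the model */

struct model_lrb {
        bool cmd_inflight;                      /* stand-in for ufshcd_cmd_inflight(lrbp->cmd) */
};

struct model_hba {
        bool mcq_enabled;                       /* stand-in for is_mcq_enabled(hba) */
        unsigned long outstanding_reqs;         /* SDB doorbell mirror */
        struct model_lrb lrb[NUTRS];
};

/* Count the transfers an error-handling pass would have to deal with. */
static int model_count_pending(const struct model_hba *hba)
{
        int tag, pending = 0;

        if (hba->mcq_enabled) {
                /* MCQ: no doorbell, so inspect every tag. */
                for (tag = 0; tag < NUTRS; tag++)
                        if (hba->lrb[tag].cmd_inflight)
                                pending++;
        } else {
                /* SDB: the doorbell bitmap already lists the pending tags. */
                for (tag = 0; tag < NUTRS; tag++)
                        if (hba->outstanding_reqs & (1UL << tag))
                                pending++;
        }

        return pending;
}

int main(void)
{
        struct model_hba hba = { .mcq_enabled = true };

        hba.lrb[3].cmd_inflight = true;
        hba.lrb[17].cmd_inflight = true;
        printf("pending transfers: %d\n", model_count_pending(&hba));
        return 0;
}

Walking all hba->nutrs tags is O(nutrs) per sweep, which is acceptable
since these paths only run during (rare) error handling.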

diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c
index 69562c4..90cf047 100644
--- a/drivers/ufs/core/ufshcd.c
+++ b/drivers/ufs/core/ufshcd.c
@@ -3148,6 +3148,15 @@ static int ufshcd_wait_for_dev_cmd(struct ufs_hba *hba,
err = -ETIMEDOUT;
dev_dbg(hba->dev, "%s: dev_cmd request timedout, tag %d\n",
__func__, lrbp->task_tag);
+
+ /* MCQ mode */
+ if (is_mcq_enabled(hba)) {
+ err = ufshcd_clear_cmd(hba, lrbp->task_tag);
+ hba->dev_cmd.complete = NULL;
+ return err;
+ }
+
+ /* SDB mode */
if (ufshcd_clear_cmd(hba, lrbp->task_tag) == 0) {
/* successfully cleared the command, retry if needed */
err = -EAGAIN;
@@ -5572,6 +5581,31 @@ static int ufshcd_poll(struct Scsi_Host *shost, unsigned int queue_num)
}

/**
+ * ufshcd_mcq_compl_pending_transfer - complete pending transfers in MCQ mode.
+ * Invoked from the error handler context or from ufshcd_host_reset_and_restore()
+ * to complete the pending transfers and free the resources associated with
+ * each SCSI command.
+ *
+ * @hba: per adapter instance
+ */
+static void ufshcd_mcq_compl_pending_transfer(struct ufs_hba *hba)
+{
+ struct ufshcd_lrb *lrbp;
+ struct scsi_cmnd *cmd;
+ int tag;
+
+ for (tag = 0; tag < hba->nutrs; tag++) {
+ lrbp = &hba->lrb[tag];
+ cmd = lrbp->cmd;
+ if (ufshcd_cmd_inflight(cmd)) {
+ set_host_byte(cmd, DID_ERROR);
+ ufshcd_release_scsi_cmd(hba, lrbp);
+ scsi_done(cmd);
+ }
+ }
+}
+
+/**
* ufshcd_transfer_req_compl - handle SCSI and query command completion
* @hba: per adapter instance
*
@@ -6137,7 +6171,11 @@ static void ufshcd_exception_event_handler(struct work_struct *work)
/* Complete requests that have door-bell cleared */
static void ufshcd_complete_requests(struct ufs_hba *hba)
{
- ufshcd_transfer_req_compl(hba);
+ if (is_mcq_enabled(hba))
+ ufshcd_mcq_compl_pending_transfer(hba);
+ else
+ ufshcd_transfer_req_compl(hba);
+
ufshcd_tmc_handler(hba);
}

@@ -6378,18 +6416,36 @@ static bool ufshcd_abort_all(struct ufs_hba *hba)
bool needs_reset = false;
int tag, ret;

- /* Clear pending transfer requests */
- for_each_set_bit(tag, &hba->outstanding_reqs, hba->nutrs) {
- ret = ufshcd_try_to_abort_task(hba, tag);
- dev_err(hba->dev, "Aborting tag %d / CDB %#02x %s\n", tag,
- hba->lrb[tag].cmd ? hba->lrb[tag].cmd->cmnd[0] : -1,
- ret ? "failed" : "succeeded");
- if (ret) {
- needs_reset = true;
- goto out;
+ if (is_mcq_enabled(hba)) {
+ struct ufshcd_lrb *lrbp;
+ int tag;
+
+ for (tag = 0; tag < hba->nutrs; tag++) {
+ lrbp = &hba->lrb[tag];
+ if (!ufshcd_cmd_inflight(lrbp->cmd))
+ continue;
+ ret = ufshcd_try_to_abort_task(hba, tag);
+ dev_err(hba->dev, "Aborting tag %d / CDB %#02x %s\n", tag,
+ hba->lrb[tag].cmd ? hba->lrb[tag].cmd->cmnd[0] : -1,
+ ret ? "failed" : "succeeded");
+ if (ret) {
+ needs_reset = true;
+ goto out;
+ }
+ }
+ } else {
+ /* Clear pending transfer requests */
+ for_each_set_bit(tag, &hba->outstanding_reqs, hba->nutrs) {
+ ret = ufshcd_try_to_abort_task(hba, tag);
+ dev_err(hba->dev, "Aborting tag %d / CDB %#02x %s\n", tag,
+ hba->lrb[tag].cmd ? hba->lrb[tag].cmd->cmnd[0] : -1,
+ ret ? "failed" : "succeeded");
+ if (ret) {
+ needs_reset = true;
+ goto out;
+ }
}
}
-
/* Clear pending task management requests */
for_each_set_bit(tag, &hba->outstanding_tasks, hba->nutmrs) {
if (ufshcd_clear_tm_cmd(hba, tag)) {
@@ -7321,6 +7377,8 @@ static int ufshcd_eh_device_reset_handler(struct scsi_cmnd *cmd)
unsigned long flags, pending_reqs = 0, not_cleared = 0;
struct Scsi_Host *host;
struct ufs_hba *hba;
+ struct ufs_hw_queue *hwq;
+ struct ufshcd_lrb *lrbp;
u32 pos, not_cleared_mask = 0;
int err;
u8 resp = 0xF, lun;
@@ -7336,6 +7394,20 @@ static int ufshcd_eh_device_reset_handler(struct scsi_cmnd *cmd)
goto out;
}

+ if (is_mcq_enabled(hba)) {
+ for (pos = 0; pos < hba->nutrs; pos++) {
+ lrbp = &hba->lrb[pos];
+ if (ufshcd_cmd_inflight(lrbp->cmd) &&
+ lrbp->lun == lun) {
+ ufshcd_clear_cmd(hba, pos);
+ hwq = ufshcd_mcq_req_to_hwq(hba, scsi_cmd_to_rq(lrbp->cmd));
+ ufshcd_mcq_poll_cqe_lock(hba, hwq);
+ }
+ }
+ err = 0;
+ goto out;
+ }
+
/* clear the commands that were pending for corresponding LUN */
spin_lock_irqsave(&hba->outstanding_lock, flags);
for_each_set_bit(pos, &hba->outstanding_reqs, hba->nutrs)
@@ -7610,7 +7682,8 @@ static int ufshcd_abort(struct scsi_cmnd *cmd)
*/
static int ufshcd_host_reset_and_restore(struct ufs_hba *hba)
{
- int err;
+ struct ufs_hw_queue *hwq;
+ int err, i;

/*
* Stop the host controller and complete the requests
@@ -7622,6 +7695,15 @@ static int ufshcd_host_reset_and_restore(struct ufs_hba *hba)
ufshcd_complete_requests(hba);
hba->silence_err_logs = false;

+ if (is_mcq_enabled(hba)) {
+ for (i = 0; i < hba->nr_hw_queues; i++) {
+ hwq = &hba->uhq[i];
+ hwq->sq_tail_slot = 0;
+ hwq->cq_tail_slot = 0;
+ hwq->cq_head_slot = 0;
+ }
+ }
+
/* scale up clocks to max frequency before full reinitialization */
ufshcd_scale_clks(hba, true);

--
2.7.4