[PATCH v3 7/8] usb: dwc2: host: Add a delay before releasing periodic bandwidth

From: Douglas Anderson
Date: Mon Nov 16 2015 - 19:53:38 EST


We'd like to be able to use HCD_BH in order to speed up the dwc2 host
interrupt handler quite a bit. However, according to the kernel doc for
usb_submit_urb() (specifically the part about "Reserved Bandwidth
Transfers"), we need to keep a reservation active as long as a device
driver keeps submitting. That was easy to do when we gave back the URB
in the interrupt context: we just looked at when our queue was empty and
released the reserved bandwidth then. ...but now we need a little more
complexity.

We'll follow EHCI's lead in commit 9118f9eb4f1e ("USB: EHCI: improve
interrupt qh unlink") and add a 5ms delay. Since we don't have a whole
timer infrastructure in dwc2, we'll just add a timer per QH. The
overhead for this is very small.
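
In short, the lifecycle looks like this (a condensed sketch of the diff
below, not compilable on its own; the real code also holds hsotg->lock
and handles the race where the timer callback has already started):

  /* dwc2_qh_init(): one timer per QH, pointing back at the QH */
  setup_timer(&qh->unreserve_timer, dwc2_unreserve_timer_fn,
              (unsigned long)qh);

  /* dwc2_deschedule_periodic(): don't drop the bandwidth right away;
   * start the 5 ms countdown instead (+1 so a full jiffy elapses) */
  mod_timer(&qh->unreserve_timer, jiffies + DWC2_UNRESERVE_DELAY + 1);
  qh->unreserve_pending = 1;

  /* dwc2_schedule_periodic(): a new URB arrived in time, so cancel the
   * countdown and keep using the old reservation */
  if (del_timer(&qh->unreserve_timer))
          WARN_ON(!qh->unreserve_pending);
  if (!qh->unreserve_pending) {
          /* ... make a fresh reservation, as before ... */
  }
  qh->unreserve_pending = 0;

  /* dwc2_unreserve_timer_fn() and dwc2_hcd_qh_free(): the countdown
   * expired (or the QH is going away), so really give the bandwidth
   * back */
  dwc2_do_unreserve(hsotg, qh);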

Signed-off-by: Douglas Anderson <dianders@xxxxxxxxxxxx>
---
Changes in v3:
- Moved periodic bandwidth release delay patch later in the series.

Changes in v2:
- Periodic bandwidth release delay new for V2

drivers/usb/dwc2/hcd.h | 6 ++
drivers/usb/dwc2/hcd_queue.c | 252 ++++++++++++++++++++++++++++++++-----------
2 files changed, 193 insertions(+), 65 deletions(-)

diff --git a/drivers/usb/dwc2/hcd.h b/drivers/usb/dwc2/hcd.h
index ff99cb44c89d..2d9bb45c1777 100644
--- a/drivers/usb/dwc2/hcd.h
+++ b/drivers/usb/dwc2/hcd.h
@@ -211,6 +211,7 @@ enum dwc2_transaction_type {
/**
* struct dwc2_qh - Software queue head structure
*
+ * @hsotg: The HCD state structure for the DWC OTG controller
* @ep_type: Endpoint type. One of the following values:
* - USB_ENDPOINT_XFER_CONTROL
* - USB_ENDPOINT_XFER_BULK
@@ -250,13 +251,16 @@ enum dwc2_transaction_type {
* @n_bytes: Xfer Bytes array. Each element corresponds to a transfer
* descriptor and indicates original XferSize value for the
* descriptor
+ * @unreserve_timer: Timer for releasing periodic reservation.
* @tt_buffer_dirty True if clear_tt_buffer_complete is pending
+ * @unreserve_pending: True if we planned to unreserve but haven't yet.
*
* A Queue Head (QH) holds the static characteristics of an endpoint and
* maintains a list of transfers (QTDs) for that endpoint. A QH structure may
* be entered in either the non-periodic or periodic schedule.
*/
struct dwc2_qh {
+ struct dwc2_hsotg *hsotg;
u8 ep_type;
u8 ep_is_in;
u16 maxp;
@@ -279,7 +283,9 @@ struct dwc2_qh {
struct dwc2_hcd_dma_desc *desc_list;
dma_addr_t desc_list_dma;
u32 *n_bytes;
+ struct timer_list unreserve_timer;
unsigned tt_buffer_dirty:1;
+ unsigned unreserve_pending:1;
};

/**
diff --git a/drivers/usb/dwc2/hcd_queue.c b/drivers/usb/dwc2/hcd_queue.c
index c5a2edb04bec..a287e52f9a63 100644
--- a/drivers/usb/dwc2/hcd_queue.c
+++ b/drivers/usb/dwc2/hcd_queue.c
@@ -53,6 +53,101 @@
#include "core.h"
#include "hcd.h"

+/* Wait this long before releasing periodic reservation */
+#define DWC2_UNRESERVE_DELAY (msecs_to_jiffies(5))
+
+/**
+ * dwc2_do_unreserve() - Actually release the periodic reservation
+ *
+ * This function actually releases the periodic bandwidth that was reserved
+ * by the given qh.
+ *
+ * @hsotg: The HCD state structure for the DWC OTG controller
+ * @qh: QH for the periodic transfer.
+ */
+static void dwc2_do_unreserve(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
+{
+ int start = qh->start_usecs;
+ int utime = qh->usecs;
+ int i;
+
+ assert_spin_locked(&hsotg->lock);
+
+ WARN_ON(!qh->unreserve_pending);
+
+ /* No more unreserve pending--we're doing it */
+ qh->unreserve_pending = false;
+
+ if (WARN_ON(!list_empty(&qh->qh_list_entry)))
+ list_del_init(&qh->qh_list_entry);
+
+ /* Update claimed usecs per (micro)frame */
+ hsotg->periodic_usecs -= qh->usecs;
+
+ /* Release periodic channel reservation */
+ if (hsotg->core_params->uframe_sched <= 0) {
+ hsotg->periodic_channels--;
+ return;
+ }
+
+ bitmap_clear(hsotg->periodic_bitmap, start, utime);
+
+ if (qh->do_split) {
+ for (i = start / EARLY_FRAME_USEC;
+ i < DIV_ROUND_UP(start + utime - 1, EARLY_FRAME_USEC);
+ i++)
+ hsotg->has_split[i] = false;
+ }
+}
+
+/**
+ * dwc2_unreserve_timer_fn() - Timer function to release periodic reservation
+ *
+ * According to the kernel doc for usb_submit_urb() (specifically the part about
+ * "Reserved Bandwidth Transfers"), we need to keep a reservation active as
+ * long as a device driver keeps submitting. Since we're using HCD_BH to give
+ * back the URB we need to give the driver a little bit of time before we
+ * release the reservation. This timer function is called after the
+ * appropriate delay.
+ *
+ * @data: The struct dwc2_qh to unreserve, cast to an unsigned long.
+ */
+static void dwc2_unreserve_timer_fn(unsigned long data)
+{
+ struct dwc2_qh *qh = (struct dwc2_qh *)data;
+ struct dwc2_hsotg *hsotg = qh->hsotg;
+ unsigned long flags;
+
+ /*
+ * Wait for the lock, or for us to be scheduled again. We
+ * could be scheduled again if:
+ * - We started executing but didn't get the lock yet.
+ * - A new reservation came in, but cancel didn't take effect
+ * because we already started executing.
+ * - The timer has been kicked again.
+ * In that case cancel and wait for the next call.
+ */
+ while (!spin_trylock_irqsave(&hsotg->lock, flags)) {
+ if (timer_pending(&qh->unreserve_timer))
+ return;
+ }
+
+ /*
+ * Might be no more unreserve pending if:
+ * - We started executing but didn't get the lock yet.
+ * - A new reservation came in, but cancel didn't take effect
+ * because we already started executing.
+ *
+ * We can't put this in the loop above because unreserve_pending needs
+ * to be accessed under lock, so we can only check it once we got the
+ * lock.
+ */
+ if (qh->unreserve_pending)
+ dwc2_do_unreserve(hsotg, qh);
+
+ spin_unlock_irqrestore(&hsotg->lock, flags);
+}
+
/**
* dwc2_qh_init() - Initializes a QH structure
*
@@ -71,6 +166,9 @@ static void dwc2_qh_init(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
dev_vdbg(hsotg->dev, "%s()\n", __func__);

/* Initialize QH */
+ qh->hsotg = hsotg;
+ setup_timer(&qh->unreserve_timer, dwc2_unreserve_timer_fn,
+ (unsigned long)qh);
qh->ep_type = dwc2_hcd_get_pipe_type(&urb->pipe_info);
qh->ep_is_in = dwc2_hcd_is_pipe_in(&urb->pipe_info) ? 1 : 0;

@@ -240,6 +338,15 @@ struct dwc2_qh *dwc2_hcd_qh_create(struct dwc2_hsotg *hsotg,
*/
void dwc2_hcd_qh_free(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
{
+ /* Make sure any unreserve work is finished. */
+ if (del_timer_sync(&qh->unreserve_timer)) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&hsotg->lock, flags);
+ dwc2_do_unreserve(hsotg, qh);
+ spin_unlock_irqrestore(&hsotg->lock, flags);
+ }
+
if (hsotg->core_params->dma_desc_enable > 0)
dwc2_hcd_qh_free_ddma(hsotg, qh);
kfree(qh);
@@ -483,55 +590,76 @@ static int dwc2_schedule_periodic(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
{
int status;

- if (hsotg->core_params->uframe_sched > 0) {
- int frame = -1;
+ status = dwc2_check_max_xfer_size(hsotg, qh);
+ if (status) {
+ dev_dbg(hsotg->dev,
+ "%s: Channel max transfer size too small for periodic transfer\n",
+ __func__);
+ return status;
+ }

- status = dwc2_find_uframe(hsotg, qh);
- if (status == 0)
- frame = 7;
- else if (status > 0)
- frame = status - 1;
+ /* Cancel pending unreserve; if canceled OK, unreserve was pending */
+ if (del_timer(&qh->unreserve_timer))
+ WARN_ON(!qh->unreserve_pending);

- /* Set the new frame up */
- if (frame >= 0) {
- qh->sched_uframe = frame;
+ /*
+ * Only need to reserve if there's not an unreserve pending, since if an
+ * unreserve is pending then by definition our old reservation is still
+ * valid. Unreserve might still be pending even if we failed to cancel,
+ * because dwc2_unreserve_timer_fn() may have already started. Code in
+ * the timer handles that case.
+ */
+ if (!qh->unreserve_pending) {
+ if (hsotg->core_params->uframe_sched > 0) {
+ int frame = -1;
+
+ status = dwc2_find_uframe(hsotg, qh);
+ if (status == 0)
+ frame = 7;
+ else if (status > 0)
+ frame = status - 1;
+
+ /* Set the new frame up */
+ if (frame >= 0) {
+ qh->sched_uframe = frame;
+ qh->sched_frame = ((qh->sched_frame + 7) & ~7) |
+ frame;
+ dwc2_sch_dbg(hsotg,
+ "sched_p %p sch=%04x, uf=%d, us=%d\n",
+ qh, qh->sched_frame, frame,
+ qh->start_usecs);
+ }

- /* The next frame that matches our scheduled uframe */
- qh->sched_frame = ((qh->sched_frame + 7) & ~7) | frame;
- dwc2_sch_dbg(hsotg,
- "sched_p %p sch=%04x, uf=%d, us=%d\n",
- qh, qh->sched_frame, frame,
- qh->start_usecs);
+ if (status > 0)
+ status = 0;
+ } else {
+ status = dwc2_periodic_channel_available(hsotg);
+ if (status) {
+ dev_info(hsotg->dev,
+ "%s: No host channel available for periodic transfer\n",
+ __func__);
+ return status;
+ }
+
+ status = dwc2_check_periodic_bandwidth(hsotg, qh);
}

- if (status > 0)
- status = 0;
- } else {
- status = dwc2_periodic_channel_available(hsotg);
if (status) {
- dev_info(hsotg->dev,
- "%s: No host channel available for periodic transfer\n",
- __func__);
+ dev_dbg(hsotg->dev,
+ "%s: Insufficient periodic bandwidth for periodic transfer\n",
+ __func__);
return status;
}

- status = dwc2_check_periodic_bandwidth(hsotg, qh);
- }
+ if (hsotg->core_params->uframe_sched <= 0)
+ /* Reserve periodic channel */
+ hsotg->periodic_channels++;

- if (status) {
- dev_dbg(hsotg->dev,
- "%s: Insufficient periodic bandwidth for periodic transfer\n",
- __func__);
- return status;
+ /* Update claimed usecs per (micro)frame */
+ hsotg->periodic_usecs += qh->usecs;
}

- status = dwc2_check_max_xfer_size(hsotg, qh);
- if (status) {
- dev_dbg(hsotg->dev,
- "%s: Channel max transfer size too small for periodic transfer\n",
- __func__);
- return status;
- }
+ qh->unreserve_pending = 0;

if (hsotg->core_params->dma_desc_enable > 0)
/* Don't rely on SOF and start in ready schedule */
@@ -541,13 +669,6 @@ static int dwc2_schedule_periodic(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
list_add_tail(&qh->qh_list_entry,
&hsotg->periodic_sched_inactive);

- if (hsotg->core_params->uframe_sched <= 0)
- /* Reserve periodic channel */
- hsotg->periodic_channels++;
-
- /* Update claimed usecs per (micro)frame */
- hsotg->periodic_usecs += qh->usecs;
-
return status;
}

@@ -561,29 +682,31 @@ static int dwc2_schedule_periodic(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
static void dwc2_deschedule_periodic(struct dwc2_hsotg *hsotg,
struct dwc2_qh *qh)
{
- int start = qh->start_usecs;
- int utime = qh->usecs;
- int i;
+ bool did_modify;

- list_del_init(&qh->qh_list_entry);
+ assert_spin_locked(&hsotg->lock);

- /* Update claimed usecs per (micro)frame */
- hsotg->periodic_usecs -= qh->usecs;
-
- if (hsotg->core_params->uframe_sched <= 0) {
- /* Release periodic channel reservation */
- hsotg->periodic_channels--;
- return;
- }
-
- bitmap_clear(hsotg->periodic_bitmap, start, utime);
+ /*
+ * Schedule the unreserve to happen in a little bit. Cases here:
+ * - The unreserve timer might be sitting there waiting to grab the lock.
+ * In this case it will notice it's been scheduled again and will
+ * quit.
+ * - The unreserve timer might not be scheduled at all.
+ *
+ * We should never already be scheduled since dwc2_schedule_periodic()
+ * should have canceled the scheduled unreserve timer (hence the
+ * warning on did_modify).
+ *
+ * We add + 1 to the timer to guarantee that at least 1 jiffy has
+ * passed (otherwise the jiffy counter might tick right after we
+ * read it and we'd get no delay).
+ */
+ did_modify = mod_timer(&qh->unreserve_timer,
+ jiffies + DWC2_UNRESERVE_DELAY + 1);
+ WARN_ON(did_modify);
+ qh->unreserve_pending = 1;

- if (qh->do_split) {
- for (i = start / EARLY_FRAME_USEC;
- i < DIV_ROUND_UP(start + utime - 1, EARLY_FRAME_USEC);
- i++)
- hsotg->has_split[i] = false;
- }
+ list_del_init(&qh->qh_list_entry);
}

/**
@@ -710,7 +833,6 @@ static void dwc2_sched_periodic_split(struct dwc2_hsotg *hsotg,
qh->sched_frame = frame_number;

if (hsotg->core_params->uframe_sched > 0)
- /* The next frame that matches our scheduled uframe */
qh->sched_frame = ((qh->sched_frame + 7) & ~7) |
qh->sched_uframe;
else
--
2.6.0.rc2.230.g3dd15c0
