[RFC PATCH] usb: gadget: Introduce Cadence USB2 UDC Driver

From: Neil Armstrong
Date: Fri Oct 16 2015 - 10:55:04 EST


Introduces UDC support for the Device-Mode only version of the
Cadence USB2 Controller IP Core.

Host mode and OTG mode are not implemented for lack of hardware.
Support for Isochronous endpoints is not implemented for lack of time.

Internal DMA is supported and can be activated by DT property.

Signed-off-by: Neil Armstrong <narmstrong@xxxxxxxxxxxx>
---
.../devicetree/bindings/usb/cadence-hsudc.txt | 55 +
drivers/usb/gadget/udc/Kconfig | 15 +
drivers/usb/gadget/udc/Makefile | 1 +
drivers/usb/gadget/udc/cadence_hsudc.c | 2136 ++++++++++++++++++++
drivers/usb/gadget/udc/cadence_hsudc_regs.h | 283 +++
5 files changed, 2490 insertions(+)
create mode 100644 Documentation/devicetree/bindings/usb/cadence-hsudc.txt
create mode 100644 drivers/usb/gadget/udc/cadence_hsudc.c
create mode 100644 drivers/usb/gadget/udc/cadence_hsudc_regs.h

diff --git a/Documentation/devicetree/bindings/usb/cadence-hsudc.txt b/Documentation/devicetree/bindings/usb/cadence-hsudc.txt
new file mode 100644
index 0000000..4a9fa7f
--- /dev/null
+++ b/Documentation/devicetree/bindings/usb/cadence-hsudc.txt
@@ -0,0 +1,55 @@
+Cadence USB2.0 Device Controller
+
+The Cadence USB2.0 Device Controller is delivered as a Verilog IP core library.
+
+Required properties:
+- compatible : Should be "cdns,usbhs-udc"
+- reg : Physical base address and size of the USB2
+ device registers map.
+- interrupts : Should contain single irq line of USB2 device
+ controller
+- cdns,ep-in : Should be an array of u32 containing 1 for each available
+ IN endpoint, endpoint 0 is mandatory,
+ 0 means not available, 16 entries max
+- cdns,ep-out : Should be an array of u32 containing 1 for each available
+ OUT endpoint, endpoint 0 is mandatory
+ 0 means not available, 16 entries max
+- cdns,ep-in-size : Should be an array of u32 containing the max buffer
+ size available for each IN endpoint,
+ i.e. 64 for EP0, 512 for BULK, 1024 for ISO
+- cdns,ep-in-buffers : Should be an array of u32 containing the buffer
+ count available for each IN endpoint,
+ i.e. 1 for EP0 or BULK, 2 to 4 for ISO
+- cdns,ep-in-buffstart : Should be an array of u32 containing the buffer
+ start offset for each IN endpoint
+- cdns,ep-out-size : Should be an array of u32 containing the max buffer
+ size available for each OUT endpoint,
+ i.e. 64 for EP0, 512 for BULK, 1024 for ISO
+- cdns,ep-out-buffers : Should be an array of u32 containing the buffer
+ count available for each OUT endpoint,
+ i.e. 1 for EP0 or BULK, 2 to 4 for ISO
+- cdns,ep-out-buffstart : Should be an array of u32 containing the buffer
+ start offset for each OUT endpoint
+
+Optional Properties :
+- cdns,dma-enable : Present if DMA is present and must be enabled
+- cdns,dma-channels : If cdns,dma-enable is present, should be an u32
+ containing the count of DMA channels
+- clocks : Should contain a phandle to the bus clock to enable
+
+Example :
+ usbd@80402000 {
+ compatible = "cdns,usbhs-udc";
+ interrupts = <25>;
+ reg = <0x80402000 0x1000>;
+ cdns,ep-in = <1 1 1>;
+ cdns,ep-out = <1 1 1>;
+ cdns,ep-in-size = <64 512 512>;
+ cdns,ep-out-size = <64 512 512>;
+ cdns,ep-in-buffers = <1 1 1>;
+ cdns,ep-out-buffers = <1 1 1>;
+ cdns,ep-in-buffstart = <0 64 576>;
+ cdns,ep-out-buffstart = <0 64 576>;
+ cdns,dma-enable;
+ cdns,dma-channels = <4>;
+ };
diff --git a/drivers/usb/gadget/udc/Kconfig b/drivers/usb/gadget/udc/Kconfig
index 9a3a6b0..2d24f85 100644
--- a/drivers/usb/gadget/udc/Kconfig
+++ b/drivers/usb/gadget/udc/Kconfig
@@ -373,6 +373,21 @@ config USB_GADGET_XILINX
dynamically linked module called "udc-xilinx" and force all
gadget drivers to also be dynamically linked.

+config USB_CADENCE_HSUDC
+ tristate "Cadence USB2.0 Device Controller Driver"
+ depends on OF
+ help
+ Cadence sells a USB2.0 IP with Device Only mode.
+ This is a variant of the OTG Core.
+ This driver implements the Device Only mode;
+ neither OTG nor Host mode is implemented in the driver.
+ The Cadence USB2.0 DMA support is implemented.
+ Support for Isochronous endpoints is not implemented.
+
+ Say "y" to link the driver statically, or "m" to build a
+ dynamically linked module called "cusb2_udc" and force all
+ gadget drivers to also be dynamically linked.
+
#
# LAST -- dummy/emulated controller
#
diff --git a/drivers/usb/gadget/udc/Makefile b/drivers/usb/gadget/udc/Makefile
index fba2049..94cf53c 100644
--- a/drivers/usb/gadget/udc/Makefile
+++ b/drivers/usb/gadget/udc/Makefile
@@ -31,3 +31,4 @@ obj-$(CONFIG_USB_MV_U3D) += mv_u3d_core.o
obj-$(CONFIG_USB_GR_UDC) += gr_udc.o
obj-$(CONFIG_USB_GADGET_XILINX) += udc-xilinx.o
obj-$(CONFIG_USB_BDC_UDC) += bdc/
+obj-$(CONFIG_USB_CADENCE_HSUDC) += cadence_hsudc.o
diff --git a/drivers/usb/gadget/udc/cadence_hsudc.c b/drivers/usb/gadget/udc/cadence_hsudc.c
new file mode 100644
index 0000000..53f600b
--- /dev/null
+++ b/drivers/usb/gadget/udc/cadence_hsudc.c
@@ -0,0 +1,2136 @@
+/*
+ * linux/drivers/usb/gadget/udc/cadence_hsudc.c
+ * - Cadence USB2.0 Device Controller Driver
+ *
+ * Copyright (C) 2015 Neotion
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/clk.h>
+#include <linux/reset.h>
+#include <linux/delay.h>
+#include <linux/semaphore.h>
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/of_platform.h>
+#include <linux/sysfs.h>
+
+#include <linux/usb/ch9.h>
+#include <linux/usb/gadget.h>
+
+#include "cadence_hsudc_regs.h"
+
+/* Driver Status
+ *
+ * Managed :
+ * - EP0 Endpoint Status, Clear/Set Feature and Gadget stack pass-through
+ * - EP IN/OUT 1 to 15 with hardware caps
+ * - EP Bulk and Interrupt transfer
+ * - DMA in normal mode, with auto-arm
+ * - Endpoint Halting (from Gadget stack or EP0 Setup)
+ * - HW config via device tree
+ *
+ * TODOs :
+ * - LPM
+ * - USB Suspend/Wakeup
+ * - IP config like AHB master configuration
+ *
+ * Not (Never?) Supported :
+ * - Isochronous (No Hardware available)
+ * - OTG/OTG2 (No Hardware available)
+ * - Host Mode (No Hardware available)
+ * - Configuration FSM (No Hardware available)
+ *
+ */
+
+/* Sentinel for "no DMA mapping active" on a request. */
+#define DMA_ADDR_INVALID (~(dma_addr_t)0)
+
+/* One gadget transfer request, linked on the owning endpoint's queue. */
+struct cadence_hsudc_request {
+ struct usb_request req;
+ struct list_head queue;
+};
+
+struct cadence_hsudc;
+struct cadence_hsudc_ep;
+
+/* State of one hardware DMA channel; cur_ep is the endpoint it is
+ * currently serving while in_use is set.
+ */
+struct hsudc_dma_channel {
+ struct cadence_hsudc_ep *cur_ep;
+
+ int num;
+ int is_available;
+
+ int in_use;
+};
+
+/* Per-endpoint state: descriptor, request queue, the in-flight request
+ * (cur), workqueue items for start (ws) and completion (comp), and the
+ * DMA channel borrowed while a DMA transfer is active.
+ */
+struct cadence_hsudc_ep {
+ struct cadence_hsudc *hsudc_dev;
+ const struct usb_endpoint_descriptor *desc;
+ struct usb_ep ep;
+ int num;
+ int is_in;
+ int is_ep0;
+ int is_available;
+
+ struct list_head queue;
+ struct cadence_hsudc_request *cur;
+ spinlock_t s;
+
+ struct work_struct ws;
+ struct work_struct comp;
+
+ int maxpacket;
+
+ struct hsudc_dma_channel *dma_channel;
+ int use_dma;
+};
+
+/* Hardware capabilities parsed from the device tree (see the
+ * cadence-hsudc binding document); one entry per endpoint index.
+ */
+struct hsudc_hw_config {
+ unsigned ep_in_exist[HSUDC_EP_COUNT];
+ unsigned ep_out_exist[HSUDC_EP_COUNT];
+ unsigned ep_in_size[HSUDC_EP_COUNT];
+ unsigned ep_out_size[HSUDC_EP_COUNT];
+ unsigned ep_in_buffering[HSUDC_EP_COUNT];
+ unsigned ep_out_buffering[HSUDC_EP_COUNT];
+ unsigned ep_in_startbuff[HSUDC_EP_COUNT];
+ unsigned ep_out_startbuff[HSUDC_EP_COUNT];
+ unsigned dma_enabled;
+ unsigned dma_channels;
+};
+
+/* Per-controller device state. dma_sem counts free DMA channels,
+ * dma_s protects the dma_channels[] bookkeeping.
+ */
+struct cadence_hsudc {
+ struct platform_device *pdev;
+ void __iomem *io_base;
+ const struct hsudc_hw_config *hw_config;
+ int irq;
+
+ struct usb_gadget_driver *driver;
+ struct usb_gadget gadget;
+
+ struct cadence_hsudc_ep ep_in[HSUDC_EP_COUNT]; /* 0 is not available */
+ struct cadence_hsudc_ep ep_out[HSUDC_EP_COUNT]; /* 0 is not available */
+ struct cadence_hsudc_ep ep0;
+ struct work_struct ep0_setup;
+
+ struct workqueue_struct *wq_ep;
+
+ struct hsudc_dma_channel dma_channels[HSUDC_DMA_CHANNELS];
+ struct semaphore dma_sem;
+ spinlock_t dma_s;
+};
+
+/* Register Access
+ * NOTE: these macros expect a local variable named 'hsudc_dev' to be
+ * in scope at every call site.
+ */
+#define hsudc_write8(value, reg) \
+ writeb((value)&0xFF, hsudc_dev->io_base + (reg))
+#define hsudc_write16(value, reg) \
+ writew((value)&0xFFFF, hsudc_dev->io_base + (reg))
+#define hsudc_write32(value, reg) \
+ writel((value), hsudc_dev->io_base + (reg))
+
+#define hsudc_read8(reg) readb(hsudc_dev->io_base + (reg))
+#define hsudc_read16(reg) readw(hsudc_dev->io_base + (reg))
+#define hsudc_read32(reg) readl(hsudc_dev->io_base + (reg))
+
+/*
+ * Handle one DMA channel interrupt: acknowledge and mask the channel's
+ * DMAIRQ/DMASHORTIRQ sources, record the transferred byte count on the
+ * in-flight request, schedule the endpoint completion work and release
+ * the channel.
+ *
+ * Fixes vs. original: 'dmashortirq' was missing its type (compile
+ * error), the first two tests used an undeclared 'i' instead of
+ * 'dma_channel', and the availability test used bitwise '&' where
+ * logical '&&' was intended.
+ */
+static inline void cadence_hsudc_dma_irq(struct cadence_hsudc *hsudc_dev,
+ unsigned dma_channel,
+ unsigned dmairq, unsigned dmashortirq)
+{
+ struct hsudc_dma_channel *channel =
+ &hsudc_dev->dma_channels[dma_channel];
+
+ if ((dmairq & (1 << dma_channel))) {
+ /* Clear and disable DMAIRQ */
+ hsudc_write32(1 << dma_channel, HSUDC_DMA_IRQ_REG32);
+ hsudc_write32(hsudc_read32(HSUDC_DMA_IEN_REG32) &
+ ~(1 << dma_channel),
+ HSUDC_DMA_IEN_REG32);
+ }
+ if ((dmashortirq & (1 << dma_channel))) {
+ /* Clear and disable DMASHORTIRQ */
+ hsudc_write32(1 << dma_channel, HSUDC_DMA_SHORTIRQ_REG32);
+ hsudc_write32(hsudc_read32(HSUDC_DMA_SHORTIEN_REG32) &
+ ~(1 << dma_channel),
+ HSUDC_DMA_SHORTIEN_REG32);
+ }
+ if (channel->is_available &&
+ channel->in_use &&
+ channel->cur_ep->cur) {
+ struct cadence_hsudc_request *hsudc_req =
+ channel->cur_ep->cur;
+ /* Hardware counts down; the residue gives actual bytes */
+ unsigned remain =
+ hsudc_read32(HSUDC_DMA_CNT_REG32(dma_channel));
+
+ hsudc_req->req.actual = hsudc_req->req.length - remain;
+
+ queue_work(hsudc_dev->wq_ep,
+ &channel->cur_ep->comp);
+
+ channel->cur_ep->dma_channel = NULL;
+ channel->cur_ep = NULL;
+ channel->in_use = 0;
+
+ /* Free DMA channel */
+ up(&hsudc_dev->dma_sem);
+ }
+}
+
+/*
+ * Top-level interrupt handler.
+ *
+ * Latches the pending-and-enabled IN/OUT packet, USB event and DMA
+ * interrupt sources, acknowledges them, and defers all real work to
+ * the wq_ep workqueue (per-endpoint completion works, ep0 setup work).
+ */
+static irqreturn_t cadence_hsudc_irq(int irq, void *data)
+{
+ struct cadence_hsudc *hsudc_dev = data;
+
+ unsigned in_packet_irq;
+ unsigned out_packet_irq;
+ unsigned usbirq;
+ unsigned dmairq, dmashortirq;
+
+ (void)irq;
+
+ /* Only consider sources that are both raised and enabled */
+ in_packet_irq = hsudc_read16(HSUDC_INIRQ_REG16) &
+ hsudc_read16(HSUDC_INIEN_REG16);
+ out_packet_irq = hsudc_read16(HSUDC_OUTIRQ_REG16) &
+ hsudc_read16(HSUDC_OUTIEN_REG16);
+ usbirq = hsudc_read8(HSUDC_USBIRQ_REG8) &
+ hsudc_read8(HSUDC_USBIEN_REG8);
+
+ dmairq = hsudc_read32(HSUDC_DMA_IRQ_REG32) &
+ hsudc_read32(HSUDC_DMA_IEN_REG32);
+ dmashortirq = hsudc_read32(HSUDC_DMA_SHORTIRQ_REG32) &
+ hsudc_read32(HSUDC_DMA_SHORTIEN_REG32);
+
+ dev_vdbg(&hsudc_dev->pdev->dev, "irq: in %04X out %04X usb %04X dma %x/%x\n",
+ in_packet_irq, out_packet_irq,
+ usbirq, dmairq, dmashortirq);
+
+ if (dmairq || dmashortirq) {
+ unsigned i;
+
+ for (i = 0; i < hsudc_dev->hw_config->dma_channels; ++i)
+ if ((dmairq & (1 << i)) || (dmashortirq & (1 << i)))
+ cadence_hsudc_dma_irq(hsudc_dev, i,
+ dmairq, dmashortirq);
+ }
+
+ if (in_packet_irq || out_packet_irq) {
+ unsigned i;
+
+ /* Handle EP0 */
+ if ((out_packet_irq & 1) || (in_packet_irq & 1)) {
+ /* Clear IRQ
+ * NOTE(review): if both the IN and OUT ep0 bits are
+ * pending simultaneously, only the OUT one is cleared
+ * here — confirm against the IP documentation.
+ */
+ if (out_packet_irq & 1)
+ hsudc_write16(1, HSUDC_OUTIRQ_REG16);
+ else
+ hsudc_write16(1, HSUDC_INIRQ_REG16);
+
+ queue_work(hsudc_dev->wq_ep, &hsudc_dev->ep0.comp);
+ }
+
+ /* Non-zero endpoints: ack and defer to completion work */
+ for (i = 1; i < HSUDC_EP_COUNT; ++i) {
+ if (out_packet_irq & (1 << i)) {
+ /* Clear IRQ */
+ hsudc_write16(1 << i, HSUDC_OUTIRQ_REG16);
+
+ if (hsudc_dev->ep_out[i].cur)
+ queue_work(hsudc_dev->wq_ep,
+ &hsudc_dev->ep_out[i].comp);
+ }
+
+ if (in_packet_irq & (1 << i)) {
+ /* Clear IRQ */
+ hsudc_write16(1 << i, HSUDC_INIRQ_REG16);
+
+ if (hsudc_dev->ep_in[i].cur)
+ queue_work(hsudc_dev->wq_ep,
+ &hsudc_dev->ep_in[i].comp);
+ }
+ }
+ }
+
+ /* Clear All USB IRQs */
+ hsudc_write8(usbirq, HSUDC_USBIRQ_REG8);
+
+ if (usbirq & HSUDC_USBIRQ_URES_MSK) {
+ dev_dbg(&hsudc_dev->pdev->dev, "irq: RESET\n");
+ hsudc_dev->gadget.speed = USB_SPEED_FULL;
+ }
+
+ if (usbirq & HSUDC_USBIRQ_HSPPED_MSK) {
+ /* High Speed indicator */
+ dev_dbg(&hsudc_dev->pdev->dev, "irq: HSPPED\n");
+ hsudc_dev->gadget.speed = USB_SPEED_HIGH;
+ }
+
+ if (usbirq & HSUDC_USBIRQ_SUDAV_MSK)
+ /* Queue SETUP work */
+ dev_vdbg(&hsudc_dev->pdev->dev, "irq: SUDAV\n");
+
+ if (usbirq & HSUDC_USBIRQ_SUTOK_MSK) {
+ /* SETUP token received: run the ep0 setup work */
+ dev_vdbg(&hsudc_dev->pdev->dev, "irq: SUTOK\n");
+ queue_work(hsudc_dev->wq_ep, &hsudc_dev->ep0_setup);
+ }
+
+ if (usbirq & HSUDC_USBIRQ_SOF_MSK)
+ dev_vdbg(&hsudc_dev->pdev->dev, "irq: SOF\n");
+
+ if (usbirq & HSUDC_USBIRQ_SUSP_MSK)
+ /* TODO handle suspended */
+ dev_vdbg(&hsudc_dev->pdev->dev, "irq: SUSP\n");
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * Reserve a free DMA channel for @hsudc_ep.
+ *
+ * dma_sem counts free channels; dma_s protects the bookkeeping scan.
+ * Returns 0 on success, -EBUSY if no channel could be claimed.
+ *
+ * Fix vs. original: down() may sleep and was called while holding the
+ * dma_s spinlock; the semaphore is now taken before the lock. Also
+ * return a proper errno instead of -1.
+ */
+static int hsudc_dma_get_channel(struct cadence_hsudc *hsudc_dev,
+ struct cadence_hsudc_ep *hsudc_ep)
+{
+ unsigned i;
+
+ /* Get DMA — may sleep, so do it before taking the spinlock */
+ down(&hsudc_dev->dma_sem);
+
+ spin_lock(&hsudc_dev->dma_s);
+
+ for (i = 0; i < hsudc_dev->hw_config->dma_channels; ++i) {
+ if (hsudc_dev->dma_channels[i].is_available
+ && !hsudc_dev->dma_channels[i].in_use) {
+ hsudc_dev->dma_channels[i].in_use = 1;
+ hsudc_dev->dma_channels[i].cur_ep = hsudc_ep;
+ hsudc_ep->dma_channel = &hsudc_dev->dma_channels[i];
+ hsudc_ep->use_dma = 1;
+
+ dev_vdbg(&hsudc_dev->pdev->dev, "%s(): ep%d%s got dma channel %d for req %p\n",
+ __func__,
+ hsudc_ep->num,
+ (hsudc_ep->is_in?"in":"out"),
+ i, hsudc_ep->cur);
+
+ spin_unlock(&hsudc_dev->dma_s);
+
+ return 0;
+ }
+ }
+
+ dev_err(&hsudc_dev->pdev->dev, "%s(): error failed to get dma channel\n",
+ __func__);
+
+ spin_unlock(&hsudc_dev->dma_s);
+
+ /* Nothing claimed: give the semaphore slot back */
+ up(&hsudc_dev->dma_sem);
+
+ return -EBUSY;
+}
+
+/*
+ * Prepare a DMA transfer for @hsudc_req on @hsudc_ep.
+ *
+ * Maps the request buffer, programs the channel address/count/mode and
+ * endpoint registers, then unmasks the channel's DMAIRQ/DMASHORTIRQ.
+ * Must only be called after hsudc_dma_get_channel() succeeded (a valid
+ * hsudc_ep->dma_channel is dereferenced unconditionally).
+ *
+ * Returns 0 on success, or the dma_mapping_error() result on mapping
+ * failure (in which case use_dma is cleared so the caller falls back
+ * to PIO).
+ */
+static int hsudc_dma_init(struct cadence_hsudc *hsudc_dev,
+ struct cadence_hsudc_ep *hsudc_ep,
+ struct cadence_hsudc_request *hsudc_req)
+{
+ int ret;
+
+ /* Map buffer as DMA address */
+ hsudc_req->req.dma = dma_map_single(hsudc_dev->gadget.dev.parent,
+ hsudc_req->req.buf,
+ hsudc_req->req.length,
+ hsudc_ep->is_in ?
+ DMA_TO_DEVICE :
+ DMA_FROM_DEVICE);
+
+ ret = dma_mapping_error(hsudc_dev->gadget.dev.parent,
+ hsudc_req->req.dma);
+ if (ret) {
+ dev_err(&hsudc_dev->pdev->dev, "%s(): dma mapping error %d\n",
+ __func__, ret);
+
+ /* Fall back to PIO for this request */
+ hsudc_ep->use_dma = 0;
+ return ret;
+ }
+
+ /* Configure DMA direction, EP, address and mode */
+ hsudc_write32(hsudc_req->req.dma,
+ HSUDC_DMA_ADDR_REG32(hsudc_ep->dma_channel->num));
+ hsudc_write32(hsudc_req->req.length,
+ HSUDC_DMA_CNT_REG32(hsudc_ep->dma_channel->num));
+
+ /* Mode normal, incremental address */
+ if (hsudc_ep->is_in)
+ hsudc_write8(HSUDC_DMA_MODE_DIRECTION_IN |
+ HSUDC_DMA_MODE_ADDRESS_INC,
+ HSUDC_DMA_MODE_REG8(hsudc_ep->dma_channel->num));
+ else
+ hsudc_write8(HSUDC_DMA_MODE_ADDRESS_INC,
+ HSUDC_DMA_MODE_REG8(hsudc_ep->dma_channel->num));
+
+ hsudc_write8(hsudc_ep->num << HSUDC_DMA_ENDP_SHIFT,
+ HSUDC_DMA_ENDP_REG8(hsudc_ep->dma_channel->num));
+
+ /* TODO HSUDC_DMA_BUSCTRL_REG8 */
+
+ /* Enable DMAIRQ, DMASHORTIRQ */
+ hsudc_write32(hsudc_read32(HSUDC_DMA_IEN_REG32) |
+ (1 << hsudc_ep->dma_channel->num), HSUDC_DMA_IEN_REG32);
+ hsudc_write32(hsudc_read32(HSUDC_DMA_SHORTIEN_REG32) |
+ (1 << hsudc_ep->dma_channel->num),
+ HSUDC_DMA_SHORTIEN_REG32);
+
+ return 0;
+}
+
+/*
+ * Enable, Configure and Reset endpoint
+ *
+ * Validates @desc against the endpoint's hardware capabilities, then
+ * programs the endpoint type/buffering/maxpacket registers and resets
+ * the FIFO and data toggle.
+ *
+ * Fix vs. original: @ep and @desc were dereferenced (container_of,
+ * wMaxPacketSize) before being NULL-checked; validation now happens
+ * first.
+ */
+static int cadence_hsudc_ep_enable(struct usb_ep *ep,
+ const struct usb_endpoint_descriptor *desc)
+{
+ struct cadence_hsudc *hsudc_dev;
+ struct cadence_hsudc_ep *hsudc_ep;
+ uint16_t maxpacket;
+ uint32_t tmp;
+
+ /* Validate pointers before any dereference */
+ if (!ep || !desc)
+ return -EINVAL;
+
+ hsudc_ep = container_of(ep, struct cadence_hsudc_ep, ep);
+ hsudc_dev = hsudc_ep->hsudc_dev;
+
+ maxpacket = le16_to_cpu(desc->wMaxPacketSize);
+
+ if (hsudc_ep->desc) {
+ dev_err(&hsudc_dev->pdev->dev, "%s(): error bad descriptor\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ /* ep0 is managed internally, never through ep_enable */
+ if (!hsudc_ep->num) {
+ dev_err(&hsudc_dev->pdev->dev, "%s(): error ep[0]\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ if (((desc->bEndpointAddress & USB_DIR_IN) == USB_DIR_IN &&
+ hsudc_ep->is_in == 0) ||
+ ((desc->bEndpointAddress & USB_DIR_IN) == 0 &&
+ hsudc_ep->is_in == 1)) {
+ dev_err(&hsudc_dev->pdev->dev, "%s(): error invalid direction\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ if (desc->bDescriptorType != USB_DT_ENDPOINT) {
+ dev_err(&hsudc_dev->pdev->dev, "%s(): error not USB_DT_ENDPOINT\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ /* maxpacket must fit the hardware FIFO size from the DT config */
+ if (!maxpacket || maxpacket > hsudc_ep->maxpacket) {
+ dev_err(&hsudc_dev->pdev->dev, "%s(): error maxpacket %d\n",
+ __func__,
+ maxpacket);
+ return -EINVAL;
+ }
+
+ if (!hsudc_dev->driver ||
+ hsudc_dev->gadget.speed == USB_SPEED_UNKNOWN) {
+ dev_err(&hsudc_dev->pdev->dev, "%s(): error bogus device state\n",
+ __func__);
+ return -ESHUTDOWN;
+ }
+
+ tmp = desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK;
+ switch (tmp) {
+ case USB_ENDPOINT_XFER_CONTROL:
+ dev_err(&hsudc_dev->pdev->dev,
+ "%s(): error only one control endpoint\n", __func__);
+ return -EINVAL;
+ case USB_ENDPOINT_XFER_INT:
+ if (maxpacket > hsudc_ep->maxpacket) {
+ dev_err(&hsudc_dev->pdev->dev,
+ "%s(): error '%s', bogus maxpacket %d for XFER_INT\n",
+ __func__, hsudc_ep->ep.name, maxpacket);
+ return -EINVAL;
+ }
+ break;
+ case USB_ENDPOINT_XFER_BULK:
+ if (maxpacket > hsudc_ep->maxpacket) {
+ dev_err(&hsudc_dev->pdev->dev,
+ "%s(): error '%s', bogus maxpacket %d for XFER_BULK\n",
+ __func__, hsudc_ep->ep.name, maxpacket);
+ return -EINVAL;
+ }
+ break;
+ case USB_ENDPOINT_XFER_ISOC:
+ dev_err(&hsudc_dev->pdev->dev,
+ "%s(): error USB_ENDPOINT_XFER_ISOC not supported yet.\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ /* initialize endpoint to match this descriptor */
+ hsudc_ep->desc = desc;
+ hsudc_ep->ep.maxpacket = maxpacket;
+ spin_lock_init(&hsudc_ep->s);
+
+ dev_dbg(&hsudc_dev->pdev->dev, "%s(): '%s', is_in %d, maxpacket %d\n",
+ __func__, hsudc_ep->ep.name, hsudc_ep->is_in, maxpacket);
+
+ if (hsudc_ep->is_in) {
+ unsigned val =
+ hsudc_dev->hw_config->ep_in_buffering[hsudc_ep->num] &
+ HSUDC_EP_CON_BUF_MSK;
+
+ /* Set EP type */
+ if ((desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) ==
+ USB_ENDPOINT_XFER_INT)
+ val |= HSUDC_EP_CON_TYPE_INTERRUPT;
+ else
+ val |= HSUDC_EP_CON_TYPE_BULK;
+
+ /* Enable EP */
+ val |= HSUDC_EP_CON_VAL_MSK;
+
+ hsudc_write8(val, HSUDC_EP_INCON_REG8(hsudc_ep->num));
+
+ hsudc_write16(hsudc_ep->ep.maxpacket,
+ HSUDC_EP_IN_MAXPCK_REG16(hsudc_ep->num));
+
+ /* Select endpoint */
+ hsudc_write8(hsudc_ep->num | HSUDC_ENDPRST_IO_MSK,
+ HSUDC_ENDPRST_REG8);
+
+ /* Reset endpoint */
+ hsudc_write8(hsudc_ep->num |
+ HSUDC_ENDPRST_IO_MSK |
+ HSUDC_ENDPRST_TOGRST_MSK |
+ HSUDC_ENDPRST_FIFORST_MSK, HSUDC_ENDPRST_REG8);
+ } else {
+ unsigned val =
+ hsudc_dev->hw_config->ep_out_buffering[hsudc_ep->num] &
+ HSUDC_EP_CON_BUF_MSK;
+
+ /* Set EP type */
+ if ((desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) ==
+ USB_ENDPOINT_XFER_INT)
+ val |= HSUDC_EP_CON_TYPE_INTERRUPT;
+ else
+ val |= HSUDC_EP_CON_TYPE_BULK;
+
+ /* Enable EP */
+ val |= HSUDC_EP_CON_VAL_MSK;
+
+ hsudc_write8(val, HSUDC_EP_OUTCON_REG8(hsudc_ep->num));
+
+ hsudc_write16(hsudc_ep->ep.maxpacket,
+ HSUDC_EP_OUT_MAXPCK_REG16(hsudc_ep->num));
+
+ /* Select endpoint */
+ hsudc_write8(hsudc_ep->num, HSUDC_ENDPRST_REG8);
+
+ /* Reset endpoint */
+ hsudc_write8(hsudc_ep->num | HSUDC_ENDPRST_TOGRST_MSK |
+ HSUDC_ENDPRST_FIFORST_MSK, HSUDC_ENDPRST_REG8);
+ }
+
+ return 0;
+}
+
+/*
+ * Disable and Reset endpoint
+ *
+ * Fails all queued requests with -ESHUTDOWN (the in-flight one via its
+ * completion work, the rest by completing them inline), then disables
+ * the endpoint and resets its FIFO and data toggle.
+ */
+static int cadence_hsudc_ep_disable(struct usb_ep *ep)
+{
+ struct cadence_hsudc *hsudc_dev;
+ struct cadence_hsudc_ep *hsudc_ep;
+ struct cadence_hsudc_request *req;
+
+ hsudc_ep = container_of(ep, struct cadence_hsudc_ep, ep);
+ hsudc_dev = hsudc_ep->hsudc_dev;
+
+ spin_lock(&hsudc_ep->s);
+
+ hsudc_ep->desc = NULL;
+ /* In-flight request: let the completion work finish it */
+ if (hsudc_ep->cur) {
+ hsudc_ep->cur->req.status = -ESHUTDOWN;
+ dev_dbg(&hsudc_dev->pdev->dev, "%s(): nuked cur %p\n",
+ __func__,
+ hsudc_ep->cur);
+ queue_work(hsudc_dev->wq_ep, &hsudc_ep->comp);
+ }
+
+ /* Drain the pending queue, completing each request as shut down */
+ while (!list_empty(&hsudc_ep->queue)) {
+ req = list_entry(hsudc_ep->queue.next,
+ struct cadence_hsudc_request,
+ queue);
+ list_del_init(&req->queue);
+
+ /* cur is handled by its completion work, not here */
+ if (req == hsudc_ep->cur)
+ continue;
+
+ req->req.status = -ESHUTDOWN;
+ req->req.complete(&hsudc_ep->ep, &req->req);
+ dev_dbg(&hsudc_dev->pdev->dev, "%s(): nuked %p\n",
+ __func__,
+ req);
+ }
+
+ dev_dbg(&hsudc_dev->pdev->dev, "%s(): '%s'\n", __func__,
+ hsudc_ep->ep.name);
+ /* Restore the hardware maximum until the next ep_enable() */
+ hsudc_ep->ep.maxpacket = hsudc_ep->maxpacket;
+ INIT_LIST_HEAD(&hsudc_ep->queue);
+
+ if (hsudc_ep->is_in) {
+ hsudc_write8(0, HSUDC_EP_INCON_REG8(hsudc_ep->num));
+
+ /* Select endpoint */
+ hsudc_write8(hsudc_ep->num | HSUDC_ENDPRST_IO_MSK,
+ HSUDC_ENDPRST_REG8);
+
+ /* Reset endpoint */
+ hsudc_write8(hsudc_ep->num | HSUDC_ENDPRST_IO_MSK |
+ HSUDC_ENDPRST_TOGRST_MSK |
+ HSUDC_ENDPRST_FIFORST_MSK, HSUDC_ENDPRST_REG8);
+ } else {
+ hsudc_write8(0, HSUDC_EP_OUTCON_REG8(hsudc_ep->num));
+
+ /* Select endpoint */
+ hsudc_write8(hsudc_ep->num, HSUDC_ENDPRST_REG8);
+
+ /* Reset endpoint */
+ hsudc_write8(hsudc_ep->num | HSUDC_ENDPRST_TOGRST_MSK |
+ HSUDC_ENDPRST_FIFORST_MSK, HSUDC_ENDPRST_REG8);
+ }
+
+ spin_unlock(&hsudc_ep->s);
+ return 0;
+
+}
+
+/*
+ * Allocate request internal structure
+ *
+ * Allocates a cadence_hsudc_request wrapper and returns the embedded
+ * usb_request; freed by cadence_hsudc_ep_free_request().
+ *
+ * Fix vs. original: the dma field was initialized with a hard-coded
+ * 0xFFFFFFFF, which is not the all-ones sentinel on 64-bit dma_addr_t;
+ * use the driver's DMA_ADDR_INVALID, matching hsudc_ep_completion().
+ */
+static struct usb_request *cadence_hsudc_ep_alloc_request(struct usb_ep *ep,
+ unsigned int gfp_flags)
+{
+ struct cadence_hsudc *hsudc_dev;
+ struct cadence_hsudc_ep *hsudc_ep;
+ struct cadence_hsudc_request *req;
+
+ hsudc_ep = container_of(ep, struct cadence_hsudc_ep, ep);
+ hsudc_dev = hsudc_ep->hsudc_dev;
+
+ req = kzalloc(sizeof(struct cadence_hsudc_request), gfp_flags);
+ if (!req)
+ return NULL;
+
+ INIT_LIST_HEAD(&req->queue);
+ /* No DMA mapping yet */
+ req->req.dma = DMA_ADDR_INVALID;
+
+ dev_vdbg(&hsudc_dev->pdev->dev, "%s(): %p @ '%s'\n", __func__,
+ &req->req,
+ hsudc_ep->ep.name);
+
+ return &req->req;
+}
+
+/*
+ * Free request internal structure
+ *
+ * Releases a request wrapper previously handed out by
+ * cadence_hsudc_ep_alloc_request().
+ */
+static void cadence_hsudc_ep_free_request(struct usb_ep *ep,
+ struct usb_request *req)
+{
+ struct cadence_hsudc_ep *hsudc_ep =
+ container_of(ep, struct cadence_hsudc_ep, ep);
+ struct cadence_hsudc_request *hsudc_req =
+ container_of(req, struct cadence_hsudc_request, req);
+ struct cadence_hsudc *hsudc_dev = hsudc_ep->hsudc_dev;
+
+ dev_vdbg(&hsudc_dev->pdev->dev, "%s(): %p @ '%s'\n", __func__,
+ &hsudc_req->req,
+ hsudc_ep->ep.name);
+
+ kfree(hsudc_req);
+}
+
+/*
+ * Continue/Completion work for ep0
+ * If some more data must be read/pushed, restart ep or complete
+ * At end of request, ACK status request and STALL data requests
+ *
+ * Runs from wq_ep whenever an ep0 IN or OUT packet IRQ fires; either
+ * continues the PIO transfer one maxpacket at a time or finishes the
+ * control transaction and completes the gadget request.
+ */
+static void hsudc_ep0_completion(struct work_struct *work)
+{
+ struct cadence_hsudc *hsudc_dev;
+ struct cadence_hsudc_ep *hsudc_ep;
+ struct cadence_hsudc_request *hsudc_req;
+
+ hsudc_ep = container_of(work, struct cadence_hsudc_ep, comp);
+ hsudc_dev = hsudc_ep->hsudc_dev;
+ hsudc_req = hsudc_ep->cur;
+
+ /* Should be a get status implicit request */
+ if (hsudc_req == NULL) {
+ /* Disable IRQ */
+ hsudc_write16(hsudc_read16(HSUDC_OUTIEN_REG16) & ~1,
+ HSUDC_OUTIEN_REG16);
+ hsudc_write16(hsudc_read16(HSUDC_INIEN_REG16) & ~1,
+ HSUDC_INIEN_REG16);
+
+ /* Finish control transaction */
+ hsudc_write8(HSUDC_EP0_CS_HSNAK_MSK,
+ HSUDC_EP0_CS_REG8);
+
+ return;
+ }
+
+ dev_vdbg(&hsudc_dev->pdev->dev, "%s(): %p @ '%s'\n", __func__,
+ &hsudc_req->req,
+ hsudc_ep->ep.name);
+
+ if (!hsudc_ep->is_in) {
+ /* Retrieve data from FIFO */
+ uint8_t *buf = hsudc_req->req.buf + hsudc_req->req.actual;
+ unsigned length = hsudc_read8(HSUDC_EP0_OUTBC_REG8);
+ unsigned i;
+
+ /* copy data in ep fifo */
+ for (i = 0; i < length; ++i)
+ buf[i] = hsudc_read8(HSUDC_EP0_OUTBUF_BASE_REG + i);
+
+ hsudc_req->req.actual += length;
+
+ /* More expected: re-arm ep0 OUT for the next packet */
+ if (hsudc_req->req.actual < hsudc_req->req.length) {
+ length = hsudc_req->req.length - hsudc_req->req.actual;
+
+ if (length > hsudc_ep->maxpacket)
+ length = hsudc_ep->maxpacket;
+
+ hsudc_write8(length, HSUDC_EP0_OUTBC_REG8);
+
+ return;
+ }
+
+ /* Disable IRQ */
+ hsudc_write16(hsudc_read16(HSUDC_OUTIEN_REG16) & ~1,
+ HSUDC_OUTIEN_REG16);
+ } else {
+ /* More to send: push the next chunk into the ep0 IN FIFO */
+ if (hsudc_req->req.actual < hsudc_req->req.length) {
+ uint8_t *buf =
+ hsudc_req->req.buf + hsudc_req->req.actual;
+ unsigned length =
+ hsudc_req->req.length - hsudc_req->req.actual;
+ unsigned i;
+
+ if (length > hsudc_ep->maxpacket)
+ length = hsudc_ep->maxpacket;
+
+ /* copy data in ep0 fifo */
+ for (i = 0; i < length; ++i)
+ hsudc_write8(buf[i],
+ HSUDC_EP0_INBUF_BASE_REG + i);
+
+ hsudc_req->req.actual += length;
+
+ /* Load byte size */
+ hsudc_write8(length, HSUDC_EP0_INBC_REG8);
+
+ return;
+ }
+
+ /* Disable IRQ */
+ hsudc_write16(hsudc_read16(HSUDC_INIEN_REG16) & ~1,
+ HSUDC_INIEN_REG16);
+ }
+
+ /* Finish control transaction */
+ hsudc_write8(HSUDC_EP0_CS_HSNAK_MSK,
+ HSUDC_EP0_CS_REG8);
+
+ spin_lock(&hsudc_ep->s);
+
+ /* Detach cur under the lock, complete outside it */
+ if (hsudc_ep->cur) {
+ hsudc_req = hsudc_ep->cur;
+ hsudc_ep->cur = NULL;
+ spin_unlock(&hsudc_ep->s);
+
+ hsudc_req->req.status = 0;
+ hsudc_req->req.complete(&hsudc_ep->ep, &hsudc_req->req);
+ } else
+ spin_unlock(&hsudc_ep->s);
+}
+
+/* ep0 never uses the generic start work (hsudc_ep0_queue() drives the
+ * hardware directly), so scheduling this is a driver bug.
+ */
+static void hsudc_ep0_work(struct work_struct *work)
+{
+ BUG();
+}
+
+/*
+ * Start a control (ep0) request.
+ *
+ * Zero-length requests just complete the status stage. Otherwise the
+ * first chunk is pushed to (IN) or armed on (OUT) the ep0 FIFO and the
+ * matching ep0 packet IRQ is enabled; hsudc_ep0_completion() carries
+ * the transfer forward.
+ */
+static int hsudc_ep0_queue(struct cadence_hsudc *hsudc_dev,
+ struct cadence_hsudc_request *req)
+{
+ dev_vdbg(&hsudc_dev->pdev->dev, "%s(): %s length %d actual %d\n",
+ __func__,
+ (hsudc_dev->ep0.is_in?"IN":"OUT"),
+ req->req.length, req->req.actual);
+
+ spin_lock(&hsudc_dev->ep0.s);
+
+ hsudc_dev->ep0.cur = req;
+
+ /* No data stage: just ACK the status stage */
+ if (!req->req.length) {
+ /* Finish control transaction */
+ hsudc_write8(HSUDC_EP0_CS_HSNAK_MSK,
+ HSUDC_EP0_CS_REG8);
+
+ spin_unlock(&hsudc_dev->ep0.s);
+ return 0;
+ }
+
+ spin_unlock(&hsudc_dev->ep0.s);
+
+ if (hsudc_dev->ep0.is_in) {
+ uint8_t *buf = req->req.buf + req->req.actual;
+ unsigned length = req->req.length - req->req.actual;
+ unsigned i;
+
+ if (length > hsudc_dev->ep0.maxpacket)
+ length = hsudc_dev->ep0.maxpacket;
+
+ /* copy data in ep0 fifo */
+ for (i = 0; i < length; ++i)
+ hsudc_write8(buf[i], HSUDC_EP0_INBUF_BASE_REG + i);
+
+ req->req.actual += length;
+
+ /* Clear and enable ep0 in irq */
+ hsudc_write16(1, HSUDC_INIRQ_REG16);
+ hsudc_write16(hsudc_read16(HSUDC_INIEN_REG16) | 1,
+ HSUDC_INIEN_REG16);
+
+ /* Load byte size */
+ hsudc_write8(length, HSUDC_EP0_INBC_REG8);
+ } else {
+ unsigned length = req->req.length - req->req.actual;
+
+ if (length > hsudc_dev->ep0.maxpacket)
+ length = hsudc_dev->ep0.maxpacket;
+
+ /* Clear and enable ep0 out irq */
+ hsudc_write16(1, HSUDC_OUTIRQ_REG16);
+ hsudc_write16(hsudc_read16(HSUDC_OUTIEN_REG16) | 1,
+ HSUDC_OUTIEN_REG16);
+
+ /* ARM out ep0, set size */
+ hsudc_write8(length, HSUDC_EP0_OUTBC_REG8);
+ }
+
+ return 0;
+}
+
+/*
+ * PIO copy of @length bytes from @buf into the endpoint FIFO, using
+ * 32/16-bit accesses while alignment allows, byte accesses otherwise.
+ *
+ * Fixes vs. original: the loop index was a signed int compared against
+ * a size_t (signed/unsigned mismatch), and '&buf[i]' performed
+ * arithmetic on a void pointer (a GNU extension, not standard C);
+ * iterate with size_t over a byte pointer instead.
+ */
+static inline void hsudc_copy_to_fifo(struct cadence_hsudc_ep *hsudc_ep,
+ size_t length, void *buf)
+{
+ size_t i = 0;
+ uint8_t *p = buf;
+ unsigned reg = HSUDC_FIFODAT_REG32(hsudc_ep->num);
+ struct cadence_hsudc *hsudc_dev = hsudc_ep->hsudc_dev;
+
+ /* copy data in ep fifo, with optimized accesses */
+ while (i < length) {
+ if ((i % 4) == 0 && (length - i) >= 4) {
+ hsudc_write32(*(uint32_t *)(p + i), reg);
+ i += 4;
+ } else if ((i % 2) == 0 && (length - i) >= 2) {
+ hsudc_write16(*(uint16_t *)(p + i), reg);
+ i += 2;
+ } else {
+ hsudc_write8(p[i], reg);
+ i += 1;
+ }
+ }
+}
+
+/*
+ * PIO copy of @length bytes from the endpoint FIFO into @buf, using
+ * 32/16-bit accesses while alignment allows, byte accesses otherwise.
+ *
+ * Fixes vs. original: signed int index compared against size_t, and
+ * arithmetic on a void pointer via '&buf[i]' (GNU extension); iterate
+ * with size_t over a byte pointer instead.
+ */
+static inline void hsudc_copy_from_fifo(struct cadence_hsudc_ep *hsudc_ep,
+ size_t length, void *buf)
+{
+ size_t i = 0;
+ uint8_t *p = buf;
+ unsigned reg = HSUDC_FIFODAT_REG32(hsudc_ep->num);
+ struct cadence_hsudc *hsudc_dev = hsudc_ep->hsudc_dev;
+
+ /* copy data from ep fifo, with optimized accesses */
+ while (i < length) {
+ if ((i % 4) == 0 && (length - i) >= 4) {
+ *(uint32_t *)(p + i) = hsudc_read32(reg);
+ i += 4;
+ } else if ((i % 2) == 0 && (length - i) >= 2) {
+ *(uint16_t *)(p + i) = hsudc_read16(reg);
+ i += 2;
+ } else {
+ p[i] = hsudc_read8(reg);
+ i += 1;
+ }
+ }
+}
+
+/*
+ * Continue/Completion work
+ * If some more data must be read/pushed, restart ep or complete
+ * If another request is available, run ep_work to start it
+ *
+ * Runs from wq_ep for non-ep0 endpoints. In PIO mode it moves one
+ * maxpacket at a time through the FIFO; in DMA mode it unmaps the
+ * buffer after the DMA IRQ recorded the actual count. When the
+ * current request is done it is completed (with the endpoint lock
+ * dropped so complete() may queue again), then the next queued
+ * request is started or the endpoint IRQ is masked.
+ */
+static void hsudc_ep_completion(struct work_struct *work)
+{
+ struct cadence_hsudc *hsudc_dev;
+ struct cadence_hsudc_ep *hsudc_ep;
+ struct cadence_hsudc_request *hsudc_req;
+
+ hsudc_ep = container_of(work, struct cadence_hsudc_ep, comp);
+ hsudc_dev = hsudc_ep->hsudc_dev;
+
+ spin_lock(&hsudc_ep->s);
+
+ hsudc_req = hsudc_ep->cur;
+
+ if (hsudc_req == NULL) {
+ spin_unlock(&hsudc_ep->s);
+ return;
+ }
+
+ if (hsudc_req->req.status != -EINPROGRESS)
+ /* Request was unqueued */
+ goto req_complete;
+
+ dev_vdbg(&hsudc_dev->pdev->dev, "%s(): ep%d%s req %p/%d:%d\n", __func__,
+ hsudc_ep->num, (hsudc_ep->is_in?"in":"out"),
+ &hsudc_req->req, hsudc_req->req.length,
+ hsudc_req->req.actual);
+
+ if (!hsudc_ep->use_dma && hsudc_ep->is_in) {
+ /* PIO IN: push the next chunk and re-arm */
+ if (hsudc_req->req.actual < hsudc_req->req.length) {
+ void *buf =
+ hsudc_req->req.buf + hsudc_req->req.actual;
+ unsigned length =
+ hsudc_req->req.length -
+ hsudc_req->req.actual;
+ unsigned i;
+
+ if (length > hsudc_ep->ep.maxpacket)
+ length = hsudc_ep->ep.maxpacket;
+
+ hsudc_copy_to_fifo(hsudc_ep, length, buf);
+
+ hsudc_req->req.actual += length;
+
+ dev_dbg(&hsudc_dev->pdev->dev,
+ "%s(): ep%d%s req %p/%d:%d len %d max %d\n",
+ __func__,
+ hsudc_ep->num,
+ (hsudc_ep->is_in?"in":"out"),
+ &hsudc_req->req, hsudc_req->req.length,
+ hsudc_req->req.actual,
+ length, hsudc_ep->ep.maxpacket);
+
+ /* ARM out ep,
+ * set busy bit to enable sending to the host
+ */
+ hsudc_write8(0x00,
+ HSUDC_EP_INCS_REG8(hsudc_ep->num));
+
+ spin_unlock(&hsudc_ep->s);
+ return;
+ }
+ } else if (!hsudc_ep->use_dma && !hsudc_ep->is_in) {
+ /* Retrieve data from FIFO */
+ void *buf =
+ hsudc_req->req.buf + hsudc_req->req.actual;
+ unsigned length =
+ hsudc_read16(HSUDC_EP_OUTBC_REG16(hsudc_ep->num));
+
+ hsudc_copy_from_fifo(hsudc_ep, length, buf);
+
+ hsudc_req->req.actual += length;
+
+ dev_vdbg(&hsudc_dev->pdev->dev,
+ "%s(): ep%d%s req %p/%d:%d len %d max %d\n",
+ __func__,
+ hsudc_ep->num,
+ (hsudc_ep->is_in?"in":"out"),
+ &hsudc_req->req, hsudc_req->req.length,
+ hsudc_req->req.actual,
+ length, hsudc_ep->ep.maxpacket);
+
+ /* A full packet with room left means more data follows;
+ * a short packet terminates the transfer.
+ */
+ if (length == hsudc_ep->ep.maxpacket
+ && hsudc_req->req.actual < hsudc_req->req.length) {
+ /* ARM out ep,
+ * set busy bit to enable acking from the host
+ */
+ hsudc_write8(0x00,
+ HSUDC_EP_OUTCS_REG8(hsudc_ep->num));
+
+ spin_unlock(&hsudc_ep->s);
+ return;
+ }
+ } else {
+ /* DMA path: the DMA IRQ already set req.actual */
+ dma_unmap_single(hsudc_dev->gadget.dev.parent,
+ hsudc_req->req.dma, hsudc_req->req.length,
+ hsudc_ep->is_in ? DMA_TO_DEVICE :
+ DMA_FROM_DEVICE);
+ hsudc_req->req.dma = DMA_ADDR_INVALID;
+ hsudc_ep->use_dma = 0;
+
+ dev_dbg(&hsudc_dev->pdev->dev, "%s(): ep%d%s req %p/%d:%d dma end\n",
+ __func__,
+ hsudc_ep->num,
+ (hsudc_ep->is_in?"in":"out"),
+ &hsudc_req->req, hsudc_req->req.length,
+ hsudc_req->req.actual);
+ }
+
+ /* Explicit ZLP handling :
+ * IN, non zero, multiple of maxpacket, ZLP required
+ */
+ if (hsudc_ep->is_in && hsudc_req->req.actual &&
+ (!(hsudc_req->req.actual % hsudc_ep->ep.maxpacket))
+ && hsudc_req->req.zero) {
+ /* Send explicit ZLP */
+ hsudc_req->req.zero = 0;
+
+ dev_vdbg(&hsudc_dev->pdev->dev, "%s(): ep%d%s explicit ZLP\n",
+ __func__,
+ hsudc_ep->num, (hsudc_ep->is_in?"in":"out"));
+
+ /* ARM out ep, set busy bit to enable sending to the host */
+ hsudc_write8(0x00, HSUDC_EP_INCS_REG8(hsudc_ep->num));
+
+ spin_unlock(&hsudc_ep->s);
+ return;
+ }
+
+ if (hsudc_req->req.status == -EINPROGRESS)
+ hsudc_req->req.status = 0;
+
+req_complete:
+ /* Remove request from list */
+ list_del_init(&hsudc_req->queue);
+
+ dev_dbg(&hsudc_dev->pdev->dev, "%s(): ep%d%s req %p/%d:%d complete status %d\n",
+ __func__,
+ hsudc_ep->num, (hsudc_ep->is_in?"in":"out"),
+ &hsudc_req->req, hsudc_req->req.length,
+ hsudc_req->req.actual,
+ hsudc_req->req.status);
+
+ hsudc_ep->cur = NULL;
+
+ spin_unlock(&hsudc_ep->s);
+
+ /* Complete request, unlock so the complete can
+ * also queue another request and we handle it immediately
+ * without disabling the irqs
+ */
+ hsudc_req->req.complete(&hsudc_ep->ep, &hsudc_req->req);
+
+ spin_lock(&hsudc_ep->s);
+
+ /* If queue is not empty, continue work */
+ if (!list_empty(&hsudc_ep->queue))
+ queue_work(hsudc_dev->wq_ep, &hsudc_ep->ws);
+ else {
+ dev_dbg(&hsudc_dev->pdev->dev, "%s(): ep%d%s queue empty\n",
+ __func__,
+ hsudc_ep->num,
+ (hsudc_ep->is_in?"in":"out"));
+
+ /* Disable EP IRQ
+ * NOTE(review): the IN branch clears the pending IRQ bit
+ * first, the OUT branch only masks it — confirm whether
+ * the asymmetry is intentional.
+ */
+ if (hsudc_ep->is_in) {
+ hsudc_write16(1 << hsudc_ep->num, HSUDC_INIRQ_REG16);
+ hsudc_write16(hsudc_read16(HSUDC_INIEN_REG16) &
+ ~(1 << hsudc_ep->num),
+ HSUDC_INIEN_REG16);
+ } else {
+ hsudc_write16(hsudc_read16(HSUDC_OUTIEN_REG16) &
+ ~(1 << hsudc_ep->num),
+ HSUDC_OUTIEN_REG16);
+ }
+ }
+
+ spin_unlock(&hsudc_ep->s);
+}
+
+/*
+ * hsudc_ep0_clear_feature() - handle a CLEAR_FEATURE control request.
+ * @hsudc_dev: controller state
+ * @type: request recipient (USB_RECIP_* bits of bRequestType)
+ * @w_value: feature selector from the SETUP packet
+ * @w_index: endpoint address (number | direction) from the SETUP packet
+ *
+ * Only CLEAR_FEATURE(ENDPOINT_HALT) is supported: the addressed
+ * endpoint's data toggle is reset and its STALL bit cleared.
+ * Returns 0 on success, -1 when the request must be answered with a
+ * protocol STALL (unsupported recipient/feature or unknown endpoint).
+ */
+static int hsudc_ep0_clear_feature(struct cadence_hsudc *hsudc_dev,
+				   unsigned type,
+				   unsigned w_value, int w_index)
+{
+	if (type == USB_RECIP_ENDPOINT && w_value == USB_ENDPOINT_HALT) {
+		unsigned num = w_index & 0xf;
+		unsigned is_in = w_index & USB_DIR_IN;
+
+		if (is_in && hsudc_dev->ep_in[num].is_available) {
+			/* Select endpoint (IO bit set selects IN side) */
+			hsudc_write8(num | HSUDC_ENDPRST_IO_MSK,
+				     HSUDC_ENDPRST_REG8);
+
+			/* Reset endpoint data toggle */
+			hsudc_write8(num | HSUDC_ENDPRST_IO_MSK |
+				     HSUDC_ENDPRST_TOGRST_MSK,
+				     HSUDC_ENDPRST_REG8);
+
+			/* UnHalt: clear the STALL bit */
+			hsudc_write8(hsudc_read8(HSUDC_EP_INCON_REG8(num)) &
+				     ~HSUDC_EP_CON_STALL_MSK,
+				     HSUDC_EP_INCON_REG8(num));
+
+			return 0;
+		} else if (!is_in && hsudc_dev->ep_out[num].is_available) {
+			/* Select endpoint (IO bit clear selects OUT side) */
+			hsudc_write8(num, HSUDC_ENDPRST_REG8);
+
+			/* Reset endpoint data toggle */
+			hsudc_write8(num | HSUDC_ENDPRST_TOGRST_MSK,
+				     HSUDC_ENDPRST_REG8);
+
+			/* UnHalt: clear the STALL bit */
+			hsudc_write8(hsudc_read8(HSUDC_EP_OUTCON_REG8(num)) &
+				     ~HSUDC_EP_CON_STALL_MSK,
+				     HSUDC_EP_OUTCON_REG8(num));
+
+			return 0;
+		} else
+			return -1; /* Invalid Endpoint, STALL */
+	} else
+		return -1; /* STALL */
+}
+
+/*
+ * hsudc_ep0_set_feature() - handle a SET_FEATURE control request.
+ * @hsudc_dev: controller state
+ * @type: request recipient (USB_RECIP_* bits of bRequestType)
+ * @w_value: feature selector from the SETUP packet
+ * @w_index: endpoint address (number | direction) from the SETUP packet
+ *
+ * Only SET_FEATURE(ENDPOINT_HALT) is supported: the addressed endpoint
+ * is put into the STALL state.  Returns 0 on success, -1 when the
+ * request must be answered with a protocol STALL.
+ */
+static int hsudc_ep0_set_feature(struct cadence_hsudc *hsudc_dev, unsigned type,
+				 unsigned w_value, int w_index)
+{
+	if (type == USB_RECIP_ENDPOINT && w_value == USB_ENDPOINT_HALT) {
+		unsigned num = w_index & 0xf;
+		unsigned is_in = w_index & USB_DIR_IN;
+
+		if (is_in && hsudc_dev->ep_in[num].is_available) {
+			/* endpoint in stall */
+			hsudc_write8(hsudc_read8(HSUDC_EP_INCON_REG8(num)) |
+				     HSUDC_EP_CON_STALL_MSK,
+				     HSUDC_EP_INCON_REG8(num));
+
+			return 0;
+		} else if (!is_in && hsudc_dev->ep_out[num].is_available) {
+			/* endpoint out stall */
+			hsudc_write8(hsudc_read8(HSUDC_EP_OUTCON_REG8(num)) |
+				     HSUDC_EP_CON_STALL_MSK,
+				     HSUDC_EP_OUTCON_REG8(num));
+
+			return 0;
+		} else
+			return -1; /* Invalid Endpoint, STALL */
+	} else
+		return -1; /* STALL */
+}
+
+/*
+ * hsudc_ep0_get_status() - handle a GET_STATUS control request.
+ * @hsudc_dev: controller state
+ * @type: request recipient (USB_RECIP_* bits of bRequestType)
+ * @w_index: endpoint address for the endpoint recipient
+ *
+ * Builds the 2-byte status reply: for an endpoint recipient, bit 0
+ * reports the HALT state read back from the endpoint control register;
+ * for any other recipient an all-zero status is returned.
+ * NOTE(review): a device recipient should report self-powered /
+ * remote-wakeup bits here — confirm whether all-zero is intended.
+ *
+ * Queues the reply in the ep0 IN buffer and arms the ep0 IN interrupt.
+ * Returns 0 on success, -1 for an invalid endpoint (caller STALLs).
+ */
+static int hsudc_ep0_get_status(struct cadence_hsudc *hsudc_dev, unsigned type,
+				int w_index)
+{
+	uint8_t status[2] = { 0, 0 };
+
+	if (type == USB_RECIP_ENDPOINT) {
+		unsigned num = w_index & 0xf;
+		unsigned is_in = w_index & USB_DIR_IN;
+
+		if (is_in && hsudc_dev->ep_in[num].is_available) {
+			if ((hsudc_read8(HSUDC_EP_INCON_REG8(num)) &
+			     HSUDC_EP_CON_STALL_MSK) == HSUDC_EP_CON_STALL_MSK)
+				status[0] = 1;
+		} else if (!is_in && hsudc_dev->ep_out[num].is_available) {
+			if ((hsudc_read8(HSUDC_EP_OUTCON_REG8(num)) &
+			     HSUDC_EP_CON_STALL_MSK) == HSUDC_EP_CON_STALL_MSK)
+				status[0] = 1;
+		} else
+			return -1; /* Invalid EP */
+	}
+	/* Copy into fifo */
+	hsudc_write8(status[0], HSUDC_EP0_INBUF_BASE_REG + 0);
+	hsudc_write8(status[1], HSUDC_EP0_INBUF_BASE_REG + 1);
+
+	/* Clear and enable ep0 in irq */
+	hsudc_write16(1, HSUDC_INIRQ_REG16);
+	hsudc_write16(hsudc_read16(HSUDC_INIEN_REG16) | 1, HSUDC_INIEN_REG16);
+
+	/* Load byte size: arms the 2-byte IN transfer */
+	hsudc_write8(2, HSUDC_EP0_INBC_REG8);
+
+	return 0;
+}
+
+/*
+ * hsudc_ep0_setup() - deferred (workqueue) handler for a SETUP packet.
+ *
+ * Reads the 8 SETUP bytes latched by the controller and dispatches the
+ * request: SET_ADDRESS is handled by the hardware, CLEAR_FEATURE /
+ * SET_FEATURE / GET_STATUS are handled locally, everything else is
+ * forwarded to the gadget driver's ->setup() callback.  Any failure
+ * answers the control transfer with a protocol STALL.
+ *
+ * Fix vs. previous revision: the CLEAR_FEATURE/SET_FEATURE helpers are
+ * declared as (..., w_value, w_index) but were called with the two
+ * arguments swapped, so ENDPOINT_HALT requests could never match and
+ * every valid halt request was STALLed.
+ */
+static void hsudc_ep0_setup(struct work_struct *work)
+{
+	struct cadence_hsudc *hsudc_dev;
+	union setup {
+		uint8_t raw[8];
+		struct usb_ctrlrequest r;
+	} ctrlrequest;
+	unsigned i;
+	int ret;
+
+#define w_index le16_to_cpu(ctrlrequest.r.wIndex)
+#define w_value le16_to_cpu(ctrlrequest.r.wValue)
+#define w_length le16_to_cpu(ctrlrequest.r.wLength)
+
+	hsudc_dev = container_of(work, struct cadence_hsudc, ep0_setup);
+
+	/* Copy the SETUP data latched by the controller */
+	for (i = 0; i < 8; ++i)
+		ctrlrequest.raw[i] =
+		    hsudc_read8(HSUDC_EP0_SETUPDAT_BASE_REG + i);
+
+	dev_vdbg(&hsudc_dev->pdev->dev,
+		 "SETUP bRequest 0x%x bRequestType 0x%x w_index 0x%x w_value 0x%x w_length %d\n",
+		 ctrlrequest.r.bRequest, ctrlrequest.r.bRequestType,
+		 w_index, w_value, w_length);
+
+	/* Record the data-stage direction for the coming ep0 transfer */
+	if (ctrlrequest.r.bRequestType & USB_DIR_IN)
+		hsudc_dev->ep0.is_in = 1;
+	else
+		hsudc_dev->ep0.is_in = 0;
+
+	switch (ctrlrequest.r.bRequest) {
+	case USB_REQ_SET_ADDRESS:
+		return;	/* Supported by Hardware */
+	case USB_REQ_CLEAR_FEATURE:
+		dev_dbg(&hsudc_dev->pdev->dev, "USB_REQ_CLEAR_FEATURE\n");
+		/* Helper expects (w_value, w_index) in that order */
+		if (hsudc_ep0_clear_feature(hsudc_dev,
+					    ctrlrequest.r.bRequestType & 0xf,
+					    w_value, w_index) < 0)
+			goto hsudc_ep0_setup_stall;
+		/* Finish control transaction */
+		hsudc_write8(HSUDC_EP0_CS_HSNAK_MSK, HSUDC_EP0_CS_REG8);
+		return;
+	case USB_REQ_SET_FEATURE:
+		dev_dbg(&hsudc_dev->pdev->dev, "USB_REQ_SET_FEATURE\n");
+		/* Helper expects (w_value, w_index) in that order */
+		if (hsudc_ep0_set_feature(hsudc_dev,
+					  ctrlrequest.r.bRequestType & 0xf,
+					  w_value, w_index) < 0)
+			goto hsudc_ep0_setup_stall;
+		/* Finish control transaction */
+		hsudc_write8(HSUDC_EP0_CS_HSNAK_MSK, HSUDC_EP0_CS_REG8);
+		return;
+	case USB_REQ_GET_STATUS:
+		dev_dbg(&hsudc_dev->pdev->dev, "USB_REQ_GET_STATUS\n");
+		if (hsudc_ep0_get_status(hsudc_dev,
+					 ctrlrequest.r.bRequestType & 0xf,
+					 w_index) < 0)
+			goto hsudc_ep0_setup_stall;
+		return;
+	default:
+		/* Delegate all remaining requests to the gadget driver */
+		ret =
+		    hsudc_dev->driver->setup(&hsudc_dev->gadget,
+					     &ctrlrequest.r);
+		dev_vdbg(&hsudc_dev->pdev->dev, "Driver SETUP ret %d\n", ret);
+		if (ret < 0) {
+			dev_dbg(&hsudc_dev->pdev->dev,
+				"req %02x.%02x protocol STALL; ret %d\n",
+				ctrlrequest.r.bRequestType,
+				ctrlrequest.r.bRequest, ret);
+			goto hsudc_ep0_setup_stall;
+		}
+	}
+
+	if (!w_length) {
+		/* No data stage: finish the control transaction now */
+		hsudc_write8(HSUDC_EP0_CS_HSNAK_MSK, HSUDC_EP0_CS_REG8);
+	}
+#undef w_value
+#undef w_index
+#undef w_length
+
+	return;
+
+hsudc_ep0_setup_stall:
+	hsudc_write8(HSUDC_EP0_CS_STALL_MSK, HSUDC_EP0_CS_REG8);
+}
+
+/*
+ * hsudc_ep_work() - per-endpoint work handler: start the next request.
+ *
+ * Takes the head of the endpoint queue and starts it.  DMA is used when
+ * it is enabled in hardware, a free channel can be obtained and the
+ * request buffer is 32-bit aligned; otherwise the transfer falls back
+ * to PIO: for IN endpoints the CPU fills the FIFO and arms the
+ * endpoint, for OUT endpoints the endpoint is armed and the completion
+ * is interrupt driven.  If the queue is empty the endpoint interrupt
+ * is disabled.
+ */
+static void hsudc_ep_work(struct work_struct *work)
+{
+	struct cadence_hsudc *hsudc_dev;
+	struct cadence_hsudc_ep *hsudc_ep;
+	struct cadence_hsudc_request *hsudc_req;
+
+	hsudc_ep = container_of(work, struct cadence_hsudc_ep, ws);
+	hsudc_dev = hsudc_ep->hsudc_dev;
+
+	spin_lock(&hsudc_ep->s);
+
+	if (list_empty(&hsudc_ep->queue)) {
+		dev_dbg(&hsudc_dev->pdev->dev, "%s(): ep%d%s queue empty\n",
+			__func__,
+			hsudc_ep->num,
+			(hsudc_ep->is_in?"in":"out"));
+
+		/* Disable EP IRQ */
+		if (hsudc_ep->is_in) {
+			hsudc_write16(hsudc_read16(HSUDC_INIEN_REG16) &
+				      ~(1 << hsudc_ep->num),
+				      HSUDC_INIEN_REG16);
+			hsudc_write16(1 << hsudc_ep->num, HSUDC_INIRQ_REG16);
+		} else {
+			hsudc_write16(hsudc_read16(HSUDC_OUTIEN_REG16) &
+				      ~(1 << hsudc_ep->num),
+				      HSUDC_OUTIEN_REG16);
+			hsudc_write16(1 << hsudc_ep->num, HSUDC_OUTIRQ_REG16);
+		}
+		spin_unlock(&hsudc_ep->s);
+		return;
+	}
+
+	hsudc_req = list_entry(hsudc_ep->queue.next,
+			       struct cadence_hsudc_request, queue);
+
+	hsudc_ep->cur = hsudc_req;
+
+	/* Queue was non-empty under the lock, so this cannot trigger */
+	if (!hsudc_req)
+		BUG();
+
+	spin_unlock(&hsudc_ep->s);
+
+	dev_vdbg(&hsudc_dev->pdev->dev, "%s(): ep%d%s req %p/%d:%d\n",
+		 __func__,
+		 hsudc_ep->num, (hsudc_ep->is_in?"in":"out"),
+		 &hsudc_req->req,
+		 hsudc_req->req.length,
+		 hsudc_req->req.actual);
+
+	/* DMA path: needs a non-empty, word-aligned buffer, DMA enabled
+	 * in the hardware config, a free channel and a successful
+	 * descriptor setup — otherwise fall through to PIO below.
+	 */
+	if (hsudc_req->req.length > 0 &&
+	    ((unsigned long)hsudc_req->req.buf & 0x3) == 0 &&
+	    hsudc_dev->hw_config->dma_enabled &&
+	    hsudc_dma_get_channel(hsudc_dev, hsudc_ep) == 0 &&
+	    hsudc_dma_init(hsudc_dev, hsudc_ep, hsudc_req) == 0) {
+
+		/* Start DMA Channel */
+		hsudc_write8(HSUDC_DMA_WORK_START,
+			     HSUDC_DMA_WORK_REG8(hsudc_ep->dma_channel->num));
+
+	} else if (hsudc_ep->is_in) {
+		uint8_t *buf =
+		    hsudc_req->req.buf + hsudc_req->req.actual;
+		unsigned length =
+		    hsudc_req->req.length - hsudc_req->req.actual;
+		unsigned i;
+
+		/* PIO IN: at most one maxpacket per arming */
+		if (length > hsudc_ep->ep.maxpacket)
+			length = hsudc_ep->ep.maxpacket;
+
+		/* copy data in ep fifo, with optimized accesses */
+		for (i = 0; i < length;) {
+			if ((i % 4) == 0 && (length - i) >= 4) {
+				hsudc_write32(*(uint32_t *) (&buf[i]),
+					      HSUDC_FIFODAT_REG32
+					      (hsudc_ep->num));
+				i += 4;
+			} else if ((i % 2) == 0 && (length - i) >= 2) {
+				hsudc_write16(*(uint16_t *) (&buf[i]),
+					      HSUDC_FIFODAT_REG32
+					      (hsudc_ep->num));
+				i += 2;
+			} else {
+				hsudc_write8(*(uint8_t *) (&buf[i]),
+					     HSUDC_FIFODAT_REG32
+					     (hsudc_ep->num));
+				i += 1;
+			}
+		}
+
+		hsudc_req->req.actual += length;
+
+		dev_dbg(&hsudc_dev->pdev->dev, "%s(): ep%d%s req %p/%d:%d start\n",
+			__func__,
+			hsudc_ep->num, (hsudc_ep->is_in?"in":"out"),
+			&hsudc_req->req,
+			hsudc_req->req.length,
+			hsudc_req->req.actual);
+
+		/* Enable IRQ */
+		hsudc_write16(1 << hsudc_ep->num, HSUDC_INIRQ_REG16);
+		hsudc_write16(hsudc_read16(HSUDC_INIEN_REG16) |
+			      (1 << hsudc_ep->num), HSUDC_INIEN_REG16);
+
+		/* Arm IN ep, set busy bit to enable sending to the host */
+		hsudc_write8(0x00, HSUDC_EP_INCS_REG8(hsudc_ep->num));
+	} else {
+		/* Enable IRQ */
+		hsudc_write16(hsudc_read16(HSUDC_OUTIEN_REG16) |
+			      (1 << hsudc_ep->num), HSUDC_OUTIEN_REG16);
+
+		/* ARM out ep, set busy bit to enable acking from the host */
+		hsudc_write8(0x00, HSUDC_EP_OUTCS_REG8(hsudc_ep->num));
+	}
+}
+
+/*
+ * cadence_hsudc_ep_queue() - usb_ep_ops.queue: submit a request.
+ *
+ * Validates the request, the device state and the endpoint, then
+ * appends the request to the endpoint queue.  ep0 requests are
+ * delegated to hsudc_ep0_queue().  If the queue was idle, the
+ * per-endpoint work item is scheduled to start the transfer.
+ * Returns 0 on success or -EINVAL on an invalid argument/state.
+ */
+static int cadence_hsudc_ep_queue(struct usb_ep *ep, struct usb_request *req,
+				  gfp_t gfp_flags)
+{
+	struct cadence_hsudc *hsudc_dev;
+	struct cadence_hsudc_ep *hsudc_ep;
+	struct cadence_hsudc_request *hsudc_req;
+	int running = 0;
+
+	/* container_of() is pure pointer arithmetic: safe before the
+	 * NULL checks below
+	 */
+	hsudc_req = container_of(req, struct cadence_hsudc_request, req);
+	hsudc_ep = container_of(ep, struct cadence_hsudc_ep, ep);
+
+	hsudc_dev = hsudc_ep->hsudc_dev;
+
+	if (!req || !req->complete || !req->buf) {
+		dev_err(&hsudc_dev->pdev->dev, "%s(): error invalid request %p\n",
+			__func__, req);
+		return -EINVAL;
+	}
+
+	if (!hsudc_dev->driver ||
+	    hsudc_dev->gadget.speed == USB_SPEED_UNKNOWN) {
+		dev_err(&hsudc_dev->pdev->dev, "%s(): error invalid device\n",
+			__func__);
+		return -EINVAL;
+	}
+
+	/* ep0 has no descriptor; all other endpoints must be enabled */
+	if (!hsudc_ep->desc && hsudc_ep->num) {
+		dev_err(&hsudc_dev->pdev->dev, "%s(): error invalid ep\n",
+			__func__);
+		return -EINVAL;
+	}
+
+	req->status = -EINPROGRESS;
+	req->actual = 0;
+
+	dev_dbg(&hsudc_dev->pdev->dev, "%s(): '%s', req %p, empty %d\n",
+		__func__, hsudc_ep->ep.name, req,
+		list_empty(&hsudc_ep->queue));
+
+	if (hsudc_ep->is_ep0)
+		return hsudc_ep0_queue(hsudc_dev, hsudc_req);
+
+	spin_lock(&hsudc_ep->s);
+
+	/* The work item is already pending when the queue is non-empty */
+	if (list_empty(&hsudc_ep->queue))
+		running = 0;
+	else
+		running = 1;
+
+	list_add_tail(&hsudc_req->queue, &hsudc_ep->queue);
+
+	if (!running)
+		queue_work(hsudc_dev->wq_ep, &hsudc_ep->ws);
+
+	spin_unlock(&hsudc_ep->s);
+
+	return 0;
+}
+
+/*
+ * cadence_hsudc_ep_dequeue() - usb_ep_ops.dequeue: cancel a request.
+ *
+ * If the request is the one currently being processed, its status is
+ * set to -ECONNRESET and the completion work item is scheduled to
+ * finish it; otherwise the request is unlinked from the queue and its
+ * completion callback is invoked directly.
+ * Returns 0 on success or -EINVAL on an invalid argument/state.
+ */
+static int cadence_hsudc_ep_dequeue(struct usb_ep *ep, struct usb_request *req)
+{
+	struct cadence_hsudc *hsudc_dev;
+	struct cadence_hsudc_ep *hsudc_ep;
+	struct cadence_hsudc_request *hsudc_req;
+
+	hsudc_req = container_of(req, struct cadence_hsudc_request, req);
+	hsudc_ep = container_of(ep, struct cadence_hsudc_ep, ep);
+
+	hsudc_dev = hsudc_ep->hsudc_dev;
+
+	if (!req || !req->complete || !req->buf) {
+		dev_err(&hsudc_dev->pdev->dev, "%s(): error invalid request %p\n",
+			__func__, req);
+		return -EINVAL;
+	}
+
+	if (!hsudc_dev->driver ||
+	    hsudc_dev->gadget.speed == USB_SPEED_UNKNOWN) {
+		dev_err(&hsudc_dev->pdev->dev, "%s(): error invalid device\n",
+			__func__);
+		return -EINVAL;
+	}
+
+	/* ep0 has no descriptor; all other endpoints must be enabled */
+	if (!hsudc_ep->desc && hsudc_ep->num) {
+		dev_err(&hsudc_dev->pdev->dev, "%s(): error invalid ep\n",
+			__func__);
+		return -EINVAL;
+	}
+
+	spin_lock(&hsudc_ep->s);
+
+	if (hsudc_ep->cur == hsudc_req) {
+		dev_dbg(&hsudc_dev->pdev->dev, "%s(): ep%d%s req %p unqueue cur req\n",
+			__func__,
+			hsudc_ep->num,
+			(hsudc_ep->is_in?"in":"out"), hsudc_req);
+		req->status = -ECONNRESET;
+		/* Let the completion work item finish the in-flight req */
+		queue_work(hsudc_dev->wq_ep, &hsudc_ep->comp);
+		spin_unlock(&hsudc_ep->s);
+		return 0;
+	}
+
+	dev_dbg(&hsudc_dev->pdev->dev, "%s(): ep%d%s req %p unqueue\n",
+		__func__,
+		hsudc_ep->num,
+		(hsudc_ep->is_in?"in":"out"), hsudc_req);
+
+	/* Remove request from list */
+	list_del_init(&hsudc_req->queue);
+
+	spin_unlock(&hsudc_ep->s);
+
+	/* Complete outside the lock: the callback may re-queue */
+	req->status = -ECONNRESET;
+	req->complete(ep, req);
+
+	return 0;
+}
+
+/*
+ * cadence_hsudc_ep_set_halt() - usb_ep_ops.set_halt: stall/unstall.
+ * @ep: endpoint to operate on
+ * @value: non-zero to halt (STALL) the endpoint, zero to clear it
+ *
+ * Clearing the halt also resets the endpoint's data toggle via the
+ * ENDPRST register.  Always returns 0.
+ * NOTE(review): queued/in-flight requests are not checked here —
+ * confirm whether halting with a pending transfer should return -EAGAIN.
+ */
+static int cadence_hsudc_ep_set_halt(struct usb_ep *ep, int value)
+{
+	struct cadence_hsudc *hsudc_dev;
+	struct cadence_hsudc_ep *hsudc_ep;
+
+	hsudc_ep = container_of(ep, struct cadence_hsudc_ep, ep);
+	hsudc_dev = hsudc_ep->hsudc_dev;
+
+	spin_lock(&hsudc_ep->s);
+
+	if (hsudc_ep->is_in) {
+		if (value) {
+			dev_vdbg(&hsudc_dev->pdev->dev, "%s(): ep%d%s stall\n",
+				 __func__,
+				 hsudc_ep->num,
+				 (hsudc_ep->is_in?"in":"out"));
+
+			/* endpoint in stall */
+			hsudc_write8(hsudc_read8
+				     (HSUDC_EP_INCON_REG8(hsudc_ep->num)) |
+				     HSUDC_EP_CON_STALL_MSK,
+				     HSUDC_EP_INCON_REG8(hsudc_ep->num));
+		} else {
+			dev_vdbg(&hsudc_dev->pdev->dev, "%s(): ep%d%s unhalt\n",
+				 __func__,
+				 hsudc_ep->num,
+				 (hsudc_ep->is_in?"in":"out"));
+
+			/* Select endpoint (IO bit set selects IN side) */
+			hsudc_write8(hsudc_ep->num | HSUDC_ENDPRST_IO_MSK,
+				     HSUDC_ENDPRST_REG8);
+
+			/* Reset endpoint data toggle */
+			hsudc_write8(hsudc_ep->num | HSUDC_ENDPRST_IO_MSK |
+				     HSUDC_ENDPRST_TOGRST_MSK,
+				     HSUDC_ENDPRST_REG8);
+
+			/* UnHalt */
+			hsudc_write8(
+				hsudc_read8(HSUDC_EP_INCON_REG8(hsudc_ep->num))
+				& ~HSUDC_EP_CON_STALL_MSK,
+				HSUDC_EP_INCON_REG8(hsudc_ep->num));
+		}
+	} else {
+		if (value) {
+			dev_vdbg(&hsudc_dev->pdev->dev, "%s(): ep%d%s stall\n",
+				 __func__,
+				 hsudc_ep->num,
+				 (hsudc_ep->is_in?"in":"out"));
+
+			/* endpoint out stall */
+			hsudc_write8(
+				hsudc_read8(HSUDC_EP_OUTCON_REG8(hsudc_ep->num))
+				| HSUDC_EP_CON_STALL_MSK,
+				HSUDC_EP_OUTCON_REG8(hsudc_ep->num));
+		} else {
+			dev_vdbg(&hsudc_dev->pdev->dev, "%s(): ep%d%s unhalt\n",
+				 __func__,
+				 hsudc_ep->num,
+				 (hsudc_ep->is_in?"in":"out"));
+
+			/* Select endpoint (IO bit clear selects OUT side) */
+			hsudc_write8(hsudc_ep->num, HSUDC_ENDPRST_REG8);
+
+			/* Reset endpoint data toggle */
+			hsudc_write8(hsudc_ep->num | HSUDC_ENDPRST_TOGRST_MSK,
+				     HSUDC_ENDPRST_REG8);
+
+			/* UnHalt */
+			hsudc_write8(
+				hsudc_read8(HSUDC_EP_OUTCON_REG8(hsudc_ep->num))
+				& ~HSUDC_EP_CON_STALL_MSK,
+				HSUDC_EP_OUTCON_REG8(hsudc_ep->num));
+		}
+	}
+
+	spin_unlock(&hsudc_ep->s);
+
+	return 0;
+}
+
+/* Endpoint operations exposed to the gadget layer.
+ * Note: fifo_status/fifo_flush/set_wedge are intentionally not provided.
+ */
+static const struct usb_ep_ops cadence_hsudc_ep_ops = {
+	.enable = cadence_hsudc_ep_enable,
+	.disable = cadence_hsudc_ep_disable,
+	.alloc_request = cadence_hsudc_ep_alloc_request,
+	.free_request = cadence_hsudc_ep_free_request,
+	.queue = cadence_hsudc_ep_queue,
+	.dequeue = cadence_hsudc_ep_dequeue,
+	.set_halt = cadence_hsudc_ep_set_halt,
+};
+
+/*
+ * cadence_hsudc_udc_start() - usb_gadget_ops.udc_start: bind a gadget
+ * driver and bring the controller online.
+ *
+ * Programs high-speed mode, configures ep0, removes the soft
+ * disconnect and enables the SETUP/reset/high-speed interrupts.
+ * Returns 0 on success, -EINVAL on bad arguments or if a driver is
+ * already bound.
+ *
+ * Made static: the function is only referenced through
+ * cadence_hsudc_gadget_ops in this file.
+ */
+static int cadence_hsudc_udc_start(struct usb_gadget *gadget,
+				   struct usb_gadget_driver *driver)
+{
+	struct cadence_hsudc *hsudc_dev = container_of(gadget,
+					struct cadence_hsudc, gadget);
+
+	dev_dbg(&hsudc_dev->pdev->dev, "%s():\n", __func__);
+
+	if (!driver
+	    || !driver->setup) {
+		dev_err(&hsudc_dev->pdev->dev, "%s(): error invalid arguments\n",
+			__func__);
+		return -EINVAL;
+	}
+
+	if (hsudc_dev->driver) {
+		dev_err(&hsudc_dev->pdev->dev, "%s(): error already in use\n",
+			__func__);
+		return -EINVAL;
+	}
+
+	driver->driver.bus = NULL;
+	hsudc_dev->driver = driver;
+	hsudc_dev->gadget.dev.of_node = hsudc_dev->pdev->dev.of_node;
+	hsudc_dev->gadget.speed = USB_SPEED_UNKNOWN;
+
+	/* Setup USB Speed */
+	hsudc_write8(HSUDC_SPEEDCTRL_HS_MSK,
+		     HSUDC_SPEEDCTRL_REG8);
+
+	/* Configure EP0 maxpacket (EVCI writes 8 here) */
+	hsudc_write8(0xFF, HSUDC_EP0_OUTBC_REG8);
+	hsudc_write8(hsudc_dev->ep0.maxpacket, HSUDC_EP0_MAXPCK_REG8);
+	hsudc_write16(hsudc_read16(HSUDC_OUTIEN_REG16) | 1,
+		      HSUDC_OUTIEN_REG16);
+
+	/* Connect: release the soft-disconnect bit */
+	hsudc_write8(hsudc_read8(HSUDC_USBCS_REG8) & ~HSUDC_USBCS_DISCON_MSK,
+		     HSUDC_USBCS_REG8);
+
+	/* Enable : */
+	/* - High Speed mode interrupt */
+	/* - Start reset interrupt */
+	/* - SETUP data interrupt */
+	/* - Suspend interrupt */
+	hsudc_write8(HSUDC_USBIEN_SUTOKIE_MSK |
+		     HSUDC_USBIEN_URESIE_MSK |
+		     HSUDC_USBIEN_HSPIE_MSK,
+		     HSUDC_USBIEN_REG8);
+
+	dev_dbg(&hsudc_dev->pdev->dev, "%s(): bound to %s\n", __func__,
+		driver->driver.name);
+
+	return 0;
+}
+
+/*
+ * cadence_hsudc_udc_stop() - usb_gadget_ops.udc_stop: unbind the
+ * gadget driver and take the controller offline.
+ *
+ * Quiesces the interrupt and all per-endpoint work items, then asserts
+ * the soft disconnect and clears the bound-driver state.
+ *
+ * Made static: only referenced through cadence_hsudc_gadget_ops.
+ *
+ * NOTE(review): disable_irq() here is never balanced by enable_irq(),
+ * so a subsequent udc_start() runs with the interrupt line disabled —
+ * confirm and add the matching enable_irq() where appropriate.
+ */
+static int cadence_hsudc_udc_stop(struct usb_gadget *gadget)
+{
+	struct cadence_hsudc *hsudc_dev = container_of(gadget,
+					struct cadence_hsudc, gadget);
+	unsigned i;
+
+	if (!hsudc_dev->driver) {
+		dev_err(&hsudc_dev->pdev->dev, "%s(): error invalid arguments\n",
+			__func__);
+		return -EINVAL;
+	}
+
+	disable_irq(hsudc_dev->irq);
+
+	/* Cancel all per-endpoint start/completion work items */
+	for (i = 1; i < HSUDC_EP_COUNT; i++) {
+		if (hsudc_dev->ep_in[i].is_available) {
+			cancel_work_sync(&hsudc_dev->ep_in[i].ws);
+			cancel_work_sync(&hsudc_dev->ep_in[i].comp);
+		}
+		if (hsudc_dev->ep_out[i].is_available) {
+			cancel_work_sync(&hsudc_dev->ep_out[i].ws);
+			cancel_work_sync(&hsudc_dev->ep_out[i].comp);
+		}
+	}
+	cancel_work_sync(&hsudc_dev->ep0.ws);
+	cancel_work_sync(&hsudc_dev->ep0.comp);
+	cancel_work_sync(&hsudc_dev->ep0_setup);
+	flush_workqueue(hsudc_dev->wq_ep);
+
+	/* Disconnect: assert the soft-disconnect bit */
+	hsudc_write8(hsudc_read8(HSUDC_USBCS_REG8) | HSUDC_USBCS_DISCON_MSK,
+		     HSUDC_USBCS_REG8);
+
+	hsudc_dev->driver = NULL;
+	hsudc_dev->gadget.speed = USB_SPEED_UNKNOWN;
+
+	dev_dbg(&hsudc_dev->pdev->dev, "%s(): unbound\n", __func__);
+
+	return 0;
+}
+
+/* Gadget operations: only udc_start/udc_stop are implemented
+ * (no pullup/vbus/wakeup support).
+ */
+static const struct usb_gadget_ops cadence_hsudc_gadget_ops = {
+	.udc_start = cadence_hsudc_udc_start,
+	.udc_stop = cadence_hsudc_udc_stop,
+};
+
+/* Match table for of_platform binding */
+/* Match table for of_platform binding */
+static const struct of_device_id cadence_hsudc_of_match[] = {
+	{ .compatible = "cdns,usbhs-udc" },
+	{ /* Sentinel */ }
+};
+
+MODULE_DEVICE_TABLE(of, cadence_hsudc_of_match);
+
+/*
+ * cadence_hsudc_of_probe() - parse the device-tree hardware description.
+ *
+ * Reads the cdns,ep-in*/cdns,ep-out* arrays describing which endpoints
+ * the IP was synthesized with, their FIFO sizes, buffering depth and
+ * buffer start addresses, plus the optional DMA configuration, and
+ * stores everything in a freshly allocated hsudc_hw_config attached to
+ * @hsudc_dev.  Returns 0 on success or a negative errno when a
+ * required property is missing or inconsistently sized.
+ */
+static int cadence_hsudc_of_probe(struct cadence_hsudc *hsudc_dev)
+{
+	int ret, i;
+	int ep_in_count = 0;
+	int ep_out_count = 0;
+	struct device_node *np;
+	struct hsudc_hw_config *hw_config;
+	u32 val;
+
+	np = hsudc_dev->pdev->dev.of_node;
+	if (!np)
+		return -EINVAL;
+
+	hw_config = devm_kzalloc(&hsudc_dev->pdev->dev,
+				 sizeof(struct hsudc_hw_config),
+				 GFP_KERNEL);
+	if (!hw_config)
+		return -ENOMEM;
+
+	/* of_property_count_u32_elems() returns a negative errno on a
+	 * missing/bad property, which the < 1 tests also reject
+	 */
+	ep_in_count = of_property_count_u32_elems(np, "cdns,ep-in");
+	if (ep_in_count < 1) {
+		dev_err(&hsudc_dev->pdev->dev, "cdns,ep-in should have 1+ ep\n");
+		return -EINVAL;
+	}
+	if (ep_in_count >= HSUDC_EP_COUNT)
+		ep_in_count = HSUDC_EP_COUNT;
+	dev_info(&hsudc_dev->pdev->dev, "max %d in EPs\n", ep_in_count);
+
+	ep_out_count = of_property_count_u32_elems(np, "cdns,ep-out");
+	if (ep_out_count < 1) {
+		dev_err(&hsudc_dev->pdev->dev, "cdns,ep-out should have 1+ ep\n");
+		return -EINVAL;
+	}
+	if (ep_out_count >= HSUDC_EP_COUNT)
+		ep_out_count = HSUDC_EP_COUNT;
+	dev_info(&hsudc_dev->pdev->dev, "max %d out EPs\n", ep_out_count);
+
+	/* The per-endpoint attribute arrays must cover every declared ep */
+	ret = of_property_count_u32_elems(np, "cdns,ep-in-size");
+	if (ret < ep_in_count) {
+		dev_err(&hsudc_dev->pdev->dev, "cdns,ep-in-size size differs (%d < %d)\n",
+			ret, ep_in_count);
+		return -EINVAL;
+	}
+
+	ret = of_property_count_u32_elems(np, "cdns,ep-in-buffers");
+	if (ret < ep_in_count) {
+		dev_err(&hsudc_dev->pdev->dev, "cdns,ep-in-buffers size differs (%d < %d)\n",
+			ret, ep_in_count);
+		return -EINVAL;
+	}
+
+	ret = of_property_count_u32_elems(np, "cdns,ep-in-buffstart");
+	if (ret < ep_in_count) {
+		dev_err(&hsudc_dev->pdev->dev, "cdns,ep-in-buffstart size differs (%d < %d)\n",
+			ret, ep_in_count);
+		return -EINVAL;
+	}
+
+	ret = of_property_count_u32_elems(np, "cdns,ep-out-size");
+	if (ret < ep_out_count) {
+		dev_err(&hsudc_dev->pdev->dev, "cdns,ep-out-size size differs (%d < %d)\n",
+			ret, ep_out_count);
+		return -EINVAL;
+	}
+
+	ret = of_property_count_u32_elems(np, "cdns,ep-out-buffers");
+	if (ret < ep_out_count) {
+		dev_err(&hsudc_dev->pdev->dev, "cdns,ep-out-buffers size differs (%d < %d)\n",
+			ret, ep_out_count);
+		return -EINVAL;
+	}
+
+	ret = of_property_count_u32_elems(np, "cdns,ep-out-buffstart");
+	if (ret < ep_out_count) {
+		dev_err(&hsudc_dev->pdev->dev, "cdns,ep-out-buffstart size differs (%d < %d)\n",
+			ret, ep_out_count);
+		return -EINVAL;
+	}
+
+	/* Fill the IN-endpoint description; absent eps stay zeroed */
+	for (i = 0 ; i < ep_in_count ; ++i) {
+		of_property_read_u32_index(np, "cdns,ep-in", i, &val);
+		hw_config->ep_in_exist[i] = !!val;
+		if (!hw_config->ep_in_exist[i])
+			continue;
+		of_property_read_u32_index(np, "cdns,ep-in-size", i, &val);
+		hw_config->ep_in_size[i] = val;
+		of_property_read_u32_index(np, "cdns,ep-in-buffers", i, &val);
+		hw_config->ep_in_buffering[i] = val;
+		of_property_read_u32_index(np, "cdns,ep-in-buffstart", i, &val);
+		hw_config->ep_in_startbuff[i] = val;
+	}
+
+	/* Fill the OUT-endpoint description; absent eps stay zeroed */
+	for (i = 0 ; i < ep_out_count ; ++i) {
+		of_property_read_u32_index(np, "cdns,ep-out", i, &val);
+		hw_config->ep_out_exist[i] = !!val;
+		if (!hw_config->ep_out_exist[i])
+			continue;
+		of_property_read_u32_index(np, "cdns,ep-out-size", i, &val);
+		hw_config->ep_out_size[i] = val;
+		of_property_read_u32_index(np, "cdns,ep-out-buffers", i, &val);
+		hw_config->ep_out_buffering[i] = val;
+		of_property_read_u32_index(np,
+					   "cdns,ep-out-buffstart", i, &val);
+		hw_config->ep_out_startbuff[i] = val;
+	}
+
+	/* DMA is opt-in and requires a valid channel count */
+	if (of_property_read_bool(np, "cdns,dma-enable")) {
+		ret = of_property_read_u32(np, "cdns,dma-channels", &val);
+		if (ret < 0 || val < 1)
+			dev_warn(&hsudc_dev->pdev->dev,
+				 "cdns,dma-enable exists without valid cdns,dma-channels, disabling DMA\n");
+		else {
+			hw_config->dma_enabled = 1;
+			hw_config->dma_channels = val;
+		}
+	}
+
+	hsudc_dev->hw_config = hw_config;
+
+	return 0;
+}
+
+/*
+ * cadence_hsudc_probe() - platform probe: parse DT, map registers,
+ * initialize software state and hardware, register the UDC.
+ *
+ * Fixes vs. previous revision:
+ *  - platform_get_irq() may return a negative errno; it was passed
+ *    unchecked to devm_request_irq().  It is now validated first.
+ *  - missing '\n' in the banner dev_info().
+ */
+static int cadence_hsudc_probe(struct platform_device *pdev)
+{
+	struct cadence_hsudc *hsudc_dev;
+	struct resource *res;
+	struct reset_control *reset;
+	struct clk *pclk;
+	int i;
+	int ret = -EIO;
+
+	/* Optional reset line; only defer when the provider is not
+	 * ready yet
+	 */
+	reset = devm_reset_control_get(&pdev->dev, NULL);
+	if (IS_ERR(reset) && PTR_ERR(reset) == -EPROBE_DEFER)
+		return PTR_ERR(reset);
+
+	dev_info(&pdev->dev, "Cadence USB2.0 Device Controller\n");
+
+	hsudc_dev = devm_kzalloc(&pdev->dev, sizeof(struct cadence_hsudc),
+				 GFP_KERNEL);
+	if (hsudc_dev == NULL)
+		return -ENOMEM;
+
+	hsudc_dev->pdev = pdev;
+
+	ret = cadence_hsudc_of_probe(hsudc_dev);
+	if (ret)
+		return ret;
+
+	/* TODO : Add non-dt pdata initialization */
+	if (!hsudc_dev->hw_config) {
+		dev_err(&hsudc_dev->pdev->dev, "%s(): error hw_config missing\n",
+			__func__);
+		return -EINVAL;
+	}
+
+	/* Optional peripheral clock; enabled below if present */
+	pclk = devm_clk_get(&pdev->dev, NULL);
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	hsudc_dev->io_base = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(hsudc_dev->io_base)) {
+		dev_err(&hsudc_dev->pdev->dev, "%s(): error ioremap() failed\n",
+			__func__);
+		return PTR_ERR(hsudc_dev->io_base);
+	}
+
+	hsudc_dev->irq = platform_get_irq(pdev, 0);
+	if (hsudc_dev->irq < 0) {
+		dev_err(&hsudc_dev->pdev->dev, "%s(): error platform_get_irq() failed\n",
+			__func__);
+		return hsudc_dev->irq;
+	}
+	ret = devm_request_irq(&pdev->dev, hsudc_dev->irq,
+			cadence_hsudc_irq, 0, "hsudc_dev_irq", hsudc_dev);
+	if (ret) {
+		dev_err(&hsudc_dev->pdev->dev, "%s(): error request_irq() failed\n",
+			__func__);
+		return ret;
+	}
+
+	hsudc_dev->wq_ep = create_workqueue("hsudc_wq_ep");
+	if (!hsudc_dev->wq_ep) {
+		dev_err(&hsudc_dev->pdev->dev,
+			"%s(): error create_workqueue() failed\n", __func__);
+		return -EBUSY;
+	}
+
+	/* init software state */
+	hsudc_dev->gadget.max_speed = USB_SPEED_HIGH;
+	hsudc_dev->gadget.ops = &cadence_hsudc_gadget_ops;
+	hsudc_dev->gadget.name = dev_name(&pdev->dev);
+	hsudc_dev->gadget.ep0 = &hsudc_dev->ep0.ep;
+
+	/* ep0 init handling */
+	spin_lock_init(&hsudc_dev->ep0.s);
+	INIT_LIST_HEAD(&hsudc_dev->ep0.queue);
+	hsudc_dev->ep0.maxpacket = hsudc_dev->hw_config->ep_in_size[0];
+	usb_ep_set_maxpacket_limit(&hsudc_dev->ep0.ep,
+				   hsudc_dev->ep0.maxpacket);
+	hsudc_dev->ep0.ep.ops = &cadence_hsudc_ep_ops;
+	hsudc_dev->ep0.ep.name = "ep0-inout";
+	hsudc_dev->ep0.is_available = 1;
+	hsudc_dev->ep0.is_ep0 = 1;
+	hsudc_dev->ep0.num = 0;
+	hsudc_dev->ep0.hsudc_dev = hsudc_dev;
+	INIT_WORK(&hsudc_dev->ep0.ws, hsudc_ep0_work);
+	INIT_WORK(&hsudc_dev->ep0.comp, hsudc_ep0_completion);
+	INIT_WORK(&hsudc_dev->ep0_setup, hsudc_ep0_setup);
+
+	/* other ep init handling */
+	INIT_LIST_HEAD(&hsudc_dev->gadget.ep_list);
+	INIT_LIST_HEAD(&hsudc_dev->gadget.ep0->ep_list);
+	/* IN Endpoints */
+	for (i = 1; i < HSUDC_EP_COUNT; i++) {
+		struct cadence_hsudc_ep *ep = &hsudc_dev->ep_in[i];
+
+		if (!hsudc_dev->hw_config->ep_in_exist[i])
+			continue;
+
+		hsudc_dev->ep_in[i].num = i;
+		hsudc_dev->ep_in[i].hsudc_dev = hsudc_dev;
+		hsudc_dev->ep_in[i].is_available = 1;
+		hsudc_dev->ep_in[i].is_in = 1;
+		hsudc_dev->ep_in[i].maxpacket =
+		    hsudc_dev->hw_config->ep_in_size[i];
+		hsudc_dev->ep_in[i].ep.name =
+		    kasprintf(GFP_KERNEL, "ep%din-bulk", i);
+		usb_ep_set_maxpacket_limit(&hsudc_dev->ep_in[i].ep,
+					   hsudc_dev->ep_in[i].maxpacket);
+		hsudc_dev->ep_in[i].ep.ops = &cadence_hsudc_ep_ops;
+		INIT_LIST_HEAD(&ep->queue);
+		list_add_tail(&ep->ep.ep_list, &hsudc_dev->gadget.ep_list);
+		INIT_WORK(&hsudc_dev->ep_in[i].ws, hsudc_ep_work);
+		INIT_WORK(&hsudc_dev->ep_in[i].comp, hsudc_ep_completion);
+	}
+	/* OUT Endpoints */
+	for (i = 1; i < HSUDC_EP_COUNT; i++) {
+		struct cadence_hsudc_ep *ep = &hsudc_dev->ep_out[i];
+
+		if (!hsudc_dev->hw_config->ep_out_exist[i])
+			continue;
+
+		hsudc_dev->ep_out[i].num = i;
+		hsudc_dev->ep_out[i].hsudc_dev = hsudc_dev;
+		hsudc_dev->ep_out[i].is_available = 1;
+		hsudc_dev->ep_out[i].maxpacket =
+		    hsudc_dev->hw_config->ep_out_size[i];
+		hsudc_dev->ep_out[i].ep.name =
+		    kasprintf(GFP_KERNEL, "ep%dout-bulk", i);
+		usb_ep_set_maxpacket_limit(&hsudc_dev->ep_out[i].ep,
+					   hsudc_dev->ep_out[i].maxpacket);
+		hsudc_dev->ep_out[i].ep.ops = &cadence_hsudc_ep_ops;
+		INIT_LIST_HEAD(&ep->queue);
+		list_add_tail(&ep->ep.ep_list, &hsudc_dev->gadget.ep_list);
+		INIT_WORK(&hsudc_dev->ep_out[i].ws, hsudc_ep_work);
+		INIT_WORK(&hsudc_dev->ep_out[i].comp, hsudc_ep_completion);
+	}
+
+	/* DMA Channels */
+	if (hsudc_dev->hw_config->dma_enabled) {
+		for (i = 0; i < hsudc_dev->hw_config->dma_channels; ++i) {
+			hsudc_dev->dma_channels[i].num = i;
+			hsudc_dev->dma_channels[i].is_available = 1;
+			hsudc_dev->dma_channels[i].in_use = 0;
+			hsudc_dev->dma_channels[i].cur_ep = NULL;
+		}
+		sema_init(&hsudc_dev->dma_sem,
+			  hsudc_dev->hw_config->dma_channels);
+		spin_lock_init(&hsudc_dev->dma_s);
+	}
+
+	/* Try to enable pclk */
+	if (!IS_ERR(pclk))
+		clk_prepare_enable(pclk);
+
+	if (!IS_ERR(reset))
+		reset_control_deassert(reset);
+
+	/* init hardware */
+	/* Configure each endpoints */
+	for (i = 1; i < HSUDC_EP_COUNT; i++) {
+		/* Clear irqs */
+		hsudc_write16(1 << i, HSUDC_ERRIRQ_OUT_REG16);
+		hsudc_write16(1 << i, HSUDC_ERRIRQ_IN_REG16);
+		hsudc_write16(1 << i, HSUDC_OUTIRQ_REG16);
+		hsudc_write16(1 << i, HSUDC_INIRQ_REG16);
+
+		/* OUT endpoint */
+		if (hsudc_dev->hw_config->ep_out_exist[i]) {
+			/* Configure buffer */
+			hsudc_write16(hsudc_dev->hw_config->ep_out_startbuff[i],
+				      HSUDC_EP_OUT_STARTADDR_REG16(i));
+			/* Configure endpoint with maximum buffering,
+			 * bulk, non stall and disabled
+			 */
+			hsudc_write8(hsudc_dev->hw_config->ep_out_buffering[i] &
+				     HSUDC_EP_CON_BUF_MSK,
+				     HSUDC_EP_OUTCON_REG8(i));
+		} else
+			hsudc_write8(0x00, HSUDC_EP_OUTCON_REG8(i));
+
+		/* IN endpoint */
+		if (hsudc_dev->hw_config->ep_in_exist[i]) {
+			/* Configure buffer */
+			hsudc_write16(hsudc_dev->hw_config->ep_in_startbuff[i],
+				      HSUDC_EP_IN_STARTADDR_REG16(i));
+			/* Configure endpoint with maximum buffering,
+			 * bulk, non stall and disabled
+			 */
+			hsudc_write8(hsudc_dev->hw_config->ep_in_buffering[i] &
+				     HSUDC_EP_CON_BUF_MSK,
+				     HSUDC_EP_INCON_REG8(i));
+		} else
+			hsudc_write8(0x00, HSUDC_EP_INCON_REG8(i));
+	}
+
+	/* Set FIFO access by the CPU */
+	hsudc_write8(HSUDC_FIFOCTRL_FIFOACC_MSK, HSUDC_FIFOCTRL_REG8);
+	hsudc_write8(HSUDC_FIFOCTRL_IO_MSK | HSUDC_FIFOCTRL_FIFOACC_MSK,
+		     HSUDC_FIFOCTRL_REG8);
+
+	/* Clear USB start reset interrupt */
+	hsudc_write8(HSUDC_USBIRQ_URES_MSK, HSUDC_USBIRQ_REG8);
+
+	/* DMA Channels Init */
+	if (hsudc_dev->hw_config->dma_enabled) {
+		hsudc_write32(0xFFFFFFFF, HSUDC_DMA_IRQ_REG32);
+		hsudc_write32(0, HSUDC_DMA_IEN_REG32);
+		hsudc_write32(0xFFFFFFFF, HSUDC_DMA_SHORTIRQ_REG32);
+		hsudc_write32(0, HSUDC_DMA_SHORTIEN_REG32);
+		hsudc_write32(0xFFFFFFFF, HSUDC_DMA_ERRORIRQ_REG32);
+		hsudc_write32(0, HSUDC_DMA_ERRORIEN_REG32);
+		for (i = 0; i < hsudc_dev->hw_config->dma_channels; ++i) {
+			hsudc_write8(HSUDC_DMA_WORK_RESET,
+				     HSUDC_DMA_WORK_REG8(i));
+		}
+
+		/* Set FIFO access by the DMA, CPU can still access FIFO */
+		hsudc_write8(HSUDC_FIFOCTRL_FIFOAUTO_MSK, HSUDC_FIFOCTRL_REG8);
+		hsudc_write8(HSUDC_FIFOCTRL_FIFOAUTO_MSK |
+			     HSUDC_FIFOCTRL_IO_MSK, HSUDC_FIFOCTRL_REG8);
+	}
+
+	ret = usb_add_gadget_udc(&pdev->dev, &hsudc_dev->gadget);
+	if (ret < 0) {
+		dev_err(&hsudc_dev->pdev->dev,
+			"%s(): error device_register() failed\n",
+			__func__);
+
+		destroy_workqueue(hsudc_dev->wq_ep);
+
+		return ret;
+	}
+
+	platform_set_drvdata(pdev, hsudc_dev);
+
+	dev_info(&hsudc_dev->pdev->dev, "%s %dx%dbytes FIFO\n",
+		 hsudc_dev->ep0.ep.name,
+		 hsudc_dev->hw_config->ep_in_buffering[0],
+		 hsudc_dev->hw_config->ep_in_size[0]);
+	dev_info(&hsudc_dev->pdev->dev, "1 IN/OUT Control EP\n");
+
+	/* 'ret' is reused below as a simple endpoint counter */
+	ret = 0;
+	for (i = 1 ; i < HSUDC_EP_COUNT ; ++i)
+		if (hsudc_dev->hw_config->ep_in_exist[i]) {
+			dev_info(&hsudc_dev->pdev->dev, "%s %dx%dbytes FIFO\n",
+				 hsudc_dev->ep_in[i].ep.name,
+				 hsudc_dev->hw_config->ep_in_buffering[i],
+				 hsudc_dev->hw_config->ep_in_size[i]);
+			ret++;
+		}
+	dev_info(&hsudc_dev->pdev->dev, "%d IN EPs\n", ret);
+
+	ret = 0;
+	for (i = 1 ; i < HSUDC_EP_COUNT ; ++i)
+		if (hsudc_dev->hw_config->ep_out_exist[i]) {
+			dev_info(&hsudc_dev->pdev->dev, "%s %dx%dbytes FIFO\n",
+				 hsudc_dev->ep_out[i].ep.name,
+				 hsudc_dev->hw_config->ep_out_buffering[i],
+				 hsudc_dev->hw_config->ep_out_size[i]);
+			ret++;
+		}
+	dev_info(&hsudc_dev->pdev->dev, "%d OUT EPs\n", ret);
+	if (hsudc_dev->hw_config->dma_enabled)
+		dev_info(&hsudc_dev->pdev->dev, "DMA Enabled with %d channels\n",
+			 hsudc_dev->hw_config->dma_channels);
+	else
+		dev_info(&hsudc_dev->pdev->dev, "DMA Support is Disabled\n");
+
+	dev_info(&hsudc_dev->pdev->dev, "ready\n");
+
+	return 0;
+}
+
+/*
+ * cadence_hsudc_remove() - platform remove: tear down the UDC.
+ *
+ * Fix vs. previous revision: the gadget was registered with
+ * usb_add_gadget_udc() but removed with a manual
+ * driver->disconnect()/unbind() plus device_unregister() on a device
+ * owned by the UDC core.  Use usb_del_gadget_udc(), which unbinds any
+ * bound gadget driver through the core and releases the gadget device.
+ */
+static int cadence_hsudc_remove(struct platform_device *pdev)
+{
+	struct cadence_hsudc *hsudc_dev = platform_get_drvdata(pdev);
+	unsigned i;
+
+	/* Unbinds the gadget driver (disconnect + unbind) via the core
+	 * and unregisters the gadget device
+	 */
+	usb_del_gadget_udc(&hsudc_dev->gadget);
+
+	/* Force disconnect from the host */
+	hsudc_write8(hsudc_read8(HSUDC_USBCS_REG8) | HSUDC_USBCS_DISCON_MSK,
+		     HSUDC_USBCS_REG8);
+
+	/* Stop all per-endpoint work and release the ep names */
+	for (i = 1; i < HSUDC_EP_COUNT; i++) {
+		if (hsudc_dev->ep_in[i].is_available) {
+			cancel_work_sync(&hsudc_dev->ep_in[i].ws);
+			cancel_work_sync(&hsudc_dev->ep_in[i].comp);
+			kfree(hsudc_dev->ep_in[i].ep.name);
+		}
+		if (hsudc_dev->ep_out[i].is_available) {
+			cancel_work_sync(&hsudc_dev->ep_out[i].ws);
+			cancel_work_sync(&hsudc_dev->ep_out[i].comp);
+			kfree(hsudc_dev->ep_out[i].ep.name);
+		}
+	}
+
+	flush_workqueue(hsudc_dev->wq_ep);
+
+	destroy_workqueue(hsudc_dev->wq_ep);
+
+	return 0;
+}
+
+/* Platform driver glue.
+ * NOTE(review): .owner is set by platform_driver_register() and can be
+ * dropped here.
+ */
+static struct platform_driver cadence_hsudc_driver = {
+	.probe = cadence_hsudc_probe,
+	.remove = cadence_hsudc_remove,
+	.driver = {
+		.name = "cadence_hsudc",
+		.owner = THIS_MODULE,
+		.of_match_table = of_match_ptr(cadence_hsudc_of_match),
+	},
+};
+
+module_platform_driver(cadence_hsudc_driver);
+
+MODULE_DESCRIPTION("Cadence USB2.0 Device Controller driver");
+MODULE_AUTHOR("Neil Armstrong <narmstrong@xxxxxxxxxxx>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/usb/gadget/udc/cadence_hsudc_regs.h b/drivers/usb/gadget/udc/cadence_hsudc_regs.h
new file mode 100644
index 0000000..45556af
--- /dev/null
+++ b/drivers/usb/gadget/udc/cadence_hsudc_regs.h
@@ -0,0 +1,283 @@
+/*
+ * linux/drivers/usb/gadget/udc/cadence_hsudc_regs.h
+ * - Cadence USB2.0 Device Controller Driver
+ *
+ * Copyright (C) 2015 Neotion
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef HSUDC_UDC_REGS
+#define HSUDC_UDC_REGS
+
+/* General Defines */
+/* 0 is control endpoint, 1 to 15 are configurable endpoints */
+#define HSUDC_EP_COUNT 16
+#define HSUDC_FIFODAT_COUNT 15
+/* EP0 buffer/setup-data lengths are in bytes */
+#define HSUDC_EP0_INBUF_LEN 64
+#define HSUDC_EP0_OUTBUF_LEN 64
+#define HSUDC_EP0_SETUPDAT_LEN 8
+#define HSUDC_DMA_CHANNELS 32
+
+/* Base Registers Addresses (byte offsets from the controller base) */
+#define HSUDC_EP_CTRL_BASE_REG(n) (0x0000 + 0x8*(n))
+#define HSUDC_FIFODAT_BASE_REG (0x0080)
+#define HSUDC_EP0_INBUF_BASE_REG (0x0100)
+#define HSUDC_EP0_OUTBUF_BASE_REG (0x0140)
+#define HSUDC_EP0_SETUPDAT_BASE_REG (0x0180)
+#define HSUDC_IRQ_REQ_BASE_REG (0x0188)
+#define HSUDC_IRQ_EN_BASE_REG (0x0194)
+#define HSUDC_IRQ_VEC_BASE_REG (0x01A0)
+#define HSUDC_CTRL_STAT_BASE_REG (0x01A2)
+#define HSUDC_IRQ_ERR_REG_BASE_REG (0x01B0)
+#define HSUDC_IRQ_ERR_EN_BASE_REG (0x01B8)
+#define HSUDC_OUT_MAXPACKET_BASE_REG (0x01E0)
+#define HSUDC_OUT_STARTADDR_BASE_REG (0x0300)
+#define HSUDC_IN_STARTADDR_BASE_REG (0x0340)
+#define HSUDC_IN_MAXPACKET_BASE_REG (0x03E0)
+
+/* DMA Base Registers Addresses */
+#define HSUDC_DMA_IRQ_BASE_REG (0x0400)
+#define HSUDC_DMA_CHANNEL_BASE_REG(n) (0x0420 + ((n)*0x10))
+
+/* Endpoints Registers */
+#define HSUDC_EP0_OUTBC_REG8 (HSUDC_EP_CTRL_BASE_REG(0) + 0x0)
+#define HSUDC_EP0_INBC_REG8 (HSUDC_EP_CTRL_BASE_REG(0) + 0x1)
+#define HSUDC_EP0_CS_REG8 (HSUDC_EP_CTRL_BASE_REG(0) + 0x2)
+#define HSUDC_EP0_LPMCTRL_REG16 (HSUDC_EP_CTRL_BASE_REG(0) + 0x4)
+
+/* chgsetup bit indicates change of the contents of the setup data buffer */
+#define HSUDC_EP0_CS_CHGSETUP_MSK 0x80
+#define HSUDC_EP0_CS_DSTALL_MSK 0x10
+#define HSUDC_EP0_CS_OUTBSY_MSK 0x08 /**< read only bit */
+#define HSUDC_EP0_CS_INBSY_MSK 0x04 /**< read only bit */
+#define HSUDC_EP0_CS_HSNAK_MSK 0x02 /* read/write bit, device mode */
+#define HSUDC_EP0_CS_STALL_MSK 0x01 /* endpoint 0 stall bit, device mode */
+
+/* Host Initiated Resume Duration mask */
+#define HSUDC_EP0_LPMCTRL_HIRD_MSK 0x00F0
+/* Host Initiated Resume Duration offset */
+#define HSUDC_EP0_LPMCTRL_HIRD_OFFSET 0x0004
+/* LPM bRemoteWakeup register */
+#define HSUDC_EP0_LPMCTRL_BREMOTEWAKEUP_MSK 0x0100
+/* It reflects value of the lpmnyet bit located in the usbcs(1) register. */
+#define HSUDC_EP0_LPMCTRL_LPMNYET_MSK 0x8000
+
+/* Use special HSUDC_EP0_XXXX for EP0 */
+#define HSUDC_EP_OUTBC_REG16(n) (HSUDC_EP_CTRL_BASE_REG(n) + 0x0)
+#define HSUDC_EP_OUTCON_REG8(n) (HSUDC_EP_CTRL_BASE_REG(n) + 0x2)
+#define HSUDC_EP_OUTCS_REG8(n) (HSUDC_EP_CTRL_BASE_REG(n) + 0x3)
+#define HSUDC_EP_INBC_REG16(n) (HSUDC_EP_CTRL_BASE_REG(n) + 0x4)
+#define HSUDC_EP_INCON_REG8(n) (HSUDC_EP_CTRL_BASE_REG(n) + 0x6)
+#define HSUDC_EP_INCS_REG8(n) (HSUDC_EP_CTRL_BASE_REG(n) + 0x7)
+
+/* Endpoint buffering depth, con register bits 1:0 */
+#define HSUDC_EP_CON_BUF_SINGLE 0x00
+#define HSUDC_EP_CON_BUF_DOUBLE 0x01
+#define HSUDC_EP_CON_BUF_TRIPLE 0x02
+#define HSUDC_EP_CON_BUF_QUAD 0x03
+#define HSUDC_EP_CON_BUF_MSK 0x03
+#define HSUDC_EP_CON_TYPE_ISOCHRONOUS 0x04 /* "01" isochronous endpoint */
+#define HSUDC_EP_CON_TYPE_BULK 0x08 /* "10" bulk endpoint */
+#define HSUDC_EP_CON_TYPE_INTERRUPT 0x0C /* "11" interrupt endpoint */
+#define HSUDC_EP_CON_TYPE_MSK 0x0C
+#define HSUDC_EP_CON_STALL_MSK 0x40 /* OUT x endpoint stall bit */
+#define HSUDC_EP_CON_VAL_MSK 0x80 /* OUT x endpoint valid bit */
+
+/* read only bit, Data sequence error for ISO endpoints */
+#define HSUDC_EP_CS_ERR_MSK 0x01
+#define HSUDC_EP_CS_BUSY_MSK 0x02 /* OUT x endpoint busy bit */
+/* Number of received data packets that are stored in the OUT x buffer memory */
+/* NOTE(review): with NPAK0_OFS == 3, a contiguous two-bit field at bits
+ * 4:3 would be 0x18, not 0x12 -- confirm this mask against the IP datasheet.
+ */
+#define HSUDC_EP_CS_NPAK0_MSK 0x12
+#define HSUDC_EP_CS_NPAK0_OFS 0x03
+#define HSUDC_EP_CS_AUTOOUT_MSK 0x10 /* Auto-OUT bit, device mode */
+
+/* FIFODAT Endpoints Registers */
+/* FIFODAT0 is not available */
+#define HSUDC_FIFODAT_REG32(n) (HSUDC_FIFODAT_BASE_REG + (0x4*(n)))
+
+/* Interrupts Request Registers */
+#define HSUDC_INIRQ_REG16 (HSUDC_IRQ_REQ_BASE_REG + 0x0)
+#define HSUDC_OUTIRQ_REG16 (HSUDC_IRQ_REQ_BASE_REG + 0x2)
+#define HSUDC_USBIRQ_REG8 (HSUDC_IRQ_REQ_BASE_REG + 0x4)
+#define HSUDC_OUT_PNGIRQ_REG16 (HSUDC_IRQ_REQ_BASE_REG + 0x6)
+#define HSUDC_IN_FULLIRQ_REG16 (HSUDC_IRQ_REQ_BASE_REG + 0x8)
+#define HSUDC_OUT_EMPTIRQ_REG16 (HSUDC_IRQ_REQ_BASE_REG + 0xA)
+
+/* SETUP data valid interrupt request, write 1 to clear */
+#define HSUDC_USBIRQ_SUDAV_MSK 0x01
+/* Start-of-frame interrupt request, write 1 to clear */
+#define HSUDC_USBIRQ_SOF_MSK 0x02
+/* SETUP token interrupt request, write 1 to clear */
+#define HSUDC_USBIRQ_SUTOK_MSK 0x04
+/* USB suspend interrupt request, write 1 to clear */
+#define HSUDC_USBIRQ_SUSP_MSK 0x08
+/* USB reset interrupt request, write 1 to clear */
+#define HSUDC_USBIRQ_URES_MSK 0x10
+/* USB high-speed mode interrupt request, write 1 to clear */
+#define HSUDC_USBIRQ_HSPPED_MSK 0x20
+/* Link Power Management interrupt request, write 1 to clear */
+#define HSUDC_USBIRQ_LPMIR_MSK 0x80
+
+/* Interrupts Enable Registers */
+#define HSUDC_INIEN_REG16 (HSUDC_IRQ_EN_BASE_REG + 0x0)
+#define HSUDC_OUTIEN_REG16 (HSUDC_IRQ_EN_BASE_REG + 0x2)
+#define HSUDC_USBIEN_REG8 (HSUDC_IRQ_EN_BASE_REG + 0x4)
+#define HSUDC_OUT_PNGIEN_REG16 (HSUDC_IRQ_EN_BASE_REG + 0x6)
+#define HSUDC_IN_FULLIEN_REG16 (HSUDC_IRQ_EN_BASE_REG + 0x8)
+#define HSUDC_OUT_EMPTIEN_REG16 (HSUDC_IRQ_EN_BASE_REG + 0xA)
+
+/* SETUP data valid interrupt enable, set this bit to enable interrupt */
+#define HSUDC_USBIEN_SUDAVIE_MSK 0x01
+/* Start-of-frame interrupt enable, set this bit to enable interrupt */
+#define HSUDC_USBIEN_SOFIE_MSK 0x02
+/* SETUP token interrupt enable, set this bit to enable interrupt */
+#define HSUDC_USBIEN_SUTOKIE_MSK 0x04
+/* USB suspend interrupt enable, set this bit to enable interrupt */
+#define HSUDC_USBIEN_SUSPIE_MSK 0x08
+/* USB reset interrupt enable, set this bit to enable interrupt */
+#define HSUDC_USBIEN_URESIE_MSK 0x10
+/* USB high speed mode interrupt enable, set this bit to enable interrupt */
+#define HSUDC_USBIEN_HSPIE_MSK 0x20
+/* Link Power Management interrupt request, set this bit to enable interrupt */
+#define HSUDC_USBIEN_LPMIE_MSK 0x80
+
+/* Interrupt Vector Registers */
+#define HSUDC_IVECT_REG8 (HSUDC_IRQ_VEC_BASE_REG + 0x0)
+#define HSUDC_FIFOIVECT_REG8 (HSUDC_IRQ_VEC_BASE_REG + 0x1)
+
+/* The low vector values appear to be (usbirq bit number * 4) */
+#define HSUDC_IVECT_SUDAV 0x00 /* usbirq(0)*/
+#define HSUDC_IVECT_SOF 0x04 /* usbirq(1)*/
+#define HSUDC_IVECT_SUTOK 0x08 /* usbirq(2)*/
+#define HSUDC_IVECT_SUSPEND 0x0C /* usbirq(3)*/
+#define HSUDC_IVECT_RESET 0x10 /* usbirq(4)*/
+#define HSUDC_IVECT_HSPEED 0x14 /* usbirq(5)*/
+/* NOTE(review): 0x16 breaks the *4 spacing of the vectors above --
+ * confirm the OVERFLOWIR vector value against the IP datasheet.
+ */
+#define HSUDC_IVECT_OVERFLOWIR 0x16 /* usbirq(6)*/
+#define HSUDC_IVECT_OTGIRQ 0xD8 /* OTG interrupt*/
+#define HSUDC_IVECT_LPMIRQ 0xDC /* LPM interrupt*/
+
+/* Error Interrupts Registers */
+/* NOTE(review): the ERRIRQ registers start at base + 0x4 while the
+ * ERRIEN registers start at base + 0x0 -- asymmetric; confirm offsets
+ * against the IP datasheet.
+ */
+#define HSUDC_ERRIRQ_IN_REG16 (HSUDC_IRQ_ERR_REG_BASE_REG + 0x4)
+#define HSUDC_ERRIRQ_OUT_REG16 (HSUDC_IRQ_ERR_REG_BASE_REG + 0x6)
+#define HSUDC_ERRIEN_IN_REG16 (HSUDC_IRQ_ERR_EN_BASE_REG + 0x0)
+#define HSUDC_ERRIEN_OUT_REG16 (HSUDC_IRQ_ERR_EN_BASE_REG + 0x2)
+
+/* Control and Status Registers */
+#define HSUDC_ENDPRST_REG8 (HSUDC_CTRL_STAT_BASE_REG + 0x0)
+#define HSUDC_USBCS_REG8 (HSUDC_CTRL_STAT_BASE_REG + 0x1)
+#define HSUDC_FRMNR_REG16 (HSUDC_CTRL_STAT_BASE_REG + 0x2)
+#define HSUDC_FNADDR_REG8 (HSUDC_CTRL_STAT_BASE_REG + 0x4)
+#define HSUDC_CLKGATE_REG8 (HSUDC_CTRL_STAT_BASE_REG + 0x5)
+#define HSUDC_FIFOCTRL_REG8 (HSUDC_CTRL_STAT_BASE_REG + 0x6)
+#define HSUDC_SPEEDCTRL_REG8 (HSUDC_CTRL_STAT_BASE_REG + 0x7)
+
+/* Direction bit '1' - IN endpoint selected, '0' - OUT endpoint selected */
+#define HSUDC_ENDPRST_IO_MSK 0x10
+#define HSUDC_ENDPRST_TOGRST_MSK 0x20 /**< Toggle reset bit */
+#define HSUDC_ENDPRST_FIFORST_MSK 0x40 /**< Fifo reset bit */
+/**< Read access: Data toggle value, Write access: Toggle set bit */
+#define HSUDC_ENDPRST_TOGSETQ_MSK 0x80
+
+/* Send NYET handshake for the LPM transaction */
+#define HSUDC_USBCS_LPMNYET_MSK 0x02
+/* Set Self Powered status bit */
+#define HSUDC_USBCS_SELFPWR_MSK 0x04
+/* read only bit, report remote wakeup enable from host */
+#define HSUDC_USBCS_RWAKEN_MSK 0x08
+/* Set enumeration from FSM */
+#define HSUDC_USBCS_ENUM_MSK 0x10
+
+/* Remote wakeup bit, device mode */
+#define HSUDC_USBCS_SIGRSUME_MSK 0x20
+/* Software disconnect bit, device mode */
+#define HSUDC_USBCS_DISCON_MSK 0x40
+/* Wakeup source. Wakesrc=1 indicates that a wakeup pin resumed the HC.
+ * The microprocessor resets this bit by writing a '1' to it
+ */
+#define HSUDC_USBCS_WAKESRC_MSK 0x80
+
+/* Direction bit '1' - IN endpoint selected, '0' - OUT endpoint selected */
+#define HSUDC_FIFOCTRL_IO_MSK 0x10
+#define HSUDC_FIFOCTRL_FIFOAUTO_MSK 0x20 /* FIFO auto bit */
+#define HSUDC_FIFOCTRL_FIFOCMIT_MSK 0x40 /* FIFO commit bit */
+#define HSUDC_FIFOCTRL_FIFOACC_MSK 0x80 /* FIFO access bit */
+
+#define HSUDC_SPEEDCTRL_FS_MSK 0x02 /* Enable Full-Speed */
+#define HSUDC_SPEEDCTRL_HS_MSK 0x04 /* Enable High-Speed */
+#define HSUDC_SPEEDCTRL_HSDIS_MSK 0x80 /* Disable High-Speed */
+
+/* EPs Maxpacket Registers */
+#define HSUDC_EP0_MAXPCK_REG8 (HSUDC_OUT_MAXPACKET_BASE_REG + 0x00)
+
+/* Use special HSUDC_EP0_MAXPCK for EP0 */
+#define HSUDC_EP_OUT_MAXPCK_REG16(n) (HSUDC_OUT_MAXPACKET_BASE_REG + 0x2*(n))
+#define HSUDC_EP_IN_MAXPCK_REG16(n) (HSUDC_IN_MAXPACKET_BASE_REG + 0x2*(n))
+
+/* EPs Start Address Registers */
+/* EP0 STARTADDR is not available */
+#define HSUDC_EP_OUT_STARTADDR_REG16(n) (HSUDC_OUT_STARTADDR_BASE_REG + 0x4*(n))
+#define HSUDC_EP_IN_STARTADDR_REG16(n) (HSUDC_IN_STARTADDR_BASE_REG + 0x4*(n))
+
+/* DMA Interrupt Registers */
+#define HSUDC_DMA_IEN_REG32 (HSUDC_DMA_IRQ_BASE_REG + 0x00)
+#define HSUDC_DMA_IRQ_REG32 (HSUDC_DMA_IRQ_BASE_REG + 0x04)
+#define HSUDC_DMA_SHORTIEN_REG32 (HSUDC_DMA_IRQ_BASE_REG + 0x08)
+#define HSUDC_DMA_SHORTIRQ_REG32 (HSUDC_DMA_IRQ_BASE_REG + 0x0C)
+#define HSUDC_DMA_IVECT_REG8 (HSUDC_DMA_IRQ_BASE_REG + 0x10)
+#define HSUDC_DMA_ERRORIEN_REG32 (HSUDC_DMA_IRQ_BASE_REG + 0x18)
+#define HSUDC_DMA_ERRORIRQ_REG32 (HSUDC_DMA_IRQ_BASE_REG + 0x1C)
+
+/* DMA interrupt vector fields */
+#define HSUDC_DMA_IVECT_CHANNEL_MASK 0x1F
+#define HSUDC_DMA_IVECT_SHORTPI_MASK 0x20
+#define HSUDC_DMA_IVECT_ERROR_MASK 0x80
+
+/* DMA Channels Registers */
+#define HSUDC_DMA_ADDR_REG32(n) (HSUDC_DMA_CHANNEL_BASE_REG(n) + 0x0)
+#define HSUDC_DMA_CNT_REG32(n) (HSUDC_DMA_CHANNEL_BASE_REG(n) + 0x4)
+#define HSUDC_DMA_MODE_REG8(n) (HSUDC_DMA_CHANNEL_BASE_REG(n) + 0x8)
+#define HSUDC_DMA_ECTRL_REG8(n) (HSUDC_DMA_CHANNEL_BASE_REG(n) + 0x9)
+#define HSUDC_DMA_WORK_REG8(n) (HSUDC_DMA_CHANNEL_BASE_REG(n) + 0xA)
+#define HSUDC_DMA_LINK_REG8(n) (HSUDC_DMA_CHANNEL_BASE_REG(n) + 0xB)
+#define HSUDC_DMA_ENDP_REG8(n) (HSUDC_DMA_CHANNEL_BASE_REG(n) + 0xC)
+#define HSUDC_DMA_BUSCTRL_REG8(n) (HSUDC_DMA_CHANNEL_BASE_REG(n) + 0xD)
+
+#define HSUDC_DMA_MODE_DIRECTION_IN 0x01 /* 1 for IN, 0 for OUT */
+#define HSUDC_DMA_MODE_ADDRESS_CONST 0x00 /* Constant address mode */
+#define HSUDC_DMA_MODE_ADDRESS_INC 0x02 /* Incremental address mode */
+#define HSUDC_DMA_MODE_ADDRESS_DEC 0x04 /* Decremental address mode */
+#define HSUDC_DMA_MODE_ADDRESS_MASK 0x06
+#define HSUDC_DMA_MODE_UNLIMITED 0x10 /* Unlimited transfer mode */
+#define HSUDC_DMA_MODE_SINGLE 0x20 /* Single packet tx mode */
+
+/**** Not Implemented External Transfer Request ****/
+
+#define HSUDC_DMA_WORK_START 1
+#define HSUDC_DMA_WORK_STOP 2
+#define HSUDC_DMA_WORK_RESET 4
+
+#define HSUDC_DMA_LINK_CHANNEL_MASK 0x1F
+#define HSUDC_DMA_LINK_EN 0x80
+
+#define HSUDC_DMA_ENDP_SHIFT 4
+#define HSUDC_DMA_ENDP_MASK 0xF0
+
+#define HSUDC_DMA_BUSCTRL_BIG_ENDIAN 1 /* 1 for big endian, 0 for little */
+#define HSUDC_DMA_BUSCTRL_HSIZE_8BIT (0 << 1) /* 8-bit data transfer */
+#define HSUDC_DMA_BUSCTRL_HSIZE_16BIT (1 << 1) /* 16-bit data transfer */
+#define HSUDC_DMA_BUSCTRL_HSIZE_32BIT (2 << 1) /* 32-bit data transfer */
+#define HSUDC_DMA_BUSCTRL_HSIZE_MASK (3 << 1)
+/* NOTE(review): these values resemble AHB HBURST encodings, where
+ * 2, 4 and 6 are WRAP4/WRAP8/WRAP16 (not incrementing bursts) --
+ * confirm the comments below against the IP datasheet.
+ */
+#define HSUDC_DMA_BUSCTRL_BURST_SINGLE (0 << 4) /* Single transfer */
+#define HSUDC_DMA_BUSCTRL_BURST_INC (1 << 4) /* Incrementing burst */
+#define HSUDC_DMA_BUSCTRL_BURST_4BEAT (2 << 4) /* 4beat incrementing burst */
+#define HSUDC_DMA_BUSCTRL_BURST_8BEAT (4 << 4) /* 8beat incrementing burst */
+#define HSUDC_DMA_BUSCTRL_BURST_16BEAT (6 << 4) /* 16beat incrementing burst */
+#define HSUDC_DMA_BUSCTRL_BURST_MASK (7 << 4)
+
+#endif /* HSUDC_UDC_REGS */
--
1.9.1
--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to majordomo@xxxxxxxxxxxxxxx
More majordomo info at http://vger.kernel.org/majordomo-info.html
Please read the FAQ at http://www.tux.org/lkml/