/*	$NetBSD: dwc2.c,v 1.81 2024/04/05 18:57:10 riastradh Exp $	*/

/*-
 * Copyright (c) 2013 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Nick Hudson
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: dwc2.c,v 1.81 2024/04/05 18:57:10 riastradh Exp $");

#include "opt_usb.h"

#include <sys/param.h>

#include <sys/cpu.h>
#include <sys/device.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/select.h>
#include <sys/sysctl.h>
#include <sys/systm.h>

#include <machine/endian.h>

#include <dev/usb/usb.h>
#include <dev/usb/usbdi.h>
#include <dev/usb/usbdivar.h>
#include <dev/usb/usb_mem.h>
#include <dev/usb/usbroothub.h>

#include <dwc2/dwc2.h>
#include <dwc2/dwc2var.h>

#include "dwc2_core.h"
#include "dwc2_hcd.h"

#ifdef DWC2_COUNTERS
#define	DWC2_EVCNT_ADD(a,b)	((void)((a).ev_count += (b)))
#else
#define	DWC2_EVCNT_ADD(a,b)	do { } while (/*CONSTCOND*/0)
#endif
#define	DWC2_EVCNT_INCR(a)	DWC2_EVCNT_ADD((a), 1)

#ifdef DWC2_DEBUG
#define	DPRINTFN(n,fmt,...) do {			\
	if (dwc2debug >= (n)) {			\
		printf("%s: " fmt,			\
		__FUNCTION__,## __VA_ARGS__);		\
	}						\
} while (0)
#define	DPRINTF(...)	DPRINTFN(1, __VA_ARGS__)
int dwc2debug = 0;

SYSCTL_SETUP(sysctl_hw_dwc2_setup, "sysctl hw.dwc2 setup")
{
	int err;
	const struct sysctlnode *rnode;
	const struct sysctlnode *cnode;

	err = sysctl_createv(clog, 0, NULL, &rnode,
	    CTLFLAG_PERMANENT, CTLTYPE_NODE, "dwc2",
	    SYSCTL_DESCR("dwc2 global controls"),
	    NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL);

	if (err)
		goto fail;

	/* control debugging printfs */
	err = sysctl_createv(clog, 0, &rnode, &cnode,
	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE, CTLTYPE_INT,
	    "debug", SYSCTL_DESCR("Enable debugging output"),
	    NULL, 0, &dwc2debug, sizeof(dwc2debug), CTL_CREATE, CTL_EOL);
	if (err)
		goto fail;

	return;
fail:
	aprint_error("%s: sysctl_createv failed (err = %d)\n", __func__, err);
}
#else
#define	DPRINTF(...) do { } while (0)
#define	DPRINTFN(...) do { } while (0)
#endif

Static usbd_status	dwc2_open(struct usbd_pipe *);
Static void		dwc2_poll(struct usbd_bus *);
Static void		dwc2_softintr(void *);

Static struct usbd_xfer *
			dwc2_allocx(struct usbd_bus *, unsigned int);
Static void		dwc2_freex(struct usbd_bus *, struct usbd_xfer *);
Static void		dwc2_get_lock(struct usbd_bus *, kmutex_t **);
Static bool		dwc2_dying(struct usbd_bus *);
Static int		dwc2_roothub_ctrl(struct usbd_bus *, usb_device_request_t *,
			    void *, int);

Static usbd_status	dwc2_root_intr_transfer(struct usbd_xfer *);
Static usbd_status	dwc2_root_intr_start(struct usbd_xfer *);
Static void		dwc2_root_intr_abort(struct usbd_xfer *);
Static void		dwc2_root_intr_close(struct usbd_pipe *);
Static void		dwc2_root_intr_done(struct usbd_xfer *);

Static usbd_status	dwc2_device_ctrl_transfer(struct usbd_xfer *);
Static usbd_status	dwc2_device_ctrl_start(struct usbd_xfer *);
Static void		dwc2_device_ctrl_abort(struct usbd_xfer *);
Static void		dwc2_device_ctrl_close(struct usbd_pipe *);
Static void		dwc2_device_ctrl_done(struct usbd_xfer *);

Static usbd_status	dwc2_device_bulk_transfer(struct usbd_xfer *);
Static void		dwc2_device_bulk_abort(struct usbd_xfer *);
Static void		dwc2_device_bulk_close(struct usbd_pipe *);
Static void		dwc2_device_bulk_done(struct usbd_xfer *);

Static usbd_status	dwc2_device_intr_transfer(struct usbd_xfer *);
Static usbd_status	dwc2_device_intr_start(struct usbd_xfer *);
Static void		dwc2_device_intr_abort(struct usbd_xfer *);
Static void		dwc2_device_intr_close(struct usbd_pipe *);
Static void		dwc2_device_intr_done(struct usbd_xfer *);

Static usbd_status	dwc2_device_isoc_transfer(struct usbd_xfer *);
Static void		dwc2_device_isoc_abort(struct usbd_xfer *);
Static void		dwc2_device_isoc_close(struct usbd_pipe *);
Static void		dwc2_device_isoc_done(struct usbd_xfer *);

Static usbd_status	dwc2_device_start(struct usbd_xfer *);

Static void		dwc2_close_pipe(struct usbd_pipe *);
Static void		dwc2_abortx(struct usbd_xfer *);

Static void		dwc2_device_clear_toggle(struct usbd_pipe *);
Static void		dwc2_noop(struct usbd_pipe *pipe);

Static int		dwc2_interrupt(struct dwc2_softc *);
Static void		dwc2_rhc(void *);


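/*
 * Periodic bus bandwidth bookkeeping hooks expected by the shared dwc2
 * HCD code; they are no-ops in this port.
 */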
static inline void
dwc2_allocate_bus_bandwidth(struct dwc2_hsotg *hsotg, u16 bw,
			    struct usbd_xfer *xfer)
{
}

static inline void
dwc2_free_bus_bandwidth(struct dwc2_hsotg *hsotg, u16 bw,
			struct usbd_xfer *xfer)
{
}

Static const struct usbd_bus_methods dwc2_bus_methods = {
	.ubm_open =	dwc2_open,
	.ubm_softint =	dwc2_softintr,
	.ubm_dopoll =	dwc2_poll,
	.ubm_allocx =	dwc2_allocx,
	.ubm_freex =	dwc2_freex,
	.ubm_abortx =	dwc2_abortx,
	.ubm_dying =	dwc2_dying,
	.ubm_getlock =	dwc2_get_lock,
	.ubm_rhctrl =	dwc2_roothub_ctrl,
};

Static const struct usbd_pipe_methods dwc2_root_intr_methods = {
	.upm_transfer =	dwc2_root_intr_transfer,
	.upm_start =	dwc2_root_intr_start,
	.upm_abort =	dwc2_root_intr_abort,
	.upm_close =	dwc2_root_intr_close,
	.upm_cleartoggle =	dwc2_noop,
	.upm_done =	dwc2_root_intr_done,
};

Static const struct usbd_pipe_methods dwc2_device_ctrl_methods = {
	.upm_transfer =	dwc2_device_ctrl_transfer,
	.upm_start =	dwc2_device_ctrl_start,
	.upm_abort =	dwc2_device_ctrl_abort,
	.upm_close =	dwc2_device_ctrl_close,
	.upm_cleartoggle =	dwc2_noop,
	.upm_done =	dwc2_device_ctrl_done,
};

Static const struct usbd_pipe_methods dwc2_device_intr_methods = {
	.upm_transfer =	dwc2_device_intr_transfer,
	.upm_start =	dwc2_device_intr_start,
	.upm_abort =	dwc2_device_intr_abort,
	.upm_close =	dwc2_device_intr_close,
	.upm_cleartoggle =	dwc2_device_clear_toggle,
	.upm_done =	dwc2_device_intr_done,
};

Static const struct usbd_pipe_methods dwc2_device_bulk_methods = {
	.upm_transfer =	dwc2_device_bulk_transfer,
	.upm_abort =	dwc2_device_bulk_abort,
	.upm_close =	dwc2_device_bulk_close,
	.upm_cleartoggle =	dwc2_device_clear_toggle,
	.upm_done =	dwc2_device_bulk_done,
};

Static const struct usbd_pipe_methods dwc2_device_isoc_methods = {
	.upm_transfer =	dwc2_device_isoc_transfer,
	.upm_abort =	dwc2_device_isoc_abort,
	.upm_close =	dwc2_device_isoc_close,
	.upm_cleartoggle =	dwc2_noop,
	.upm_done =	dwc2_device_isoc_done,
};

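/*
 * Allocate an xfer from the per-controller pool and attach a dwc2 HCD
 * urb with room for nframes isochronous descriptors.
 */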
struct usbd_xfer *
dwc2_allocx(struct usbd_bus *bus, unsigned int nframes)
{
	struct dwc2_softc *sc = DWC2_BUS2SC(bus);
	struct dwc2_xfer *dxfer;

	DPRINTFN(10, "\n");

	DWC2_EVCNT_INCR(sc->sc_ev_xferpoolget);
	dxfer = pool_cache_get(sc->sc_xferpool, PR_WAITOK);
	if (dxfer != NULL) {
		memset(dxfer, 0, sizeof(*dxfer));
		dxfer->urb = dwc2_hcd_urb_alloc(sc->sc_hsotg,
		    nframes, GFP_KERNEL);
#ifdef DIAGNOSTIC
		dxfer->xfer.ux_state = XFER_BUSY;
#endif
	}
	return (struct usbd_xfer *)dxfer;
}

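/*
 * Release an xfer: free the urb allocated in dwc2_allocx() and return
 * the xfer to the pool.
 */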
void
dwc2_freex(struct usbd_bus *bus, struct usbd_xfer *xfer)
{
	struct dwc2_xfer *dxfer = DWC2_XFER2DXFER(xfer);
	struct dwc2_softc *sc = DWC2_BUS2SC(bus);

	DPRINTFN(10, "\n");

#ifdef DIAGNOSTIC
	if (xfer->ux_state != XFER_BUSY &&
	    xfer->ux_status != USBD_NOT_STARTED) {
		DPRINTF("xfer=%p not busy, 0x%08x\n", xfer, xfer->ux_state);
	}
	xfer->ux_state = XFER_FREE;
#endif
	DWC2_EVCNT_INCR(sc->sc_ev_xferpoolput);
	dwc2_hcd_urb_free(sc->sc_hsotg, dxfer->urb, dxfer->urb->packet_count);
	pool_cache_put(sc->sc_xferpool, xfer);
}

Static bool
dwc2_dying(struct usbd_bus *bus)
{
	struct dwc2_softc *sc = DWC2_BUS2SC(bus);

	return sc->sc_dying;
}

Static void
dwc2_get_lock(struct usbd_bus *bus, kmutex_t **lock)
{
	struct dwc2_softc *sc = DWC2_BUS2SC(bus);

	*lock = &sc->sc_lock;
}

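/*
 * Root hub change soft interrupt: complete the pending root hub
 * interrupt transfer, if any, reporting a change on the single port.
 */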
Static void
dwc2_rhc(void *addr)
{
	struct dwc2_softc *sc = addr;
	struct usbd_xfer *xfer;
	u_char *p;

	DPRINTF("\n");
	mutex_enter(&sc->sc_lock);
	xfer = sc->sc_intrxfer;

	if (xfer == NULL) {
		/* Just ignore the change. */
		mutex_exit(&sc->sc_lock);
		return;
	}
	KASSERT(xfer->ux_status == USBD_IN_PROGRESS);

	/* set port bit */
	p = KERNADDR(&xfer->ux_dmabuf, 0);

	p[0] = 0x02;	/* we only have one port (1 << 1) */

	xfer->ux_actlen = xfer->ux_length;
	xfer->ux_status = USBD_NORMAL_COMPLETION;

	usb_transfer_complete(xfer);
	mutex_exit(&sc->sc_lock);
}

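/*
 * USB soft interrupt: pass the xfers that the hard interrupt path
 * queued on sc_complete to usb_transfer_complete().
 */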
Static void
dwc2_softintr(void *v)
{
	struct usbd_bus *bus = v;
	struct dwc2_softc *sc = DWC2_BUS2SC(bus);
	struct dwc2_hsotg *hsotg = sc->sc_hsotg;
	struct dwc2_xfer *dxfer, *next;
	TAILQ_HEAD(, dwc2_xfer) claimed = TAILQ_HEAD_INITIALIZER(claimed);

	KASSERT(sc->sc_bus.ub_usepolling || mutex_owned(&sc->sc_lock));

	/*
	 * Grab all the xfers that have not been aborted or timed out.
	 * Do so under a single lock -- without dropping it to run
	 * usb_transfer_complete as we go -- so that dwc2_abortx won't
	 * remove next out from under us during iteration when we've
	 * dropped the lock.
	 */
	mutex_spin_enter(&hsotg->lock);
	TAILQ_FOREACH_SAFE(dxfer, &sc->sc_complete, xnext, next) {
		if (!usbd_xfer_trycomplete(&dxfer->xfer))
			/*
			 * The hard interrupt handler decided to
			 * complete the xfer, and put it on sc_complete
			 * to pass it to us in the soft interrupt
			 * handler, but in the time between hard
			 * interrupt and soft interrupt, the xfer was
			 * aborted or timed out and we lost the race.
			 */
			continue;
		KASSERT(dxfer->xfer.ux_status == USBD_IN_PROGRESS);
		KASSERT(dxfer->intr_status != USBD_CANCELLED);
		KASSERT(dxfer->intr_status != USBD_TIMEOUT);
		TAILQ_REMOVE(&sc->sc_complete, dxfer, xnext);
		TAILQ_INSERT_TAIL(&claimed, dxfer, xnext);
	}
	mutex_spin_exit(&hsotg->lock);

	/* Now complete them.  */
	while (!TAILQ_EMPTY(&claimed)) {
		dxfer = TAILQ_FIRST(&claimed);
		KASSERT(dxfer->xfer.ux_status == USBD_IN_PROGRESS);
		KASSERT(dxfer->intr_status != USBD_CANCELLED);
		KASSERT(dxfer->intr_status != USBD_TIMEOUT);
		TAILQ_REMOVE(&claimed, dxfer, xnext);

		dxfer->xfer.ux_status = dxfer->intr_status;
		usb_transfer_complete(&dxfer->xfer);
	}
}

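/*
 * Open a pipe: select the pipe methods for the endpoint's transfer
 * type (or the root hub), and allocate the setup-request DMA buffer
 * for control pipes.
 */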
usbd_status
dwc2_open(struct usbd_pipe *pipe)
{
	struct usbd_device *dev = pipe->up_dev;
	struct dwc2_softc *sc = DWC2_PIPE2SC(pipe);
	struct dwc2_pipe *dpipe = DWC2_PIPE2DPIPE(pipe);
	usb_endpoint_descriptor_t *ed = pipe->up_endpoint->ue_edesc;
	uint8_t addr = dev->ud_addr;
	uint8_t xfertype = UE_GET_XFERTYPE(ed->bmAttributes);

	DPRINTF("pipe %p addr %d xfertype %d dir %s\n", pipe, addr, xfertype,
	    UE_GET_DIR(ed->bEndpointAddress) == UE_DIR_IN ? "in" : "out");

	if (sc->sc_dying) {
		return USBD_IOERROR;
	}

	if (addr == dev->ud_bus->ub_rhaddr) {
		switch (ed->bEndpointAddress) {
		case USB_CONTROL_ENDPOINT:
			pipe->up_methods = &roothub_ctrl_methods;
			break;
		case UE_DIR_IN | USBROOTHUB_INTR_ENDPT:
			pipe->up_methods = &dwc2_root_intr_methods;
			break;
		default:
			DPRINTF("bad bEndpointAddress 0x%02x\n",
			    ed->bEndpointAddress);
			return USBD_INVAL;
		}
		DPRINTF("root hub pipe open\n");
		return USBD_NORMAL_COMPLETION;
	}

	switch (xfertype) {
	case UE_CONTROL:
		pipe->up_methods = &dwc2_device_ctrl_methods;
		int err = usb_allocmem(sc->sc_bus.ub_dmatag, sizeof(usb_device_request_t),
		    0, USBMALLOC_COHERENT, &dpipe->req_dma);
		if (err)
			return USBD_NOMEM;
		break;
	case UE_INTERRUPT:
		pipe->up_methods = &dwc2_device_intr_methods;
		break;
	case UE_ISOCHRONOUS:
		pipe->up_serialise = false;
		pipe->up_methods = &dwc2_device_isoc_methods;
		break;
	case UE_BULK:
		pipe->up_serialise = false;
		pipe->up_methods = &dwc2_device_bulk_methods;
		break;
	default:
		DPRINTF("bad xfer type %d\n", xfertype);
		return USBD_INVAL;
	}

	/* QH */
	dpipe->priv = NULL;

	return USBD_NORMAL_COMPLETION;
}

Static void
dwc2_poll(struct usbd_bus *bus)
{
	struct dwc2_softc *sc = DWC2_BUS2SC(bus);
	struct dwc2_hsotg *hsotg = sc->sc_hsotg;

	mutex_spin_enter(&hsotg->lock);
	dwc2_interrupt(sc);
	mutex_spin_exit(&hsotg->lock);
}

/*
 * Close a regular pipe.
 * Assumes that there are no pending transactions.
 */
Static void
dwc2_close_pipe(struct usbd_pipe *pipe)
{
	struct dwc2_softc *sc __diagused = pipe->up_dev->ud_bus->ub_hcpriv;

	KASSERT(mutex_owned(&sc->sc_lock));
}

/*
 * Abort a device request.
 */
Static void
dwc2_abortx(struct usbd_xfer *xfer)
{
	struct dwc2_xfer *dxfer = DWC2_XFER2DXFER(xfer);
	struct dwc2_softc *sc = DWC2_XFER2SC(xfer);
	struct dwc2_hsotg *hsotg = sc->sc_hsotg;
	struct dwc2_xfer *d;
	int err;

	DPRINTF("xfer %p pipe %p status 0x%08x\n", xfer, xfer->ux_pipe,
	    xfer->ux_status);

	KASSERT(mutex_owned(&sc->sc_lock));
	ASSERT_SLEEPABLE();

	KASSERTMSG((xfer->ux_status == USBD_CANCELLED ||
		xfer->ux_status == USBD_TIMEOUT),
	    "bad abort status: %d", xfer->ux_status);

	mutex_spin_enter(&hsotg->lock);

	/*
	 * Check whether we aborted or timed out after the hardware
	 * completion interrupt determined that it's done but before
	 * the soft interrupt could actually complete it.  If so, it's
	 * too late for the soft interrupt -- at this point we've
	 * already committed to abort it or time it out, so we need to
	 * take it off the softint's list of work in case the caller,
	 * say, frees the xfer before the softint runs.
	 *
	 * This logic is unusual among host controller drivers, and
	 * happens because dwc2 decides to complete xfers in the hard
	 * interrupt handler rather than in the soft interrupt handler,
	 * but usb_transfer_complete must be deferred to softint -- and
	 * we happened to swoop in between the hard interrupt and the
	 * soft interrupt.  Other host controller drivers do almost all
	 * processing in the softint so there's no intermediate stage.
	 *
	 * Fortunately, this linear search to discern the intermediate
	 * stage is not likely to be a serious performance impact
	 * because it happens only on abort or timeout.
	 */
	TAILQ_FOREACH(d, &sc->sc_complete, xnext) {
		if (d == dxfer) {
			TAILQ_REMOVE(&sc->sc_complete, dxfer, xnext);
			break;
		}
	}

	/*
	 * If we're dying, skip the hardware action and just notify the
	 * software that we're done.
	 */
	if (sc->sc_dying) {
		DPRINTFN(4, "xfer %p dying 0x%08x\n", xfer, xfer->ux_status);
		goto dying;
	}

	/*
	 * Handle the hardware.
	 */
	err = dwc2_hcd_urb_dequeue(hsotg, dxfer->urb);
	if (err) {
		DPRINTF("dwc2_hcd_urb_dequeue failed\n");
	}

dying:
	mutex_spin_exit(&hsotg->lock);
	KASSERT(mutex_owned(&sc->sc_lock));
}

Static void
dwc2_noop(struct usbd_pipe *pipe)
{

}

Static void
dwc2_device_clear_toggle(struct usbd_pipe *pipe)
{

	DPRINTF("toggle %d -> 0\n", pipe->up_endpoint->ue_toggle);
}

/***********************************************************************/

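/*
 * Root hub control requests: handle the product string descriptor
 * locally, hand hub class requests to dwc2_hcd_hub_control(), and
 * leave everything else to the generic usbroothub code by returning
 * buflen.
 */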
Static int
dwc2_roothub_ctrl(struct usbd_bus *bus, usb_device_request_t *req,
    void *buf, int buflen)
{
	struct dwc2_softc *sc = bus->ub_hcpriv;
	usbd_status err = USBD_IOERROR;
	uint16_t len, value, index;
	int totlen = 0;

	if (sc->sc_dying)
		return -1;

	DPRINTFN(4, "type=0x%02x request=%02x\n",
	    req->bmRequestType, req->bRequest);

	len = UGETW(req->wLength);
	value = UGETW(req->wValue);
	index = UGETW(req->wIndex);

#define C(x,y) ((x) | ((y) << 8))
	switch (C(req->bRequest, req->bmRequestType)) {
	case C(UR_GET_DESCRIPTOR, UT_READ_DEVICE):
		DPRINTFN(8, "wValue=0x%04x\n", value);

		if (len == 0)
			break;
		switch (value) {
#define sd ((usb_string_descriptor_t *)buf)
		case C(2, UDESC_STRING):
			/* Product */
			totlen = usb_makestrdesc(sd, len, "DWC2 root hub");
			break;
#undef sd
		default:
			/* default from usbroothub */
			return buflen;
		}
		break;

	case C(UR_GET_CONFIG, UT_READ_DEVICE):
	case C(UR_GET_INTERFACE, UT_READ_INTERFACE):
	case C(UR_GET_STATUS, UT_READ_INTERFACE):
	case C(UR_GET_STATUS, UT_READ_ENDPOINT):
	case C(UR_SET_ADDRESS, UT_WRITE_DEVICE):
	case C(UR_SET_CONFIG, UT_WRITE_DEVICE):
		/* default from usbroothub */
		DPRINTFN(4, "returning %d (usbroothub default)\n", buflen);

		return buflen;

	default:
		/* Hub requests */
		err = dwc2_hcd_hub_control(sc->sc_hsotg,
		    C(req->bRequest, req->bmRequestType), value, index,
		    buf, len);
		if (err) {
			return -1;
		}
		totlen = len;
	}

	return totlen;
}

Static usbd_status
dwc2_root_intr_transfer(struct usbd_xfer *xfer)
{

	DPRINTF("\n");

	/* Pipe isn't running, start first */
	return dwc2_root_intr_start(SIMPLEQ_FIRST(&xfer->ux_pipe->up_queue));
}

Static usbd_status
dwc2_root_intr_start(struct usbd_xfer *xfer)
{
	struct dwc2_softc *sc = DWC2_XFER2SC(xfer);

	DPRINTF("\n");

	KASSERT(sc->sc_bus.ub_usepolling || mutex_owned(&sc->sc_lock));

	if (sc->sc_dying)
		return USBD_IOERROR;

	KASSERT(sc->sc_intrxfer == NULL);
	sc->sc_intrxfer = xfer;
	xfer->ux_status = USBD_IN_PROGRESS;

	return USBD_IN_PROGRESS;
}

/* Abort a root interrupt request. */
Static void
dwc2_root_intr_abort(struct usbd_xfer *xfer)
{
	struct dwc2_softc *sc __diagused = DWC2_XFER2SC(xfer);

	DPRINTF("xfer=%p\n", xfer);

	KASSERT(mutex_owned(&sc->sc_lock));
	KASSERT(xfer->ux_pipe->up_intrxfer == xfer);

	/* If xfer has already completed, nothing to do here.  */
	if (sc->sc_intrxfer == NULL)
		return;

	/*
	 * Otherwise, sc->sc_intrxfer had better be this transfer.
	 * Cancel it.
	 */
	KASSERT(sc->sc_intrxfer == xfer);
	KASSERT(xfer->ux_status == USBD_IN_PROGRESS);
	xfer->ux_status = USBD_CANCELLED;
	usb_transfer_complete(xfer);
}

Static void
dwc2_root_intr_close(struct usbd_pipe *pipe)
{
	struct dwc2_softc *sc __diagused = DWC2_PIPE2SC(pipe);

	DPRINTF("\n");

	KASSERT(mutex_owned(&sc->sc_lock));

	/*
	 * Caller must guarantee the xfer has completed first, by
	 * closing the pipe only after normal completion or an abort.
	 */
	KASSERT(sc->sc_intrxfer == NULL);
}

Static void
dwc2_root_intr_done(struct usbd_xfer *xfer)
{
	struct dwc2_softc *sc = DWC2_XFER2SC(xfer);

	DPRINTF("\n");

	/* Claim the xfer so it doesn't get completed again.  */
	KASSERT(sc->sc_intrxfer == xfer);
	KASSERT(xfer->ux_status != USBD_IN_PROGRESS);
	sc->sc_intrxfer = NULL;
}

/***********************************************************************/

Static usbd_status
dwc2_device_ctrl_transfer(struct usbd_xfer *xfer)
{

	DPRINTF("\n");

	/* Pipe isn't running, start first */
	return dwc2_device_ctrl_start(SIMPLEQ_FIRST(&xfer->ux_pipe->up_queue));
}

Static usbd_status
dwc2_device_ctrl_start(struct usbd_xfer *xfer)
{
	struct dwc2_softc *sc = DWC2_XFER2SC(xfer);
	usbd_status err;

	DPRINTF("\n");

	KASSERT(sc->sc_bus.ub_usepolling || mutex_owned(&sc->sc_lock));

	xfer->ux_status = USBD_IN_PROGRESS;
	err = dwc2_device_start(xfer);
	if (err)
		return err;

	return USBD_IN_PROGRESS;
}

Static void
dwc2_device_ctrl_abort(struct usbd_xfer *xfer)
{
	struct dwc2_softc *sc __diagused = DWC2_XFER2SC(xfer);

	KASSERT(mutex_owned(&sc->sc_lock));

	DPRINTF("xfer=%p\n", xfer);
	usbd_xfer_abort(xfer);
}

Static void
dwc2_device_ctrl_close(struct usbd_pipe *pipe)
{
	struct dwc2_pipe * const dpipe = DWC2_PIPE2DPIPE(pipe);

	DPRINTF("pipe=%p\n", pipe);
	dwc2_close_pipe(pipe);

	usb_freemem(&dpipe->req_dma);
}

Static void
dwc2_device_ctrl_done(struct usbd_xfer *xfer)
{

	DPRINTF("xfer=%p\n", xfer);
}

/***********************************************************************/

Static usbd_status
dwc2_device_bulk_transfer(struct usbd_xfer *xfer)
{

	DPRINTF("xfer=%p\n", xfer);

	KASSERT(xfer->ux_status == USBD_NOT_STARTED);
	xfer->ux_status = USBD_IN_PROGRESS;
	return dwc2_device_start(xfer);
}

Static void
dwc2_device_bulk_abort(struct usbd_xfer *xfer)
{
	struct dwc2_softc *sc __diagused = DWC2_XFER2SC(xfer);

	KASSERT(mutex_owned(&sc->sc_lock));

	DPRINTF("xfer=%p\n", xfer);
	usbd_xfer_abort(xfer);
}

Static void
dwc2_device_bulk_close(struct usbd_pipe *pipe)
{

	DPRINTF("pipe=%p\n", pipe);

	dwc2_close_pipe(pipe);
}

Static void
dwc2_device_bulk_done(struct usbd_xfer *xfer)
{

	DPRINTF("xfer=%p\n", xfer);
}

/***********************************************************************/

Static usbd_status
dwc2_device_intr_transfer(struct usbd_xfer *xfer)
{

	DPRINTF("xfer=%p\n", xfer);

	/* Pipe isn't running, start first */
	return dwc2_device_intr_start(SIMPLEQ_FIRST(&xfer->ux_pipe->up_queue));
}

Static usbd_status
dwc2_device_intr_start(struct usbd_xfer *xfer)
{
	struct dwc2_pipe *dpipe = DWC2_XFER2DPIPE(xfer);
	struct usbd_device *dev = dpipe->pipe.up_dev;
	struct dwc2_softc *sc = dev->ud_bus->ub_hcpriv;
	usbd_status err;

	KASSERT(sc->sc_bus.ub_usepolling || mutex_owned(&sc->sc_lock));

	xfer->ux_status = USBD_IN_PROGRESS;
	err = dwc2_device_start(xfer);
	if (err)
		return err;

	return USBD_IN_PROGRESS;
}

/* Abort a device interrupt request. */
Static void
dwc2_device_intr_abort(struct usbd_xfer *xfer)
{
	struct dwc2_softc *sc __diagused = DWC2_XFER2SC(xfer);

	KASSERT(mutex_owned(&sc->sc_lock));

	DPRINTF("xfer=%p\n", xfer);
	usbd_xfer_abort(xfer);
}

Static void
dwc2_device_intr_close(struct usbd_pipe *pipe)
{

	DPRINTF("pipe=%p\n", pipe);

	dwc2_close_pipe(pipe);
}

Static void
dwc2_device_intr_done(struct usbd_xfer *xfer)
{

	DPRINTF("\n");
}

/***********************************************************************/

usbd_status
dwc2_device_isoc_transfer(struct usbd_xfer *xfer)
{

	DPRINTF("xfer=%p\n", xfer);

	KASSERT(xfer->ux_status == USBD_NOT_STARTED);
	xfer->ux_status = USBD_IN_PROGRESS;
	return dwc2_device_start(xfer);
}

void
dwc2_device_isoc_abort(struct usbd_xfer *xfer)
{
	struct dwc2_softc *sc __diagused = DWC2_XFER2SC(xfer);
	KASSERT(mutex_owned(&sc->sc_lock));

	DPRINTF("xfer=%p\n", xfer);
	usbd_xfer_abort(xfer);
}

void
dwc2_device_isoc_close(struct usbd_pipe *pipe)
{
	DPRINTF("\n");

	dwc2_close_pipe(pipe);
}

void
dwc2_device_isoc_done(struct usbd_xfer *xfer)
{

	DPRINTF("\n");
}


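/*
 * Common start path for all transfer types: build a dwc2 HCD urb
 * describing the xfer and enqueue it on the endpoint's QH, creating
 * the QH on the pipe's first transfer.
 */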
usbd_status
dwc2_device_start(struct usbd_xfer *xfer)
{
	struct dwc2_xfer *dxfer = DWC2_XFER2DXFER(xfer);
	struct dwc2_pipe *dpipe = DWC2_XFER2DPIPE(xfer);
	struct dwc2_softc *sc = DWC2_XFER2SC(xfer);
	struct dwc2_hsotg *hsotg = sc->sc_hsotg;
	struct dwc2_hcd_urb *dwc2_urb;

	struct usbd_device *dev = xfer->ux_pipe->up_dev;
	usb_endpoint_descriptor_t *ed = xfer->ux_pipe->up_endpoint->ue_edesc;
	uint8_t addr = dev->ud_addr;
	uint8_t xfertype = UE_GET_XFERTYPE(ed->bmAttributes);
	uint8_t epnum = UE_GET_ADDR(ed->bEndpointAddress);
	uint8_t dir = UE_GET_DIR(ed->bEndpointAddress);
	uint16_t mps = UE_GET_SIZE(UGETW(ed->wMaxPacketSize));
	uint32_t len;

	uint32_t flags = 0;
	uint32_t off = 0;
	int retval, err;
	int alloc_bandwidth = 0;

	DPRINTFN(1, "xfer=%p pipe=%p\n", xfer, xfer->ux_pipe);

	KASSERT(sc->sc_bus.ub_usepolling || mutex_owned(&sc->sc_lock));

	if (xfertype == UE_ISOCHRONOUS ||
	    xfertype == UE_INTERRUPT) {
		mutex_spin_enter(&hsotg->lock);
		if (!dwc2_hcd_is_bandwidth_allocated(hsotg, xfer))
			alloc_bandwidth = 1;
		mutex_spin_exit(&hsotg->lock);
	}

	/*
	 * For Control pipe the direction is from the request, all other
	 * transfers have been set correctly at pipe open time.
	 */
	if (xfertype == UE_CONTROL) {
		usb_device_request_t *req = &xfer->ux_request;

		DPRINTFN(3, "xfer=%p type=0x%02x request=0x%02x wValue=0x%04x "
		    "wIndex=0x%04x len=%d addr=%d endpt=%d dir=%s speed=%d "
		    "mps=%d\n",
		    xfer, req->bmRequestType, req->bRequest, UGETW(req->wValue),
		    UGETW(req->wIndex), UGETW(req->wLength), dev->ud_addr,
		    epnum, dir == UT_READ ? "in" : "out", dev->ud_speed, mps);

		/* Copy request packet to our DMA buffer */
		memcpy(KERNADDR(&dpipe->req_dma, 0), req, sizeof(*req));
		usb_syncmem(&dpipe->req_dma, 0, sizeof(*req),
		    BUS_DMASYNC_PREWRITE);
		len = UGETW(req->wLength);
		if ((req->bmRequestType & UT_READ) == UT_READ) {
			dir = UE_DIR_IN;
		} else {
			dir = UE_DIR_OUT;
		}

		DPRINTFN(3, "req = %p dma = %" PRIxBUSADDR " len %d dir %s\n",
		    KERNADDR(&dpipe->req_dma, 0), DMAADDR(&dpipe->req_dma, 0),
		    len, dir == UE_DIR_IN ? "in" : "out");
	} else if (xfertype == UE_ISOCHRONOUS) {
		DPRINTFN(3, "xfer=%p nframes=%d flags=%d addr=%d endpt=%d,"
		    " mps=%d dir %s\n", xfer, xfer->ux_nframes, xfer->ux_flags, addr,
		    epnum, mps, dir == UT_READ ? "in" : "out");

#ifdef DIAGNOSTIC
		len = 0;
		for (size_t i = 0; i < xfer->ux_nframes; i++)
			len += xfer->ux_frlengths[i];
		if (len != xfer->ux_length)
			panic("len (%d) != xfer->ux_length (%d)", len,
			    xfer->ux_length);
#endif
		len = xfer->ux_length;
	} else {
		DPRINTFN(3, "xfer=%p len=%d flags=%d addr=%d endpt=%d,"
		    " mps=%d dir %s\n", xfer, xfer->ux_length, xfer->ux_flags, addr,
		    epnum, mps, dir == UT_READ ? "in" : "out");

		len = xfer->ux_length;
	}

	dwc2_urb = dxfer->urb;
	if (!dwc2_urb)
		return USBD_NOMEM;

	KASSERT(dwc2_urb->packet_count == xfer->ux_nframes);
	memset(dwc2_urb, 0, sizeof(*dwc2_urb) +
	    sizeof(dwc2_urb->iso_descs[0]) * dwc2_urb->packet_count);

	dwc2_urb->priv = xfer;
	dwc2_urb->packet_count = xfer->ux_nframes;

	dwc2_hcd_urb_set_pipeinfo(hsotg, dwc2_urb, addr, epnum, xfertype, dir,
	    mps);

	if (xfertype == UE_CONTROL) {
		dwc2_urb->setup_usbdma = &dpipe->req_dma;
		dwc2_urb->setup_packet = KERNADDR(&dpipe->req_dma, 0);
		dwc2_urb->setup_dma = DMAADDR(&dpipe->req_dma, 0);
	} else {
		/* XXXNH - % mps required? */
		if ((xfer->ux_flags & USBD_FORCE_SHORT_XFER) && (len % mps) == 0)
			flags |= URB_SEND_ZERO_PACKET;
	}
	flags |= URB_GIVEBACK_ASAP;

	/*
	 * control transfers with no data phase don't touch usbdma, but
	 * everything else does.
	 */
	if (!(xfertype == UE_CONTROL && len == 0)) {
		dwc2_urb->usbdma = &xfer->ux_dmabuf;
		dwc2_urb->buf = KERNADDR(dwc2_urb->usbdma, 0);
		dwc2_urb->dma = DMAADDR(dwc2_urb->usbdma, 0);

		usb_syncmem(&xfer->ux_dmabuf, 0, len,
		    dir == UE_DIR_IN ?
			BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
	}
	dwc2_urb->length = len;
	dwc2_urb->flags = flags;
	dwc2_urb->status = -EINPROGRESS;

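	/*
	 * Work out the polling interval for periodic endpoints.  For
	 * high-speed endpoints and full-speed isochronous endpoints,
	 * bInterval is an exponent (interval = 2^(bInterval-1)
	 * (micro)frames); otherwise it is used as-is, with a floor of
	 * 10 for interrupt endpoints.
	 */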
	if (xfertype == UE_INTERRUPT ||
	    xfertype == UE_ISOCHRONOUS) {
		uint16_t ival;

		if (xfertype == UE_INTERRUPT &&
		    dpipe->pipe.up_interval != USBD_DEFAULT_INTERVAL) {
			ival = dpipe->pipe.up_interval;
		} else {
			ival = ed->bInterval;
		}

		if (ival < 1) {
			retval = -ENODEV;
			goto fail;
		}
		if (dev->ud_speed == USB_SPEED_HIGH ||
		   (dev->ud_speed == USB_SPEED_FULL && xfertype == UE_ISOCHRONOUS)) {
			if (ival > 16) {
				/*
				 * illegal with HS/FS, but there were
				 * documentation bugs in the spec
				 */
				ival = 256;
			} else {
				ival = (1 << (ival - 1));
			}
		} else {
			if (xfertype == UE_INTERRUPT && ival < 10)
				ival = 10;
		}
		dwc2_urb->interval = ival;
	}

	/* XXXNH bring down from callers?? */
// 	mutex_enter(&sc->sc_lock);

	xfer->ux_actlen = 0;

	KASSERT(xfertype != UE_ISOCHRONOUS ||
	    xfer->ux_nframes <= dwc2_urb->packet_count);
	KASSERTMSG(xfer->ux_nframes == 0 || xfertype == UE_ISOCHRONOUS,
	    "nframes %d xfertype %d\n", xfer->ux_nframes, xfertype);

	off = 0;
	for (size_t i = 0; i < xfer->ux_nframes; ++i) {
		DPRINTFN(3, "xfer=%p frame=%zd offset=%d length=%d\n", xfer, i,
		    off, xfer->ux_frlengths[i]);

		dwc2_hcd_urb_set_iso_desc_params(dwc2_urb, i, off,
		    xfer->ux_frlengths[i]);
		off += xfer->ux_frlengths[i];
	}

	struct dwc2_qh *qh = dpipe->priv;
	struct dwc2_qtd *qtd;
	bool qh_allocated = false;

	/* Create QH for the endpoint if it doesn't exist */
	if (!qh) {
		qh = dwc2_hcd_qh_create(hsotg, dwc2_urb, GFP_ATOMIC);
		if (!qh) {
			retval = -ENOMEM;
			goto fail;
		}
		dpipe->priv = qh;
		qh_allocated = true;
	}

	qtd = pool_cache_get(sc->sc_qtdpool, PR_NOWAIT);
	if (!qtd) {
		retval = -ENOMEM;
		goto fail1;
	}
	memset(qtd, 0, sizeof(*qtd));

	/* might need to check cpu_intr_p */
	mutex_spin_enter(&hsotg->lock);
	retval = dwc2_hcd_urb_enqueue(hsotg, dwc2_urb, qh, qtd);
	if (retval)
		goto fail2;
	xfer->ux_status = USBD_IN_PROGRESS;
	usbd_xfer_schedule_timeout(xfer);

	if (alloc_bandwidth) {
		dwc2_allocate_bus_bandwidth(hsotg,
				dwc2_hcd_get_ep_bandwidth(hsotg, dpipe),
				xfer);
	}
	mutex_spin_exit(&hsotg->lock);

	return USBD_IN_PROGRESS;

fail2:
	dwc2_urb->priv = NULL;
	mutex_spin_exit(&hsotg->lock);
	pool_cache_put(sc->sc_qtdpool, qtd);

fail1:
	if (qh_allocated) {
		dpipe->priv = NULL;
		dwc2_hcd_qh_free(hsotg, qh);
	}
fail:

	switch (retval) {
	case -EINVAL:
	case -ENODEV:
		err = USBD_INVAL;
		break;
	case -ENOMEM:
		err = USBD_NOMEM;
		break;
	default:
		err = USBD_IOERROR;
	}

	return err;
}

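/*
 * Hard interrupt entry point.  Take the core's spin lock and dispatch
 * to the HCD and common interrupt handlers, unless the bus is being
 * polled, in which case the interrupts are simply acknowledged.
 */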
int
dwc2_intr(void *p)
{
	struct dwc2_softc *sc = p;
	struct dwc2_hsotg *hsotg;
	int ret = 0;

	if (sc == NULL)
		return 0;

	hsotg = sc->sc_hsotg;
	mutex_spin_enter(&hsotg->lock);

	if (sc->sc_dying || !device_has_power(sc->sc_dev))
		goto done;

	if (sc->sc_bus.ub_usepolling) {
		uint32_t intrs;

		intrs = dwc2_read_core_intr(hsotg);
		DWC2_WRITE_4(hsotg, GINTSTS, intrs);
	} else {
		ret = dwc2_interrupt(sc);
	}

done:
	mutex_spin_exit(&hsotg->lock);

	return ret;
}

int
dwc2_interrupt(struct dwc2_softc *sc)
{
	int ret = 0;

	if (sc->sc_hcdenabled) {
		ret |= dwc2_handle_hcd_intr(sc->sc_hsotg);
	}

	ret |= dwc2_handle_common_intr(sc->sc_hsotg);

	return ret;
}

/***********************************************************************/

int
dwc2_detach(struct dwc2_softc *sc, int flags)
{
	int rv = 0;

	if (sc->sc_child != NULL)
		rv = config_detach(sc->sc_child, flags);

	return rv;
}

bool
dwc2_shutdown(device_t self, int flags)
{
	struct dwc2_softc *sc = device_private(self);

	sc = sc;

	return true;
}

void
dwc2_childdet(device_t self, device_t child)
{
	struct dwc2_softc *sc = device_private(self);

	sc = sc;
}

int
dwc2_activate(device_t self, enum devact act)
{
	struct dwc2_softc *sc = device_private(self);

	sc = sc;

	return 0;
}

bool
dwc2_resume(device_t dv, const pmf_qual_t *qual)
{
	struct dwc2_softc *sc = device_private(dv);

	sc = sc;

	return true;
}

bool
dwc2_suspend(device_t dv, const pmf_qual_t *qual)
{
	struct dwc2_softc *sc = device_private(dv);

	sc = sc;

	return true;
}

/***********************************************************************/
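/*
 * Attach-time initialization: set up the usbd_bus glue, locks, pools
 * and soft interrupt, then detect the core's hardware parameters and
 * initialize the host side of the shared dwc2 code.
 */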
int
dwc2_init(struct dwc2_softc *sc)
{
	int err = 0;

	err = linux_workqueue_init();
	if (err)
		return err;

	sc->sc_bus.ub_hcpriv = sc;
	sc->sc_bus.ub_revision = USBREV_2_0;
	sc->sc_bus.ub_methods = &dwc2_bus_methods;
	sc->sc_bus.ub_pipesize = sizeof(struct dwc2_pipe);
	sc->sc_bus.ub_usedma = true;
	sc->sc_hcdenabled = false;

	mutex_init(&sc->sc_lock, MUTEX_DEFAULT, IPL_SOFTUSB);

	TAILQ_INIT(&sc->sc_complete);

	sc->sc_rhc_si = softint_establish(SOFTINT_USB | SOFTINT_MPSAFE,
	    dwc2_rhc, sc);

	sc->sc_xferpool = pool_cache_init(sizeof(struct dwc2_xfer), 0, 0, 0,
	    "dwc2xfer", NULL, IPL_USB, NULL, NULL, NULL);
	sc->sc_qhpool = pool_cache_init(sizeof(struct dwc2_qh), 0, 0, 0,
	    "dwc2qh", NULL, IPL_USB, NULL, NULL, NULL);
	sc->sc_qtdpool = pool_cache_init(sizeof(struct dwc2_qtd), 0, 0, 0,
	    "dwc2qtd", NULL, IPL_USB, NULL, NULL, NULL);

	sc->sc_hsotg = kmem_zalloc(sizeof(struct dwc2_hsotg), KM_SLEEP);
	sc->sc_hsotg->hsotg_sc = sc;
	sc->sc_hsotg->dev = sc->sc_dev;
	sc->sc_hcdenabled = true;

	struct dwc2_hsotg *hsotg = sc->sc_hsotg;
	struct dwc2_core_params defparams;
	int retval;

	if (sc->sc_params == NULL) {
		/* Default all params to autodetect */
		dwc2_set_all_params(&defparams, -1);
		sc->sc_params = &defparams;

		/*
		 * Disable descriptor dma mode by default as the HW can support
		 * it, but does not support it for SPLIT transactions.
		 */
		defparams.dma_desc_enable = 0;
	}
	hsotg->dr_mode = USB_DR_MODE_HOST;

	/* Detect config values from hardware */
	retval = dwc2_get_hwparams(hsotg);
	if (retval) {
		goto fail2;
	}

	hsotg->core_params = kmem_zalloc(sizeof(*hsotg->core_params), KM_SLEEP);
	dwc2_set_all_params(hsotg->core_params, -1);

	/* Validate parameter values */
	dwc2_set_parameters(hsotg, sc->sc_params);

#if IS_ENABLED(CONFIG_USB_DWC2_PERIPHERAL) || \
    IS_ENABLED(CONFIG_USB_DWC2_DUAL_ROLE)
	if (hsotg->dr_mode != USB_DR_MODE_HOST) {
		retval = dwc2_gadget_init(hsotg);
		if (retval)
			goto fail2;
		hsotg->gadget_enabled = 1;
	}
#endif
#if IS_ENABLED(CONFIG_USB_DWC2_HOST) || \
    IS_ENABLED(CONFIG_USB_DWC2_DUAL_ROLE)
	if (hsotg->dr_mode != USB_DR_MODE_PERIPHERAL) {
		retval = dwc2_hcd_init(hsotg);
		if (retval) {
			if (hsotg->gadget_enabled)
				dwc2_hsotg_remove(hsotg);
			goto fail2;
		}
		hsotg->hcd_enabled = 1;
	}
#endif

	uint32_t snpsid = hsotg->hw_params.snpsid;
	aprint_verbose_dev(sc->sc_dev, "Core Release: %x.%x%x%x (snpsid=%x)\n",
	    snpsid >> 12 & 0xf, snpsid >> 8 & 0xf,
	    snpsid >> 4 & 0xf, snpsid & 0xf, snpsid);

	return 0;

fail2:
	err = -retval;
	kmem_free(sc->sc_hsotg, sizeof(struct dwc2_hsotg));
	softint_disestablish(sc->sc_rhc_si);

	return err;
}

#if 0
/*
 * curmode is a mode indication bit 0 = device, 1 = host
 */
static const char * const intnames[32] = {
	"curmode",	"modemis",	"otgint",	"sof",
	"rxflvl",	"nptxfemp",	"ginnakeff",	"goutnakeff",
	"ulpickint",	"i2cint",	"erlysusp",	"usbsusp",
	"usbrst",	"enumdone",	"isooutdrop",	"eopf",
	"restore_done",	"epmis",	"iepint",	"oepint",
	"incompisoin",	"incomplp",	"fetsusp",	"resetdet",
	"prtint",	"hchint",	"ptxfemp",	"lpm",
	"conidstschng",	"disconnint",	"sessreqint",	"wkupint"
};


/***********************************************************************/

#endif

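/*
 * Callbacks used by the shared dwc2 HCD code: report the upstream hub
 * address and port a device is attached to, and the device's speed.
 */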
void dwc2_host_hub_info(struct dwc2_hsotg *hsotg, void *context, int *hub_addr,
			int *hub_port)
{
	struct usbd_xfer *xfer = context;
	struct dwc2_pipe *dpipe = DWC2_XFER2DPIPE(xfer);
	struct usbd_device *dev = dpipe->pipe.up_dev;

	*hub_addr = dev->ud_myhsport->up_parent->ud_addr;
	*hub_port = dev->ud_myhsport->up_portno;
}

int dwc2_host_get_speed(struct dwc2_hsotg *hsotg, void *context)
{
	struct usbd_xfer *xfer = context;
	struct dwc2_pipe *dpipe = DWC2_XFER2DPIPE(xfer);
	struct usbd_device *dev = dpipe->pipe.up_dev;

	return dev->ud_speed;
}

/*
 * Sets the final status of an URB and returns it to the upper layer. Any
 * required cleanup of the URB is performed.
 *
 * Must be called with interrupt disabled and spinlock held
 */
void dwc2_host_complete(struct dwc2_hsotg *hsotg, struct dwc2_qtd *qtd,
    int status)
{
	struct usbd_xfer *xfer;
	struct dwc2_xfer *dxfer;
	struct dwc2_softc *sc;
	usb_endpoint_descriptor_t *ed;
	uint8_t xfertype;

	KASSERT(mutex_owned(&hsotg->lock));

	if (!qtd) {
		dev_dbg(hsotg->dev, "## %s: qtd is NULL ##\n", __func__);
		return;
	}

	if (!qtd->urb) {
		dev_dbg(hsotg->dev, "## %s: qtd->urb is NULL ##\n", __func__);
		return;
	}

	xfer = qtd->urb->priv;
	if (!xfer) {
		dev_dbg(hsotg->dev, "## %s: urb->priv is NULL ##\n", __func__);
		return;
	}

	dxfer = DWC2_XFER2DXFER(xfer);
	sc = DWC2_XFER2SC(xfer);
	ed = xfer->ux_pipe->up_endpoint->ue_edesc;
	xfertype = UE_GET_XFERTYPE(ed->bmAttributes);

	struct dwc2_hcd_urb *urb = qtd->urb;
	xfer->ux_actlen = dwc2_hcd_urb_get_actual_length(urb);

	DPRINTFN(3, "xfer=%p actlen=%d\n", xfer, xfer->ux_actlen);

	if (xfertype == UE_ISOCHRONOUS) {
		xfer->ux_actlen = 0;
		for (size_t i = 0; i < xfer->ux_nframes; ++i) {
			xfer->ux_frlengths[i] =
				dwc2_hcd_urb_get_iso_desc_actual_length(
						urb, i);
			DPRINTFN(1, "xfer=%p frame=%zu length=%d\n", xfer, i,
			    xfer->ux_frlengths[i]);
			xfer->ux_actlen += xfer->ux_frlengths[i];
		}
		DPRINTFN(1, "xfer=%p actlen=%d (isoc)\n", xfer, xfer->ux_actlen);
	}

	if (xfertype == UE_ISOCHRONOUS && dbg_perio()) {
		for (size_t i = 0; i < xfer->ux_nframes; i++)
			dev_vdbg(hsotg->dev, " ISO Desc %zu status %d\n",
				 i, urb->iso_descs[i].status);
	}

	if (!status) {
		if (!(xfer->ux_flags & USBD_SHORT_XFER_OK) &&
		    xfer->ux_actlen < xfer->ux_length)
			status = -EIO;
	}

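	/*
	 * Map the Linux-style errno from the core to a usbd_status for
	 * the completion path.
	 */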
	switch (status) {
	case 0:
		dxfer->intr_status = USBD_NORMAL_COMPLETION;
		break;
	case -EPIPE:
		dxfer->intr_status = USBD_STALLED;
		break;
	case -EPROTO:
		dxfer->intr_status = USBD_INVAL;
		break;
	case -EIO:
		dxfer->intr_status = USBD_IOERROR;
		break;
	case -EOVERFLOW:
		dxfer->intr_status = USBD_IOERROR;
		break;
	default:
		dxfer->intr_status = USBD_IOERROR;
		printf("%s: unknown error status %d\n", __func__, status);
	}

	if (dxfer->intr_status == USBD_NORMAL_COMPLETION) {
		/*
		 * control transfers with no data phase don't touch dmabuf, but
		 * everything else does.
		 */
		if (!(xfertype == UE_CONTROL &&
		    UGETW(xfer->ux_request.wLength) == 0) &&
		    xfer->ux_actlen > 0	/* XXX PR/53503 */
		    ) {
			int rd = usbd_xfer_isread(xfer);

			usb_syncmem(&xfer->ux_dmabuf, 0, xfer->ux_actlen,
			    rd ? BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
		}
	}

	if (xfertype == UE_ISOCHRONOUS ||
	    xfertype == UE_INTERRUPT) {
		struct dwc2_pipe *dpipe = DWC2_XFER2DPIPE(xfer);

		dwc2_free_bus_bandwidth(hsotg,
					dwc2_hcd_get_ep_bandwidth(hsotg, dpipe),
					xfer);
	}

	qtd->urb = NULL;
	KASSERT(mutex_owned(&hsotg->lock));

	TAILQ_INSERT_TAIL(&sc->sc_complete, dxfer, xnext);

	mutex_spin_exit(&hsotg->lock);
	usb_schedsoftintr(&sc->sc_bus);
	mutex_spin_enter(&hsotg->lock);
}


int
_dwc2_hcd_start(struct dwc2_hsotg *hsotg)
{
	dev_dbg(hsotg->dev, "DWC OTG HCD START\n");

	mutex_spin_enter(&hsotg->lock);

	hsotg->lx_state = DWC2_L0;

	if (dwc2_is_device_mode(hsotg)) {
		mutex_spin_exit(&hsotg->lock);
		return 0;	/* why 0 ?? */
	}

	dwc2_hcd_reinit(hsotg);

	mutex_spin_exit(&hsotg->lock);
	return 0;
}

int dwc2_host_is_b_hnp_enabled(struct dwc2_hsotg *hsotg)
{

	return false;
}