2 * MUSB OTG driver peripheral support
4 * Copyright 2005 Mentor Graphics Corporation
5 * Copyright (C) 2005-2006 by Texas Instruments
6 * Copyright (C) 2006-2007 Nokia Corporation
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * version 2 as published by the Free Software Foundation.
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
22 * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
23 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
24 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
25 * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
27 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
28 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
29 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
31 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
35 #include <linux/kernel.h>
36 #include <linux/list.h>
37 #include <linux/timer.h>
38 #include <linux/module.h>
39 #include <linux/smp.h>
40 #include <linux/spinlock.h>
41 #include <linux/delay.h>
42 #include <linux/moduleparam.h>
43 #include <linux/stat.h>
44 #include <linux/dma-mapping.h>
46 #include "musb_core.h"
49 /* MUSB PERIPHERAL status 3-mar-2006:
51 * - EP0 seems solid. It passes both USBCV and usbtest control cases.
54 * + remote wakeup to Linux hosts work, but saw USBCV failures;
55 * in one test run (operator error?)
56 * + endpoint halt tests -- in both usbtest and usbcv -- seem
57 * to break when dma is enabled ... is something wrongly
60 * - Mass storage behaved ok when last tested. Network traffic patterns
61 * (with lots of short transfers etc) need retesting; they turn up the
62 * worst cases of the DMA, since short packets are typical but are not
66 * + both pio and dma behave in with network and g_zero tests
67 * + no cppi throughput issues other than no-hw-queueing
68 * + failed with FLAT_REG (DaVinci)
69 * + seems to behave with double buffering, PIO -and- CPPI
70 * + with gadgetfs + AIO, requests got lost?
73 * + both pio and dma behave in with network and g_zero tests
74 * + dma is slow in typical case (short_not_ok is clear)
75 * + double buffering ok with PIO
76 * + double buffering *FAILS* with CPPI, wrong data bytes sometimes
77 * + request lossage observed with gadgetfs
79 * - ISO not tested ... might work, but only weakly isochronous
81 * - Gadget driver disabling of softconnect during bind() is ignored; so
82 * drivers can't hold off host requests until userspace is ready.
83 * (Workaround: they can turn it off later.)
85 * - PORTABILITY (assumes PIO works):
86 * + DaVinci, basically works with cppi dma
87 * + OMAP 2430, ditto with mentor dma
88 * + TUSB 6010, platform-specific dma in the works
91 /* ----------------------------------------------------------------------- */
94 * Immediately complete a request.
96 * @param request the request to complete
97 * @param status the status to complete the request with
98 * Context: controller locked, IRQs blocked.
/*
 * NOTE(review): this excerpt has lines elided (the endpoint parameter,
 * braces and some statements are not visible); comments below describe
 * only the visible code.
 */
100 void musb_g_giveback(
102 struct usb_request *request,
104 __releases(ep->musb->lock)
105 __acquires(ep->musb->lock)
107 struct musb_request *req;
110 req = to_musb_request(request);
/* clear the driver-private "transfer finished" marker before giveback */
111 req->complete = false;
113 list_del(&request->list);
/* don't overwrite a status set earlier (e.g. by an unlink/abort path) */
114 if (req->request.status == -EINPROGRESS)
115 req->request.status = status;
/* drop the controller lock around the gadget driver's completion
 * callback; the sparse annotations above document this */
118 spin_unlock(&musb->lock);
119 if (request->status == 0) {
120 DBG(5, "%s done request %p, %d/%d\n",
121 ep->name, request, req->request.actual,
122 req->request.length);
124 DBG(2, "%s request %p, %d/%d fault %d\n",
126 req->request.actual, req->request.length,
128 req->request.complete(&req->ep->end_point, &req->request);
129 spin_lock(&musb->lock);
133 * start_dma - starts dma for a transfer
134 * @musb: musb controller pointer
135 * @epnum: endpoint number to kick dma
136 * @req: musb request to be received
138 * Context: controller locked, IRQs blocked, endpoint selected
/*
 * Programs a DMA channel for @req, falling back to PIO (the elided
 * early-return paths) for interrupt endpoints, unaligned buffers and
 * transfers too small to be worth the setup cost.
 * NOTE(review): lines are elided in this excerpt; comments describe
 * only the visible code.
 */
140 static int start_dma(struct musb *musb, struct musb_request *req)
142 struct musb_ep *musb_ep = req->ep;
143 struct dma_controller *cntr = musb->dma_controller;
144 struct musb_hw_ep *hw_ep = musb_ep->hw_ep;
145 struct dma_channel *dma;
147 size_t transfer_size;
/* no DMA configured at all: caller must use PIO */
151 if (!musb->use_dma || musb->dma_controller == NULL)
154 if (musb_ep->type == USB_ENDPOINT_XFER_INT) {
155 DBG(5, "not allocating dma for interrupt endpoint\n");
/* the DMA engine cannot handle odd-aligned buffers */
159 if (((unsigned long) req->request.buf) & 0x01) {
160 DBG(5, "unaligned buffer %p for %s\n", req->request.buf,
165 packet_sz = musb_ep->packet_sz;
166 transfer_size = req->request.length;
/* short transfers aren't worth the DMA setup cost */
168 if (transfer_size < packet_sz ||
169 (transfer_size == packet_sz && packet_sz < 512)) {
170 DBG(4, "small transfer, using pio\n");
174 epio = musb->endpoints[musb_ep->current_epnum].regs;
175 if (!musb_ep->is_in) {
176 csr = musb_readw(epio, MUSB_RXCSR);
178 /* If RXPKTRDY we might have something already waiting
179 * in the fifo. If that something is less than packet_sz
180 * it means we only have a short packet waiting in the fifo,
181 * so we unload it with pio.
183 if (csr & MUSB_RXCSR_RXPKTRDY) {
186 count = musb_readw(epio, MUSB_RXCOUNT);
187 if (count < packet_sz) {
188 DBG(4, "small packet in FIFO (%d bytes), "
189 "using PIO\n", count);
195 dma = cntr->channel_alloc(cntr, hw_ep, musb_ep->is_in);
197 DBG(4, "unable to allocate dma channel for %s\n",
/* clamp to what the channel can move in one programming */
202 if (transfer_size > dma->max_len)
203 transfer_size = dma->max_len;
/* map the buffer lazily, or just re-sync it if already mapped */
205 if (req->request.dma == DMA_ADDR_INVALID) {
206 req->request.dma = dma_map_single(musb->controller,
214 dma_sync_single_for_device(musb->controller,
217 musb_ep->is_in ? DMA_TO_DEVICE :
/* TX: enable DMA requests, mode 1, AUTOSET so TxPktRdy is set by HW */
222 if (musb_ep->is_in) {
223 csr = musb_readw(epio, MUSB_TXCSR);
224 csr |= MUSB_TXCSR_DMAENAB | MUSB_TXCSR_DMAMODE;
225 csr |= MUSB_TXCSR_AUTOSET | MUSB_TXCSR_MODE;
226 csr &= ~MUSB_TXCSR_P_UNDERRUN;
227 musb_writew(epio, MUSB_TXCSR, csr);
229 /* We only use mode1 dma and assume we never know the size of
230 * the data we're receiving. For anything else, we're gonna use
234 /* this special sequence is necessary to get DMAReq to
237 csr = musb_readw(epio, MUSB_RXCSR);
238 csr |= MUSB_RXCSR_AUTOCLEAR;
239 musb_writew(epio, MUSB_RXCSR, csr);
241 csr |= MUSB_RXCSR_DMAENAB;
242 musb_writew(epio, MUSB_RXCSR, csr);
244 csr |= MUSB_RXCSR_DMAMODE;
245 musb_writew(epio, MUSB_RXCSR, csr);
246 musb_writew(epio, MUSB_RXCSR, csr);
248 csr = musb_readw(epio, MUSB_RXCSR);
/* program the channel: packet_sz granularity, mode 1 (true) */
253 (void) cntr->channel_program(dma, packet_sz, true, req->request.dma,
256 DBG(4, "%s dma started (addr 0x%08x, len %u, CSR %04x)\n",
257 musb_ep->name, req->request.dma, transfer_size, csr);
263 * stop_dma - stops a dma transfer and unmaps a buffer
264 * @musb: the musb controller pointer
265 * @ep: the endpoint being used
266 * @req: the request to stop
/*
 * Tears down DMA state after a transfer: unmap (or CPU-sync) the buffer,
 * clear the endpoint's DMA control bits and release the channel.
 * NOTE(review): lines are elided in this excerpt; comments describe
 * only the visible code.
 */
268 static void stop_dma(struct musb *musb, struct musb_ep *ep,
269 struct musb_request *req)
273 DBG(4, "%s dma stopped (addr 0x%08x, len %d)\n", ep->name,
274 req->request.dma, req->request.actual);
/* if we mapped the buffer in start_dma(), unmap it and mark it so;
 * otherwise only sync it back for CPU access */
277 dma_unmap_single(musb->controller, req->request.dma,
278 req->request.actual, req->tx ?
279 DMA_TO_DEVICE : DMA_FROM_DEVICE);
280 req->request.dma = DMA_ADDR_INVALID;
283 dma_sync_single_for_cpu(musb->controller, req->request.dma,
284 req->request.actual, req->tx ?
285 DMA_TO_DEVICE : DMA_FROM_DEVICE);
288 epio = musb->endpoints[ep->current_epnum].regs;
/* TX: clear DMA enable/autoset first, then DMAMODE in a second write
 * (the two-step sequence matches the controller's requirements) */
292 csr = musb_readw(epio, MUSB_TXCSR);
293 csr &= ~(MUSB_TXCSR_DMAENAB | MUSB_TXCSR_AUTOSET);
294 musb_writew(epio, MUSB_TXCSR, csr | MUSB_TXCSR_P_WZC_BITS);
295 csr &= ~MUSB_TXCSR_DMAMODE;
296 musb_writew(epio, MUSB_TXCSR, csr | MUSB_TXCSR_P_WZC_BITS);
/* RX: same two-step teardown for the receive direction */
300 csr = musb_readw(epio, MUSB_RXCSR);
301 csr &= ~(MUSB_RXCSR_DMAENAB | MUSB_RXCSR_AUTOCLEAR);
302 musb_writew(epio, MUSB_RXCSR, csr | MUSB_RXCSR_P_WZC_BITS);
303 csr &= ~MUSB_RXCSR_DMAMODE;
304 musb_writew(epio, MUSB_RXCSR, csr | MUSB_RXCSR_P_WZC_BITS);
/* give the channel back to the DMA controller */
307 musb->dma_controller->channel_release(ep->dma);
312 * Abort requests queued to an endpoint using the status. Synchronous.
313 * caller locked controller and blocked irqs, and selected this ep.
/*
 * NOTE(review): lines are elided in this excerpt; comments describe
 * only the visible code.
 */
315 static void nuke(struct musb_ep *ep, const int status)
318 struct musb_request *req = NULL;
322 epio = musb->endpoints[ep->current_epnum].regs;
325 DBG(2, "%s nuke, DMA %p RxCSR %04x TxCSR %04x\n", ep->name, ep->dma,
326 musb_readw(epio, MUSB_RXCSR), musb_readw(epio, MUSB_TXCSR));
/* abort any DMA in flight before flushing the FIFO */
328 struct dma_controller *c = musb->dma_controller;
330 BUG_ON(next_request(ep) == NULL);
331 req = to_musb_request(next_request(ep));
332 (void) c->channel_abort(ep->dma);
333 stop_dma(musb, ep, req);
/* flush is written twice, presumably for double buffering — TODO confirm */
338 csr = musb_readw(epio, MUSB_TXCSR);
339 musb_writew(epio, MUSB_TXCSR, MUSB_TXCSR_DMAENAB
340 | MUSB_TXCSR_FLUSHFIFO);
341 musb_writew(epio, MUSB_TXCSR, MUSB_TXCSR_FLUSHFIFO);
342 if (csr & MUSB_TXCSR_TXPKTRDY) {
343 /* If TxPktRdy was set, an extra IRQ was just
344 * generated. This IRQ will confuse things if
345 * we don't handle it before a new TX request
346 * is started. So we clear it here, in a bit
347 * unsafe fashion (if nuke() is called outside
348 * musb_interrupt(), we might have a delay in
349 * handling other TX EPs.) */
350 musb->int_tx |= musb_readw(musb->mregs,
352 musb->int_tx &= ~(1 << ep->current_epnum);
355 musb_writew(epio, MUSB_RXCSR, MUSB_RXCSR_DMAENAB
356 | MUSB_RXCSR_FLUSHFIFO);
357 musb_writew(epio, MUSB_RXCSR, MUSB_RXCSR_FLUSHFIFO);
361 musb_writew(epio, MUSB_TXCSR, 0);
363 musb_writew(epio, MUSB_RXCSR, 0);
/* any packet that arrived before a request was queued is now stale */
365 ep->rx_pending = false;
/* give back every queued request with the abort status */
367 while (!list_empty(&(ep->req_list))) {
368 req = container_of(ep->req_list.next, struct musb_request,
370 musb_g_giveback(ep, &req->request, status);
374 /* ----------------------------------------------------------------------- */
376 /* Data transfers - pure PIO, pure DMA, or mixed mode */
379 * This assumes the separate CPPI engine is responding to DMA requests
380 * from the usb core ... sequenced a bit differently from mentor dma.
/* Largest chunk do_pio_tx() may load into the FIFO at once: the hardware
 * TX max packet size when bulk splitting is in effect, otherwise the
 * endpoint's configured packet size. */
383 static inline int max_ep_writesize(struct musb *musb, struct musb_ep *ep)
385 if (can_bulk_split(musb, ep->type))
386 return ep->hw_ep->max_packet_sz_tx;
388 return ep->packet_sz;
392 * do_pio_tx - kicks TX pio transfer
393 * @musb: musb controller pointer
394 * @req: the request to be transferred via pio
396 * An endpoint is transmitting data. This can be called from
399 * Context: controller locked, IRQs blocked, endpoint selected
/*
 * Loads at most one (possibly bulk-split) packet from the request buffer
 * into the TX FIFO and sets TxPktRdy.
 * NOTE(review): lines are elided in this excerpt; comments describe
 * only the visible code.
 */
401 static void do_pio_tx(struct musb *musb, struct musb_request *req)
403 u8 epnum = req->epnum;
404 struct musb_ep *musb_ep;
405 void __iomem *epio = musb->endpoints[epnum].regs;
406 struct usb_request *request;
407 u16 fifo_count = 0, csr;
411 /* read TXCSR before */
412 csr = musb_readw(epio, MUSB_TXCSR);
414 request = &req->request;
/* one packet per call, bounded by the bytes still to send */
416 fifo_count = min(max_ep_writesize(musb, musb_ep),
417 (int)(request->length - request->actual));
/* can't load the FIFO while the previous packet is still pending */
419 if (csr & MUSB_TXCSR_TXPKTRDY) {
420 DBG(5, "%s old packet still ready , txcsr %03x\n",
425 if (csr & MUSB_TXCSR_P_SENDSTALL) {
426 DBG(5, "%s stalling, txcsr %03x\n",
431 DBG(4, "hw_ep%d, maxpacket %d, fifo count %d, txcsr %03x\n",
432 epnum, musb_ep->packet_sz, fifo_count,
435 musb_write_fifo(musb_ep->hw_ep, fifo_count,
436 (u8 *) (request->buf + request->actual));
437 request->actual += fifo_count;
438 csr |= MUSB_TXCSR_TXPKTRDY;
439 /* REVISIT wasn't this cleared by musb_g_tx() ? */
440 csr &= ~MUSB_TXCSR_P_UNDERRUN;
441 musb_writew(epio, MUSB_TXCSR, csr);
443 /* host may already have the data when this message shows... */
444 DBG(3, "%s TX/IN pio len %d/%d, txcsr %04x, fifo %d/%d\n",
446 request->actual, request->length,
447 musb_readw(epio, MUSB_TXCSR),
449 musb_readw(epio, MUSB_TXMAXP))
453 * Context: controller locked, IRQs blocked.
/* Kick the transfer for a request at the head of the queue: select the
 * endpoint, try DMA first, and fall back to PIO when start_dma() refuses.
 * NOTE(review): only the TX PIO fallback is visible here — the RX
 * fallback path, if any, is elided in this excerpt. */
455 static void musb_ep_restart(struct musb *musb, struct musb_request *req)
457 DBG(3, "<== TX/IN request %p len %u on hw_ep%d%s\n",
458 &req->request, req->request.length, req->epnum,
459 req->ep->dma ? " (dma)" : "(pio)");
461 musb_ep_select(musb->mregs, req->epnum);
463 if (start_dma(musb, req) < 0)
464 do_pio_tx(musb, req);
468 * FIFO state update (e.g. data ready).
469 * Called from IRQ, with controller locked.
/*
 * TX (IN) endpoint interrupt service: handles sent-stall, underrun and
 * DMA completion, inserts a zero-length packet when needed, completes
 * the request and then starts the next queued request or PIO chunk.
 * NOTE(review): lines are elided in this excerpt; comments describe
 * only the visible code.
 */
471 void musb_g_tx(struct musb *musb, u8 epnum)
474 struct musb_request *req;
475 struct usb_request *request;
476 u8 __iomem *mbase = musb->mregs;
477 struct musb_ep *musb_ep = &musb->endpoints[epnum].ep_in;
478 void __iomem *epio = musb->endpoints[epnum].regs;
479 struct dma_channel *dma;
482 musb_ep_select(mbase, epnum);
483 request = next_request(musb_ep);
485 csr = musb_readw(epio, MUSB_TXCSR);
487 DBG(4, "<== %s, TxCSR %04x, DMA %p\n", musb_ep->name, csr, dma);
489 if (csr & MUSB_TXCSR_P_SENDSTALL) {
490 DBG(5, "%s stalling, txcsr %04x\n",
495 /* REVISIT for high bandwidth, MUSB_TXCSR_P_INCOMPTX
496 * probably rates reporting as a host error
498 if (csr & MUSB_TXCSR_P_SENTSTALL) {
499 DBG(5, "ep%d is halted, cannot transfer\n", epnum);
500 csr |= MUSB_TXCSR_P_WZC_BITS;
501 csr &= ~MUSB_TXCSR_P_SENTSTALL;
502 musb_writew(epio, MUSB_TXCSR, csr);
/* a stall while DMA was active: abort the channel and clean up */
504 BUG_ON(request == NULL);
505 dma->status = MUSB_DMA_STATUS_CORE_ABORT;
506 musb->dma_controller->channel_abort(dma);
507 stop_dma(musb, musb_ep, to_musb_request(request));
511 if (request && musb_ep->stalled)
512 musb_g_giveback(musb_ep, request, -EPIPE);
517 if (csr & MUSB_TXCSR_P_UNDERRUN) {
518 /* we NAKed, no big deal ... little reason to care */
519 csr |= MUSB_TXCSR_P_WZC_BITS;
520 csr &= ~MUSB_TXCSR_P_UNDERRUN;
521 musb_writew(epio, MUSB_TXCSR, csr);
522 DBG(2, "underrun on ep%d, req %p\n", epnum, request);
525 /* The interrupt is generated when this bit gets cleared,
526 * if we fall here while TXPKTRDY is still set, then that's
527 * a really messed up case. One such case seems to be due to
528 * the HW -- sometimes the IRQ is generated early.
531 while (csr & MUSB_TXCSR_TXPKTRDY) {
534 DBG(1, "TX IRQ while TxPktRdy still set "
535 "(CSR %04x)\n", csr);
538 csr = musb_readw(epio, MUSB_TXCSR);
541 if (dma != NULL && dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
542 /* SHOULD NOT HAPPEN ... has with cppi though, after
543 * changing SENDSTALL (and other cases); harmless?
545 DBG(3, "%s dma still busy?\n", musb_ep->name);
549 if (request == NULL) {
550 DBG(2, "%s, spurious TX IRQ", musb_ep->name);
554 req = to_musb_request(request);
/* DMA completion path: account transferred bytes, then decide whether
 * a short or zero-length packet must still be sent by hand */
557 int short_packet = 0;
559 BUG_ON(!(csr & MUSB_TXCSR_DMAENAB));
561 request->actual += dma->actual_len;
562 DBG(4, "TxCSR%d %04x, dma finished, len %zu, req %p\n",
563 epnum, csr, dma->actual_len, request);
565 stop_dma(musb, musb_ep, req);
567 WARN(request->actual != request->length,
568 "actual %d length %d\n", request->actual,
/* a residue that isn't a packet_sz multiple already ends the transfer */
571 if (request->length % musb_ep->packet_sz)
574 req->complete = true;
575 if (request->zero || short_packet) {
576 csr = musb_readw(epio, MUSB_TXCSR);
577 DBG(4, "sending zero pkt, DMA, TxCSR %04x\n", csr);
578 musb_writew(epio, MUSB_TXCSR,
579 csr | MUSB_TXCSR_TXPKTRDY);
584 if (request->actual == request->length) {
585 if (!req->complete) {
586 /* Maybe we have to send a zero length packet */
587 if (request->zero && request->length &&
588 (request->length % musb_ep->packet_sz) == 0) {
589 csr = musb_readw(epio, MUSB_TXCSR);
590 DBG(4, "sending zero pkt, TxCSR %04x\n", csr);
591 musb_writew(epio, MUSB_TXCSR,
592 csr | MUSB_TXCSR_TXPKTRDY);
593 req->complete = true;
/* request fully done: hand it back and look for more work */
598 musb_g_giveback(musb_ep, request, 0);
601 request = musb_ep->desc ? next_request(musb_ep) : NULL;
603 DBG(4, "%s idle now\n", musb_ep->name);
606 musb_ep_restart(musb, to_musb_request(request));
/* still data left in the current request: push the next PIO chunk */
610 do_pio_tx(musb, to_musb_request(request));
613 /* ------------------------------------------------------------ */
616 * do_pio_rx - kicks RX pio transfer
617 * @musb: musb controller pointer
618 * @req: the request to be transferred via pio
620 * Context: controller locked, IRQs blocked, endpoint selected
/*
 * Unloads up to one packet from the RX FIFO into the request buffer,
 * acks it, and gives the request back when it is complete or a short
 * packet arrived.
 * NOTE(review): lines are elided in this excerpt; comments describe
 * only the visible code.
 */
622 static void do_pio_rx(struct musb *musb, struct musb_request *req)
625 const u8 epnum = req->epnum;
626 struct usb_request *request = &req->request;
627 struct musb_ep *musb_ep = &musb->endpoints[epnum].ep_out;
628 void __iomem *epio = musb->endpoints[epnum].regs;
629 unsigned fifo_count = 0;
630 u16 count = musb_ep->packet_sz;
633 csr = musb_readw(epio, MUSB_RXCSR);
635 /* RxPktRdy should be the only possibility here.
636 * Sometimes the IRQ is generated before
637 * RxPktRdy gets set, so we'll wait a while. */
638 while (!(csr & MUSB_RXCSR_RXPKTRDY)) {
639 if (retries-- == 0) {
640 DBG(1, "RxPktRdy did not get set (CSR %04x)\n", csr);
641 BUG_ON(!(csr & MUSB_RXCSR_RXPKTRDY));
643 csr = musb_readw(epio, MUSB_RXCSR);
/* copy min(bytes in FIFO, space left in the request) */
648 count = musb_readw(epio, MUSB_RXCOUNT);
649 if (request->actual < request->length) {
650 fifo_count = request->length - request->actual;
651 DBG(3, "%s OUT/RX pio fifo %d/%d, maxpacket %d\n",
656 fifo_count = min_t(unsigned, count, fifo_count);
658 musb_read_fifo(musb_ep->hw_ep, fifo_count,
659 (u8 *) (request->buf + request->actual));
660 request->actual += fifo_count;
662 /* REVISIT if we left anything in the fifo, flush
663 * it and report -EOVERFLOW
/* ack the packet: clear RxPktRdy so the host can send the next one */
667 csr |= MUSB_RXCSR_P_WZC_BITS;
668 csr &= ~MUSB_RXCSR_RXPKTRDY;
669 musb_writew(epio, MUSB_RXCSR, csr);
674 /* we just received a short packet, it's ok to
675 * giveback() the request already
677 if (request->actual == request->length || count < musb_ep->packet_sz)
678 musb_g_giveback(musb_ep, request, 0);
682 * Data ready for a request; called from IRQ
/*
 * RX (OUT) endpoint interrupt service. @is_dma flags a DMA-completion
 * call as opposed to a FIFO interrupt. Handles sent-stall, ISO overrun
 * and incomplete-RX, then finishes the DMA transfer or unloads the FIFO
 * with PIO, and starts DMA for the next packet when possible.
 * NOTE(review): lines are elided in this excerpt; comments describe
 * only the visible code.
 */
684 void musb_g_rx(struct musb *musb, u8 epnum, bool is_dma)
687 struct musb_request *req;
688 struct usb_request *request;
689 void __iomem *mbase = musb->mregs;
690 struct musb_ep *musb_ep = &musb->endpoints[epnum].ep_out;
691 void __iomem *epio = musb->endpoints[epnum].regs;
692 struct dma_channel *dma;
694 musb_ep_select(mbase, epnum);
696 csr = musb_readw(epio, MUSB_RXCSR);
699 DBG(3, "spurious IRQ\n");
/* data arrived before a request was queued: remember it so
 * musb_gadget_queue() can process it once a request shows up */
703 request = next_request(musb_ep);
705 DBG(1, "waiting for request for %s (csr %04x)\n",
707 musb_ep->rx_pending = true;
713 DBG(4, "<== %s, rxcsr %04x %p (dma %s, %s)\n", musb_ep->name,
714 csr, request, dma ? "enabled" : "disabled",
715 is_dma ? "true" : "false");
717 if (csr & MUSB_RXCSR_P_SENTSTALL) {
718 DBG(5, "ep%d is halted, cannot transfer\n", epnum);
719 csr |= MUSB_RXCSR_P_WZC_BITS;
720 csr &= ~MUSB_RXCSR_P_SENTSTALL;
721 musb_writew(epio, MUSB_RXCSR, csr);
/* stall with DMA still busy: abort the channel */
724 dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
725 dma->status = MUSB_DMA_STATUS_CORE_ABORT;
726 musb->dma_controller->channel_abort(dma);
729 if (musb_ep->stalled)
730 musb_g_giveback(musb_ep, request, -EPIPE);
734 if (csr & MUSB_RXCSR_P_OVERRUN) {
735 /* csr |= MUSB_RXCSR_P_WZC_BITS; */
736 csr &= ~MUSB_RXCSR_P_OVERRUN;
737 musb_writew(epio, MUSB_RXCSR, csr);
739 DBG(3, "%s iso overrun on %p\n", musb_ep->name, request);
740 if (request->status == -EINPROGRESS)
741 request->status = -EOVERFLOW;
744 if (csr & MUSB_RXCSR_INCOMPRX) {
745 /* REVISIT not necessarily an error */
746 DBG(4, "%s, incomprx\n", musb_ep->name);
749 req = to_musb_request(request);
751 BUG_ON(dma == NULL && (csr & MUSB_RXCSR_DMAENAB));
756 /* We don't handle stalls yet. */
757 BUG_ON(csr & MUSB_RXCSR_P_SENDSTALL);
759 /* We abort() so dma->actual_len gets updated */
760 musb->dma_controller->channel_abort(dma);
762 /* We only expect full packets. */
763 BUG_ON(dma->actual_len & (musb_ep->packet_sz - 1));
765 request->actual += dma->actual_len;
766 len = dma->actual_len;
768 stop_dma(musb, musb_ep, req);
771 DBG(4, "RXCSR%d %04x, dma off, %04x, len %zu, req %p\n",
772 epnum, csr, musb_readw(epio, MUSB_RXCSR), len, request);
775 /* Unload with pio */
776 do_pio_rx(musb, req);
778 BUG_ON(request->actual != request->length);
779 musb_g_giveback(musb_ep, request, 0);
/* no channel yet: try to start DMA for this request */
784 if (dma == NULL && musb->use_dma) {
785 if (start_dma(musb, req) == 0)
790 do_pio_rx(musb, req);
/* another packet may already be waiting; service it right away */
791 csr = musb_readw(epio, MUSB_RXCSR);
792 if (csr & MUSB_RXCSR_RXPKTRDY) {
793 DBG(2, "new packet in FIFO, restarting RX "
794 "(CSR %04x)\n", csr);
800 /* ------------------------------------------------------------ */
/*
 * usb_ep_ops .enable: configure and arm an endpoint from its descriptor —
 * validate the endpoint number, record type and maxpacket, unmask the
 * endpoint interrupt, program MAXP and the CSR (flush FIFO, clear data
 * toggle, set ISO/INT mode bits as needed).
 * Returns 0 on success, -EINVAL on a bad descriptor (visible default).
 * NOTE(review): lines are elided in this excerpt; comments describe
 * only the visible code.
 */
802 static int musb_gadget_enable(struct usb_ep *ep,
803 const struct usb_endpoint_descriptor *desc)
806 struct musb_ep *musb_ep;
807 struct musb_hw_ep *hw_ep;
814 int status = -EINVAL;
819 DBG(1, "===> enabling %s\n", ep->name);
821 musb_ep = to_musb_ep(ep);
822 hw_ep = musb_ep->hw_ep;
824 musb = musb_ep->musb;
826 epnum = musb_ep->current_epnum;
828 spin_lock_irqsave(&musb->lock, flags);
834 musb_ep->type = usb_endpoint_type(desc);
836 /* check direction and (later) maxpacket size against endpoint */
837 if (usb_endpoint_num(desc) != epnum)
840 /* REVISIT this rules out high bandwidth periodic transfers */
841 tmp = le16_to_cpu(desc->wMaxPacketSize);
844 musb_ep->packet_sz = tmp;
846 /* enable the interrupts for the endpoint, set the endpoint
847 * packet size (or fail), set the mode, clear the fifo
849 musb_ep_select(mbase, epnum);
850 if (usb_endpoint_dir_in(desc)) {
851 u16 int_txe = musb_readw(mbase, MUSB_INTRTXE);
853 if (hw_ep->is_shared_fifo)
/* reject a maxpacket larger than the hardware FIFO supports */
857 if (tmp > hw_ep->max_packet_sz_tx)
860 int_txe |= (1 << epnum);
861 musb_writew(mbase, MUSB_INTRTXE, int_txe);
863 /* REVISIT if can_bulk_split(), use by updating "tmp";
864 * likewise high bandwidth periodic tx
866 musb_writew(regs, MUSB_TXMAXP, tmp);
868 /* clear DATAx toggle */
869 csr = MUSB_TXCSR_MODE | MUSB_TXCSR_CLRDATATOG;
871 if (musb_readw(regs, MUSB_TXCSR)
872 & MUSB_TXCSR_FIFONOTEMPTY)
873 csr |= MUSB_TXCSR_FLUSHFIFO;
874 if (usb_endpoint_xfer_isoc(desc))
875 csr |= MUSB_TXCSR_P_ISO;
876 musb_writew(regs, MUSB_TXCSR, csr);
878 u16 int_rxe = musb_readw(mbase, MUSB_INTRRXE);
880 if (hw_ep->is_shared_fifo)
884 if (tmp > hw_ep->max_packet_sz_rx)
887 int_rxe |= (1 << epnum);
888 musb_writew(mbase, MUSB_INTRRXE, int_rxe);
890 /* REVISIT if can_bulk_combine() use by updating "tmp"
891 * likewise high bandwidth periodic rx
893 musb_writew(regs, MUSB_RXMAXP, tmp);
895 /* force shared fifo to OUT-only mode */
896 if (hw_ep->is_shared_fifo) {
897 csr = musb_readw(regs, MUSB_TXCSR);
898 csr &= ~(MUSB_TXCSR_MODE | MUSB_TXCSR_TXPKTRDY);
899 musb_writew(regs, MUSB_TXCSR, csr);
902 /* clear DATAx toggle */
903 csr = MUSB_RXCSR_FLUSHFIFO | MUSB_RXCSR_CLRDATATOG;
905 if (usb_endpoint_xfer_isoc(desc))
906 csr |= MUSB_RXCSR_P_ISO;
907 else if (usb_endpoint_xfer_int(desc))
908 csr |= MUSB_RXCSR_DISNYET;
909 musb_writew(regs, MUSB_RXCSR, csr);
912 /* NOTE: all the I/O code _should_ work fine without DMA, in case
913 * for some reason you run out of channels here.
/* non-NULL desc marks the endpoint as enabled for the queue/IRQ paths */
916 musb_ep->desc = desc;
920 pr_debug("%s periph: enabled %s for %s %s, %smaxpacket %d\n",
921 musb_driver_name, musb_ep->name,
922 ({ char *s; switch (musb_ep->type) {
923 case USB_ENDPOINT_XFER_BULK: s = "bulk"; break;
924 case USB_ENDPOINT_XFER_INT: s = "int"; break;
925 default: s = "iso"; break;
927 musb_ep->is_in ? "IN" : "OUT",
928 musb_ep->dma ? "dma, " : "",
931 schedule_work(&musb->irq_work);
934 musb_ep_select(mbase, 0);
935 spin_unlock_irqrestore(&musb->lock, flags);
940 * Disable an endpoint flushing all requests queued.
/*
 * usb_ep_ops .disable: mask the endpoint interrupt, zero MAXP/CSR for
 * the active direction, then nuke() every queued request.
 * NOTE(review): lines are elided in this excerpt; comments describe
 * only the visible code.
 */
942 static int musb_gadget_disable(struct usb_ep *ep)
947 struct musb_ep *musb_ep;
951 musb_ep = to_musb_ep(ep);
952 DBG(4, "disabling %s\n", musb_ep->name);
953 musb = musb_ep->musb;
954 epnum = musb_ep->current_epnum;
955 epio = musb->endpoints[epnum].regs;
957 spin_lock_irqsave(&musb->lock, flags);
958 musb_ep_select(musb->mregs, epnum);
960 /* zero the endpoint sizes */
961 if (musb_ep->is_in) {
/* mask the TX interrupt and clear MAXP/CSR for this endpoint */
962 u16 int_txe = musb_readw(musb->mregs, MUSB_INTRTXE);
963 int_txe &= ~(1 << epnum);
964 musb_writew(musb->mregs, MUSB_INTRTXE, int_txe);
965 musb_writew(epio, MUSB_TXMAXP, 0);
966 musb_writew(epio, MUSB_TXCSR, 0);
/* same for the RX direction */
968 u16 int_rxe = musb_readw(musb->mregs, MUSB_INTRRXE);
969 int_rxe &= ~(1 << epnum);
970 musb_writew(musb->mregs, MUSB_INTRRXE, int_rxe);
971 musb_writew(epio, MUSB_RXMAXP, 0);
972 musb_writew(epio, MUSB_RXCSR, 0);
/* NULL desc marks the endpoint as disabled for queue/IRQ paths */
975 musb_ep->desc = NULL;
977 /* abort all pending DMA and requests */
978 nuke(musb_ep, -ESHUTDOWN);
980 schedule_work(&musb->irq_work);
982 spin_unlock_irqrestore(&(musb->lock), flags);
984 DBG(2, "%s\n", musb_ep->name);
990 * Allocate a request for an endpoint.
991 * Reused by ep0 code.
/*
 * usb_ep_ops .alloc_request: returns the embedded struct usb_request of
 * a zero-initialized struct musb_request, or (presumably) NULL when the
 * allocation fails — the error-return line is elided in this excerpt.
 */
993 struct usb_request *musb_alloc_request(struct usb_ep *ep, gfp_t gfp_flags)
995 struct musb_ep *musb_ep = to_musb_ep(ep);
996 struct musb *musb = musb_ep->musb;
997 struct musb_request *request = NULL;
999 request = kzalloc(sizeof *request, gfp_flags);
1001 dev_err(musb->controller, "not enough memory\n");
1005 INIT_LIST_HEAD(&request->request.list);
/* mark "not yet DMA-mapped"; start_dma() maps lazily */
1006 request->request.dma = DMA_ADDR_INVALID;
1007 request->epnum = musb_ep->current_epnum;
1008 request->ep = musb_ep;
1010 return &request->request;
1015 * Reused by ep0 code.
/* usb_ep_ops .free_request: frees a request obtained from
 * musb_alloc_request() by releasing its containing musb_request. */
1017 void musb_free_request(struct usb_ep *ep, struct usb_request *req)
1019 kfree(to_musb_request(req));
/* Bookkeeping list plus record type; the remaining members of
 * struct free_record are elided in this excerpt. Presumably used for
 * deferred buffer freeing — TODO confirm against the full file. */
1022 static LIST_HEAD(buffers);
1024 struct free_record {
1025 struct list_head list;
/*
 * usb_ep_ops .queue: submit a request to an endpoint. Rejects requests
 * that don't belong to this endpoint or arrive while it is disabled,
 * appends to the queue, and starts I/O immediately when this request is
 * at the head and the endpoint is neither stalled nor busy. Also drains
 * an RX packet that arrived before any request was queued (rx_pending).
 * NOTE(review): lines are elided in this excerpt; comments describe
 * only the visible code.
 */
1031 static int musb_gadget_queue(struct usb_ep *ep, struct usb_request *req,
1034 struct musb_ep *musb_ep;
1035 struct musb_request *request;
1038 unsigned long lockflags;
1045 musb_ep = to_musb_ep(ep);
1046 musb = musb_ep->musb;
1048 request = to_musb_request(req);
1049 request->musb = musb;
1051 if (request->ep != musb_ep)
1054 DBG(4, "<== to %s request %p length %d\n", ep->name, req, req->length);
1056 /* request is mine now... */
1057 request->request.actual = 0;
1058 request->request.status = -EINPROGRESS;
1059 request->epnum = musb_ep->current_epnum;
1060 request->tx = musb_ep->is_in;
1061 request->mapped = 0;
1063 spin_lock_irqsave(&musb->lock, lockflags);
1065 /* don't queue if the ep is down */
1066 if (!musb_ep->desc) {
1067 DBG(4, "req %p queued to %s while ep %s\n",
1068 req, ep->name, "disabled");
1069 status = -ESHUTDOWN;
1073 /* add request to the list */
1074 list_add_tail(&(request->request.list), &(musb_ep->req_list));
1076 /* we can only start i/o if this is the head of the queue and
1077 * endpoint is not stalled (halted) or busy
1079 if (!musb_ep->stalled && !musb_ep->busy &&
1080 &request->request.list == musb_ep->req_list.next &&
1082 DBG(1, "restarting\n");
1083 musb_ep_restart(musb, request);
1086 /* if we received an RX packet before the request was queued,
1087 * process it here. */
1088 if (!request->tx && musb_ep->rx_pending) {
1089 DBG(1, "processing pending RX\n");
1090 musb_ep->rx_pending = false;
1091 musb_g_rx(musb, musb_ep->current_epnum, false);
1095 spin_unlock_irqrestore(&musb->lock, lockflags);
/*
 * usb_ep_ops .dequeue: unlink a queued request, aborting its DMA
 * transfer when the hardware is already working on it. The request is
 * given back with -ECONNRESET in every successful path.
 * NOTE(review): lines are elided in this excerpt; comments describe
 * only the visible code.
 */
1099 static int musb_gadget_dequeue(struct usb_ep *ep, struct usb_request *request)
1101 struct musb_ep *musb_ep = to_musb_ep(ep);
1102 struct usb_request *r;
1103 unsigned long flags;
1105 struct musb *musb = musb_ep->musb;
1107 DBG(4, "%s, dequeueing request %p\n", ep->name, request);
1108 if (!ep || !request || to_musb_request(request)->ep != musb_ep)
1111 spin_lock_irqsave(&musb->lock, flags);
/* the request must actually be on this endpoint's queue */
1113 list_for_each_entry(r, &musb_ep->req_list, list) {
1118 DBG(3, "request %p not queued to %s\n", request, ep->name);
1123 /* if the hardware doesn't have the request, easy ... */
1124 if (musb_ep->req_list.next != &request->list) {
1125 musb_g_giveback(musb_ep, request, -ECONNRESET);
1126 /* ... else abort the dma transfer ... */
1127 } else if (musb_ep->dma) {
1128 struct dma_controller *c = musb->dma_controller;
1130 musb_ep_select(musb->mregs, musb_ep->current_epnum);
1131 if (c->channel_abort)
1132 status = c->channel_abort(musb_ep->dma);
1135 stop_dma(musb, musb_ep, to_musb_request(request));
1137 musb_g_giveback(musb_ep, request, -ECONNRESET);
1139 /* NOTE: by sticking to easily tested hardware/driver states,
1140 * we leave counting of in-flight packets imprecise.
1142 musb_g_giveback(musb_ep, request, -ECONNRESET);
1146 spin_unlock_irqrestore(&musb->lock, flags);
1151 * Set or clear the halt bit of an endpoint. A halted endpoint won't tx/rx any
1152 * data but will queue requests.
1154 * exported to ep0 code
/*
 * NOTE(review): lines are elided in this excerpt; comments describe
 * only the visible code. ISO endpoints appear to be rejected (the
 * return inside that branch is not visible).
 */
1156 int musb_gadget_set_halt(struct usb_ep *ep, int value)
1158 struct musb_ep *musb_ep = to_musb_ep(ep);
1159 u8 epnum = musb_ep->current_epnum;
1160 struct musb *musb = musb_ep->musb;
1161 void __iomem *epio = musb->endpoints[epnum].regs;
1162 void __iomem *mbase;
1163 unsigned long flags;
1165 struct musb_request *request = NULL;
1170 mbase = musb->mregs;
1172 spin_lock_irqsave(&musb->lock, flags);
/* isochronous endpoints cannot be halted */
1174 if ((USB_ENDPOINT_XFER_ISOC == musb_ep->type)) {
1179 musb_ep_select(mbase, epnum);
1181 /* cannot portably stall with non-empty FIFO */
1182 request = to_musb_request(next_request(musb_ep));
1183 if (value && musb_ep->is_in) {
1184 csr = musb_readw(epio, MUSB_TXCSR);
1185 if (csr & MUSB_TXCSR_FIFONOTEMPTY) {
1186 DBG(3, "%s fifo busy, cannot halt\n", ep->name);
1187 spin_unlock_irqrestore(&musb->lock, flags);
1193 /* set/clear the stall and toggle bits */
1194 DBG(2, "%s: %s stall\n", ep->name, value ? "set" : "clear");
1195 if (musb_ep->is_in) {
1196 csr = musb_readw(epio, MUSB_TXCSR);
1197 if (csr & MUSB_TXCSR_FIFONOTEMPTY)
1198 csr |= MUSB_TXCSR_FLUSHFIFO;
1199 csr |= MUSB_TXCSR_P_WZC_BITS
1200 | MUSB_TXCSR_CLRDATATOG;
1202 csr |= MUSB_TXCSR_P_SENDSTALL;
1204 csr &= ~(MUSB_TXCSR_P_SENDSTALL
1205 | MUSB_TXCSR_P_SENTSTALL);
1206 csr &= ~MUSB_TXCSR_TXPKTRDY;
1207 musb_writew(epio, MUSB_TXCSR, csr);
1209 csr = musb_readw(epio, MUSB_RXCSR);
1210 csr |= MUSB_RXCSR_P_WZC_BITS
1211 | MUSB_RXCSR_FLUSHFIFO
1212 | MUSB_RXCSR_CLRDATATOG;
1214 csr |= MUSB_RXCSR_P_SENDSTALL;
1216 csr &= ~(MUSB_RXCSR_P_SENDSTALL
1217 | MUSB_RXCSR_P_SENTSTALL);
1218 musb_writew(epio, MUSB_RXCSR, csr);
/* remember the halt state for the queue/IRQ paths */
1221 musb_ep->stalled = value;
1225 /* maybe start the first request in the queue */
1226 if (!musb_ep->stalled && request) {
1227 DBG(3, "restarting the request\n");
1228 musb_ep_restart(musb, request);
1231 spin_unlock_irqrestore(&musb->lock, flags);
/*
 * usb_ep_ops .fifo_status: report the number of bytes waiting in the RX
 * FIFO of an enabled OUT endpoint; -EINVAL for IN or disabled endpoints.
 */
1235 static int musb_gadget_fifo_status(struct usb_ep *ep)
1237 struct musb_ep *musb_ep = to_musb_ep(ep);
1238 void __iomem *epio = musb_ep->hw_ep->regs;
1239 int retval = -EINVAL;
1241 if (musb_ep->desc && !musb_ep->is_in) {
1242 struct musb *musb = musb_ep->musb;
1243 int epnum = musb_ep->current_epnum;
1244 void __iomem *mbase = musb->mregs;
1245 unsigned long flags;
1247 spin_lock_irqsave(&musb->lock, flags);
1249 musb_ep_select(mbase, epnum);
1250 /* FIXME return zero unless RXPKTRDY is set */
1251 retval = musb_readw(epio, MUSB_RXCOUNT);
1253 spin_unlock_irqrestore(&musb->lock, flags);
/*
 * usb_ep_ops .fifo_flush: discard any data sitting in the endpoint's
 * FIFO, with the endpoint's TX interrupt masked for the duration.
 * NOTE(review): lines are elided in this excerpt; comments describe
 * only the visible code.
 */
1258 static void musb_gadget_fifo_flush(struct usb_ep *ep)
1260 struct musb_ep *musb_ep = to_musb_ep(ep);
1261 struct musb *musb = musb_ep->musb;
1262 u8 epnum = musb_ep->current_epnum;
1263 void __iomem *epio = musb->endpoints[epnum].regs;
1264 void __iomem *mbase;
1265 unsigned long flags;
1268 mbase = musb->mregs;
1270 spin_lock_irqsave(&musb->lock, flags);
1271 musb_ep_select(mbase, (u8) epnum);
1273 /* disable interrupts */
1274 int_txe = musb_readw(mbase, MUSB_INTRTXE);
1275 musb_writew(mbase, MUSB_INTRTXE, int_txe & ~(1 << epnum));
1277 if (musb_ep->is_in) {
1278 csr = musb_readw(epio, MUSB_TXCSR);
1279 if (csr & MUSB_TXCSR_FIFONOTEMPTY) {
1280 csr |= MUSB_TXCSR_FLUSHFIFO | MUSB_TXCSR_P_WZC_BITS;
/* FLUSHFIFO is written twice, presumably for double buffering —
 * TODO confirm against the controller documentation */
1281 musb_writew(epio, MUSB_TXCSR, csr);
1282 /* REVISIT may be inappropriate w/o FIFONOTEMPTY ... */
1283 musb_writew(epio, MUSB_TXCSR, csr);
1286 csr = musb_readw(epio, MUSB_RXCSR);
1287 csr |= MUSB_RXCSR_FLUSHFIFO | MUSB_RXCSR_P_WZC_BITS;
1288 musb_writew(epio, MUSB_RXCSR, csr);
1289 musb_writew(epio, MUSB_RXCSR, csr);
1292 /* re-enable interrupt */
1293 musb_writew(mbase, MUSB_INTRTXE, int_txe);
1294 spin_unlock_irqrestore(&musb->lock, flags);
/* Endpoint operations registered with the gadget core for the data
 * endpoints defined in this file. */
1297 static const struct usb_ep_ops musb_ep_ops = {
1298 .enable = musb_gadget_enable,
1299 .disable = musb_gadget_disable,
1300 .alloc_request = musb_alloc_request,
1301 .free_request = musb_free_request,
1302 .queue = musb_gadget_queue,
1303 .dequeue = musb_gadget_dequeue,
1304 .set_halt = musb_gadget_set_halt,
1305 .fifo_status = musb_gadget_fifo_status,
1306 .fifo_flush = musb_gadget_fifo_flush
1309 /* ----------------------------------------------------------------------- */
/* usb_gadget_ops.get_frame: return the current USB frame number,
 * read straight from the controller's FRAME register.
 */
1311 static int musb_gadget_get_frame(struct usb_gadget *gadget)
1313 struct musb *musb = gadget_to_musb(gadget);
1315 return (int)musb_readw(musb->mregs, MUSB_FRAME);
/*
 * usb_gadget_ops.wakeup: signal the host.
 * - B_PERIPHERAL: issue remote wakeup (RESUME signaling) if the host
 *   enabled it (may_wakeup) and the link is suspended.
 * - B_IDLE: start an SRP session instead (no OTG support required).
 * Returns -EINVAL (default status) for unhandled states.
 * NOTE(review): devctl, power and the retry/delay lines are declared on
 * lines missing from this excerpt.
 */
1318 static int musb_gadget_wakeup(struct usb_gadget *gadget)
1320 struct musb *musb = gadget_to_musb(gadget);
1321 void __iomem *mregs = musb->mregs;
1322 unsigned long flags;
1323 int status = -EINVAL;
1327 spin_lock_irqsave(&musb->lock, flags);
1329 switch (musb->xceiv->state) {
1330 case OTG_STATE_B_PERIPHERAL:
1331 /* NOTE: OTG state machine doesn't include B_SUSPENDED;
1332 * that's part of the standard usb 1.1 state machine, and
1333 * doesn't affect OTG transitions.
1335 if (musb->may_wakeup && musb->is_suspended)
1338 case OTG_STATE_B_IDLE:
1339 /* Start SRP ... OTG not required. */
1340 devctl = musb_readb(mregs, MUSB_DEVCTL);
1341 DBG(2, "Sending SRP: devctl: %02x\n", devctl);
1342 devctl |= MUSB_DEVCTL_SESSION;
1343 musb_writeb(mregs, MUSB_DEVCTL, devctl);
1344 devctl = musb_readb(mregs, MUSB_DEVCTL);
/* poll until the controller reports the session started ... */
1346 while (!(devctl & MUSB_DEVCTL_SESSION)) {
1347 devctl = musb_readb(mregs, MUSB_DEVCTL);
/* ... and then ended again, completing the SRP pulse */
1352 while (devctl & MUSB_DEVCTL_SESSION) {
1353 devctl = musb_readb(mregs, MUSB_DEVCTL);
1358 /* Block idling for at least 1s */
1359 musb_platform_try_idle(musb,
1360 jiffies + msecs_to_jiffies(1 * HZ));
1365 DBG(2, "Unhandled wake: %s\n", otg_state_string(musb));
/* remote wakeup: drive RESUME signaling on the bus ... */
1371 power = musb_readb(mregs, MUSB_POWER);
1372 power |= MUSB_POWER_RESUME;
1373 musb_writeb(mregs, MUSB_POWER, power);
1374 DBG(2, "issue wakeup\n");
1376 /* FIXME do this next chunk in a timer callback, no udelay */
/* ... then stop driving it after the (elided) delay */
1379 power = musb_readb(mregs, MUSB_POWER);
1380 power &= ~MUSB_POWER_RESUME;
1381 musb_writeb(mregs, MUSB_POWER, power);
1383 spin_unlock_irqrestore(&musb->lock, flags);
/* usb_gadget_ops.set_selfpowered: record whether GET_STATUS should
 * report the device as self-powered (normalized to 0/1 via !!).
 */
1388 musb_gadget_set_self_powered(struct usb_gadget *gadget, int is_selfpowered)
1390 struct musb *musb = gadget_to_musb(gadget);
1392 musb->is_self_powered = !!is_selfpowered;
/*
 * Drive (is_on) or release the D+ pullup, i.e. soft-connect/disconnect.
 * Called with musb->lock held (see musb_gadget_pullup() and
 * stop_activity()).
 * NOTE(review): the u8 r/power declarations and the delay between the
 * RESUME set/clear are on lines missing from this excerpt.
 */
1396 static void musb_pullup(struct musb *musb, int is_on)
1400 power = musb_readb(musb->mregs, MUSB_POWER);
1401 /** UGLY UGLY HACK: Windows problems with multiple
1404 * This is necessary to prevent a RESET irq to
1405 * come when we fake a usb disconnection in order
1406 * to change the configuration on the gadget driver.
1410 power |= MUSB_POWER_SOFTCONN;
1412 r = musb_readb(musb->mregs, MUSB_INTRUSBE);
1413 /* disable RESET interrupt */
1414 musb_writeb(musb->mregs, MUSB_INTRUSBE, ~(r & BIT(1)));
/* briefly pulse RESUME while the RESET irq is masked (see hack above) */
1417 r = musb_readb(musb->mregs, MUSB_POWER);
1418 r |= MUSB_POWER_RESUME;
1419 musb_writeb(musb->mregs, MUSB_POWER, r);
1423 r &= ~MUSB_POWER_RESUME;
1424 musb_writeb(musb->mregs, MUSB_POWER, r);
1426 /* enable interrupts */
1427 musb_writeb(musb->mregs, MUSB_INTRUSBE, 0xf7);
1429 /* some delay required for this to work */
/* disconnect path: drop SOFTCONN so the host sees a detach */
1432 power &= ~MUSB_POWER_SOFTCONN;
1435 /* FIXME if on, HdrcStart; if off, HdrcStop */
1437 DBG(3, "gadget %s D+ pullup %s\n",
1438 musb->gadget_driver->function, is_on ? "on" : "off");
1439 musb_writeb(musb->mregs, MUSB_POWER, power);
/* usb_gadget_ops.vbus_session: stubbed out (not wired into the ops table
 * below); currently only logs and leaves the FIXME behavior unimplemented.
 */
1443 static int musb_gadget_vbus_session(struct usb_gadget *gadget, int is_active)
1445 DBG(2, "<= %s =>\n", __func__);
1448 * FIXME iff driver's softconnect flag is set (as it is during probe,
1449 * though that can clear it), just musb_pullup().
/*
 * usb_gadget_ops.vbus_draw: set how much VBUS current (mA) the device may
 * draw.  Records the value, kicks irq_work so other code can react, and
 * forwards the request to the OTG transceiver.  Fails when the
 * transceiver has no set_power hook (error value on the elided line).
 */
1456 static int musb_gadget_vbus_draw(struct usb_gadget *gadget, unsigned mA)
1458 struct musb *musb = gadget_to_musb(gadget);
1460 if (!musb->xceiv->set_power)
1463 musb->power_draw = mA;
1464 schedule_work(&musb->irq_work);
1466 return otg_set_power(musb->xceiv, mA);
/*
 * usb_gadget_ops.pullup: software connect/disconnect.  Updates the cached
 * softconnect flag and toggles the D+ pullup only when the state actually
 * changes, all under the controller lock.
 */
1469 static int musb_gadget_pullup(struct usb_gadget *gadget, int is_on)
1471 struct musb *musb = gadget_to_musb(gadget);
1472 unsigned long flags;
1476 /* NOTE: this assumes we are sensing vbus; we'd rather
1477 * not pullup unless the B-session is active.
1479 spin_lock_irqsave(&musb->lock, flags);
1480 if (is_on != musb->softconnect) {
1481 musb->softconnect = is_on;
1482 musb_pullup(musb, is_on);
1484 spin_unlock_irqrestore(&musb->lock, flags);
/* Controller-level gadget operations registered with the gadget core.
 * vbus_session stays commented out (see its FIXME above).
 */
1488 static const struct usb_gadget_ops musb_gadget_operations = {
1489 .get_frame = musb_gadget_get_frame,
1490 .wakeup = musb_gadget_wakeup,
1491 .set_selfpowered = musb_gadget_set_self_powered,
1492 /* .vbus_session = musb_gadget_vbus_session, */
1493 .vbus_draw = musb_gadget_vbus_draw,
1494 .pullup = musb_gadget_pullup,
1497 /* ----------------------------------------------------------------------- */
1501 /* Only this registration code "knows" the rule (from USB standards)
1502 * about there being only one external upstream port. It assumes
1503 * all peripheral ports are external...
/* single peripheral instance, set up by musb_gadget_setup() */
1505 static struct musb *the_gadget;
/* device-model release callback for musb->g.dev; currently only logs */
1507 static void musb_gadget_release(struct device *dev)
1509 /* kref_put(WHAT) */
1510 dev_dbg(dev, "%s\n", __func__);
/*
 * Initialize one software endpoint (struct musb_ep) and register it with
 * the gadget framework.  ep0 gets maxpacket 64, the ep0 ops, and becomes
 * musb->g.ep0; all other endpoints use musb_ep_ops, take their maxpacket
 * from the hardware FIFO sizing, and are appended to musb->g.ep_list.
 * Shared-FIFO endpoints (and ep0) get a bare "epN" name with no
 * "in"/"out" suffix.
 */
1515 init_peripheral_ep(struct musb *musb, struct musb_ep *ep, u8 epnum, int is_in)
1517 struct musb_hw_ep *hw_ep = musb->endpoints + epnum;
1519 memset(ep, 0, sizeof *ep);
1521 ep->current_epnum = epnum;
1526 INIT_LIST_HEAD(&ep->req_list);
1528 sprintf(ep->name, "ep%d%s", epnum,
1529 (!epnum || hw_ep->is_shared_fifo) ? "" : (
1530 is_in ? "in" : "out"));
1531 ep->end_point.name = ep->name;
1532 INIT_LIST_HEAD(&ep->end_point.ep_list);
/* ep0 branch: fixed 64-byte maxpacket, dedicated control ops */
1534 ep->end_point.maxpacket = 64;
1535 ep->end_point.ops = &musb_g_ep0_ops;
1536 musb->g.ep0 = &ep->end_point;
/* non-ep0: maxpacket follows the direction's hardware FIFO size */
1539 ep->end_point.maxpacket = hw_ep->max_packet_sz_tx;
1541 ep->end_point.maxpacket = hw_ep->max_packet_sz_rx;
1542 ep->end_point.ops = &musb_ep_ops;
1543 list_add_tail(&ep->end_point.ep_list, &musb->g.ep_list);
1548 * Initialize the endpoints exposed to peripheral drivers, with backlinks
1549 * to the rest of the driver state.
1551 static inline void __init musb_g_init_endpoints(struct musb *musb)
1554 struct musb_hw_ep *hw_ep;
1557 /* initialize endpoint list just once */
1558 INIT_LIST_HEAD(&(musb->g.ep_list));
1560 for (epnum = 0, hw_ep = musb->endpoints;
1561 epnum < musb->nr_endpoints;
/* shared-FIFO hardware gets a single bidirectional endpoint ... */
1563 if (hw_ep->is_shared_fifo /* || !epnum */) {
1564 init_peripheral_ep(musb, &hw_ep->ep_in, epnum, 0);
/* ... otherwise one IN and/or one OUT, as the FIFO sizing allows */
1567 if (hw_ep->max_packet_sz_tx) {
1568 init_peripheral_ep(musb, &hw_ep->ep_in,
1572 if (hw_ep->max_packet_sz_rx) {
1573 init_peripheral_ep(musb, &hw_ep->ep_out,
1581 /* called once during driver setup to initialize and link into
1582 * the driver model; memory is zeroed.
1584 int __init musb_gadget_setup(struct musb *musb)
1588 /* REVISIT minor race: if (erroneously) setting up two
1589 * musb peripherals at the same time, only the bus lock
/* wire up the usb_gadget abstraction for this controller */
1596 musb->g.ops = &musb_gadget_operations;
1597 musb->g.is_dualspeed = 1;
1598 musb->g.speed = USB_SPEED_UNKNOWN;
1600 /* this "gadget" abstracts/virtualizes the controller */
1601 dev_set_name(&musb->g.dev, "gadget");
1602 musb->g.dev.parent = musb->controller;
1603 musb->g.dev.dma_mask = musb->controller->dma_mask;
1604 musb->g.dev.release = musb_gadget_release;
1605 musb->g.name = musb_driver_name;
1606 /* OTG-enabled controllers flag themselves (field set on elided line) */
1607 if (is_otg_enabled(musb))
1610 musb_g_init_endpoints(musb);
/* start idle until a gadget driver binds */
1612 musb->is_active = 0;
1613 musb_platform_try_idle(musb, 0);
1615 status = device_register(&musb->g.dev);
/* Tear down what musb_gadget_setup() registered; no-op for any musb
 * instance other than the single tracked peripheral (the_gadget).
 */
1621 void musb_gadget_cleanup(struct musb *musb)
1623 if (musb != the_gadget)
1626 device_unregister(&musb->g.dev);
1631 * Register the gadget driver. Used by gadget drivers when
1632 * registering themselves with the controller.
1634 * -EINVAL something went wrong (not driver)
1635 * -EBUSY another gadget is already using the controller
1636 * -ENOMEM no memory to perform the operation
1638 * @param driver the gadget driver
1639 * @return <0 if error, 0 if everything is fine
/*
 * Bind a gadget driver to this controller: validate the driver, record it,
 * enable clocks, call its bind() hook, attach to the OTG transceiver, and
 * (when OTG is enabled) bring up the host-controller side too.
 */
1641 int usb_gadget_register_driver(struct usb_gadget_driver *driver)
1644 unsigned long flags;
1645 struct musb *musb = the_gadget;
/* only high-speed-capable drivers are accepted here */
1648 || driver->speed != USB_SPEED_HIGH
1653 /* driver must be initialized to support peripheral mode */
/* NOTE(review): "(mode == MUSB_OTG || mode != MUSB_OTG)" is a
 * tautology -- this guard only rejects a NULL musb.  It very likely
 * was meant to accept MUSB_OTG *or* MUSB_PERIPHERAL board modes;
 * worth fixing once the elided lines are available.
 */
1654 if (!musb || !(musb->board_mode == MUSB_OTG
1655 || musb->board_mode != MUSB_OTG)) {
1656 DBG(1, "%s, no dev??\n", __func__);
1660 DBG(3, "registering driver %s\n", driver->function);
1661 spin_lock_irqsave(&musb->lock, flags);
/* reject a second driver while one is already bound */
1663 if (musb->gadget_driver) {
1664 DBG(1, "%s is already bound to %s\n",
1666 musb->gadget_driver->driver.name);
1669 musb->gadget_driver = driver;
1670 musb->g.dev.driver = &driver->driver;
1671 driver->driver.bus = NULL;
1672 musb->softconnect = 1;
1676 spin_unlock_irqrestore(&musb->lock, flags);
1679 /* Clocks need to be turned on with OFF mode */
1680 if (musb->set_clock)
1681 musb->set_clock(musb->clock, 1);
1683 clk_enable(musb->clock);
/* hand the gadget to the function driver; undo bookkeeping on failure */
1685 retval = driver->bind(&musb->g);
1687 DBG(3, "bind to driver %s failed --> %d\n",
1688 driver->driver.name, retval);
1689 musb->gadget_driver = NULL;
1690 musb->g.dev.driver = NULL;
1693 spin_lock_irqsave(&musb->lock, flags);
1695 /* REVISIT always use otg_set_peripheral(), handling
1696 * issues including the root hub one below ...
1698 musb->xceiv->gadget = &musb->g;
1699 musb->xceiv->state = OTG_STATE_B_IDLE;
1700 musb->is_active = 1;
1702 /* FIXME this ignores the softconnect flag. Drivers are
1703 * allowed to hold the peripheral inactive until for example
1704 * userspace hooks up printer hardware or DSP codecs, so
1705 * hosts only see fully functional devices.
1708 if (!is_otg_enabled(musb))
1711 spin_unlock_irqrestore(&musb->lock, flags);
1713 if (is_otg_enabled(musb)) {
1714 DBG(3, "OTG startup...\n");
1716 /* REVISIT: funcall to other code, which also
1717 * handles power budgeting ... this way also
1718 * ensures HdrcStart is indirectly called.
1720 retval = usb_add_hcd(musb_to_hcd(musb), -1, 0);
/* hcd bring-up failed: fully unwind the binding done above */
1722 DBG(1, "add_hcd failed, %d\n", retval);
1723 spin_lock_irqsave(&musb->lock, flags);
1724 musb->xceiv->gadget = NULL;
1725 musb->xceiv->state = OTG_STATE_UNDEFINED;
1726 musb->gadget_driver = NULL;
1727 musb->g.dev.driver = NULL;
1728 spin_unlock_irqrestore(&musb->lock, flags);
1732 musb_save_ctx(musb);
1736 EXPORT_SYMBOL(usb_gadget_register_driver);
/*
 * Quiesce the peripheral: drop the pullup, abort every queued request on
 * every endpoint with -ESHUTDOWN, then tell the gadget driver it has been
 * disconnected.  Called with musb->lock held; the lock is dropped only
 * around the driver's disconnect() callback.
 */
1738 static void stop_activity(struct musb *musb, struct usb_gadget_driver *driver)
1741 struct musb_hw_ep *hw_ep;
1743 /* don't disconnect if it's not connected */
1744 if (musb->g.speed == USB_SPEED_UNKNOWN)
1747 musb->g.speed = USB_SPEED_UNKNOWN;
1749 /* deactivate the hardware */
1750 if (musb->softconnect) {
1751 musb->softconnect = 0;
1752 musb_pullup(musb, 0);
1756 /* killing any outstanding requests will quiesce the driver;
1757 * then report disconnect
1760 for (i = 0, hw_ep = musb->endpoints;
1761 i < musb->nr_endpoints;
1763 musb_ep_select(musb->mregs, i);
/* shared FIFO: one endpoint object covers both directions */
1764 if (hw_ep->is_shared_fifo /* || !epnum */) {
1765 nuke(&hw_ep->ep_in, -ESHUTDOWN);
1767 if (hw_ep->max_packet_sz_tx)
1768 nuke(&hw_ep->ep_in, -ESHUTDOWN);
1769 if (hw_ep->max_packet_sz_rx)
1770 nuke(&hw_ep->ep_out, -ESHUTDOWN);
/* callback may sleep/reenter, so release the spinlock around it */
1774 spin_unlock(&musb->lock);
1775 driver->disconnect(&musb->g);
1776 spin_lock(&musb->lock);
1781 * Unregister the gadget driver. Used by gadget drivers when
1782 * unregistering themselves from the controller.
1784 * @param driver the gadget driver to unregister
/*
 * Unbind a gadget driver: stop any HNP activity, quiesce the hardware via
 * stop_activity(), call the driver's unbind() (with the lock dropped),
 * clear the binding, and when OTG is enabled remove the host side too.
 * Rejects a NULL driver, a driver without unbind(), or no controller.
 */
1786 int usb_gadget_unregister_driver(struct usb_gadget_driver *driver)
1788 unsigned long flags;
1790 struct musb *musb = the_gadget;
1792 if (!driver || !driver->unbind || !musb)
1795 /* REVISIT always use otg_set_peripheral() here too;
1796 * this needs to shut down the OTG engine.
1799 spin_lock_irqsave(&musb->lock, flags);
/* make sure clocks are running before touching the hardware */
1801 if (musb->set_clock)
1802 musb->set_clock(musb->clock, 1);
1804 clk_enable(musb->clock);
1806 #ifdef CONFIG_USB_MUSB_OTG
1807 musb_hnp_stop(musb);
/* only tear down if this driver is actually the bound one */
1810 if (musb->gadget_driver == driver) {
1812 (void) musb_gadget_vbus_draw(&musb->g, 0);
1814 musb->xceiv->state = OTG_STATE_UNDEFINED;
1815 stop_activity(musb, driver);
1817 DBG(3, "unregistering driver %s\n", driver->function);
/* unbind() may sleep; drop the lock around the callback */
1818 spin_unlock_irqrestore(&musb->lock, flags);
1819 driver->unbind(&musb->g);
1820 spin_lock_irqsave(&musb->lock, flags);
1822 musb->gadget_driver = NULL;
1823 musb->g.dev.driver = NULL;
1825 musb->is_active = 0;
1826 musb_platform_try_idle(musb, 0);
1829 spin_unlock_irqrestore(&musb->lock, flags);
1831 if (is_otg_enabled(musb) && retval == 0) {
1832 usb_remove_hcd(musb_to_hcd(musb));
1833 /* FIXME we need to be able to register another
1834 * gadget driver here and have everything work;
1835 * that currently misbehaves.
1838 musb_save_ctx(musb);
1842 EXPORT_SYMBOL(usb_gadget_unregister_driver);
1845 /* ----------------------------------------------------------------------- */
1847 /* lifecycle operations called through plat_uds.c */
/*
 * Bus RESUME seen: clear the suspended flag and, for peripheral states,
 * mark the controller active and forward resume() to the gadget driver
 * (lock dropped around the callback).  Other states just log a warning.
 */
1849 void musb_g_resume(struct musb *musb)
1851 musb->is_suspended = 0;
1852 switch (musb->xceiv->state) {
1853 case OTG_STATE_B_IDLE:
1855 case OTG_STATE_B_WAIT_ACON:
1856 case OTG_STATE_B_PERIPHERAL:
1857 musb->is_active = 1;
1858 if (musb->gadget_driver && musb->gadget_driver->resume) {
1859 spin_unlock(&musb->lock);
1860 musb->gadget_driver->resume(&musb->g);
1861 spin_lock(&musb->lock);
1865 WARNING("unhandled RESUME transition (%s)\n",
1866 otg_state_string(musb));
1870 /* called when SOF packets stop for 3+ msec */
1871 void musb_g_suspend(struct musb *musb)
1875 devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
1876 DBG(3, "devctl %02x\n", devctl);
1878 switch (musb->xceiv->state) {
1879 case OTG_STATE_B_IDLE:
/* VBUS fully valid while idle: we are now a B-peripheral */
1880 if ((devctl & MUSB_DEVCTL_VBUS) == MUSB_DEVCTL_VBUS)
1881 musb->xceiv->state = OTG_STATE_B_PERIPHERAL;
1883 case OTG_STATE_B_PERIPHERAL:
1884 musb->is_suspended = 1;
/* driver's suspend() may sleep; drop the lock around it */
1885 if (musb->gadget_driver && musb->gadget_driver->suspend) {
1886 spin_unlock(&musb->lock);
1887 musb->gadget_driver->suspend(&musb->g);
1888 spin_lock(&musb->lock);
1892 /* REVISIT if B_HOST, clear DEVCTL.HOSTREQ;
1893 * A_PERIPHERAL may need care too
1895 WARNING("unhandled SUSPEND transition (%s)\n",
1896 otg_state_string(musb));
1900 /* Called during SRP */
/* thin wrapper: reuse the gadget-ops wakeup path for SRP */
1901 void musb_g_wakeup(struct musb *musb)
1903 musb_gadget_wakeup(&musb->g);
1906 /* called when VBUS drops below session threshold, and in other cases */
1907 void musb_g_disconnect(struct musb *musb)
1909 void __iomem *mregs = musb->mregs;
1910 u8 devctl = musb_readb(mregs, MUSB_DEVCTL);
1912 DBG(3, "devctl %02x\n", devctl);
/* clear everything in DEVCTL except the session bit */
1915 musb_writeb(mregs, MUSB_DEVCTL, devctl & MUSB_DEVCTL_SESSION);
1917 /* don't draw vbus until new b-default session */
1918 (void) musb_gadget_vbus_draw(&musb->g, 0);
1920 musb->g.speed = USB_SPEED_UNKNOWN;
/* notify the function driver; its callback may sleep, so unlock */
1921 if (musb->gadget_driver && musb->gadget_driver->disconnect) {
1922 spin_unlock(&musb->lock);
1923 musb->gadget_driver->disconnect(&musb->g);
1924 spin_lock(&musb->lock);
/* fold the OTG state machine back toward an idle state */
1927 switch (musb->xceiv->state) {
1929 #ifdef CONFIG_USB_MUSB_OTG
1930 DBG(2, "Unhandled disconnect %s, setting a_idle\n",
1931 otg_state_string(musb));
1932 musb->xceiv->state = OTG_STATE_A_IDLE;
1934 case OTG_STATE_A_PERIPHERAL:
1935 musb->xceiv->state = OTG_STATE_A_WAIT_VFALL;
1937 case OTG_STATE_B_WAIT_ACON:
1938 case OTG_STATE_B_HOST:
1940 case OTG_STATE_B_PERIPHERAL:
1941 case OTG_STATE_B_IDLE:
1942 musb->xceiv->state = OTG_STATE_B_IDLE;
1944 case OTG_STATE_B_SRP_INIT:
1948 musb->is_active = 0;
/*
 * Bus RESET seen while in peripheral mode: report any pending disconnect,
 * latch the negotiated speed from POWER.HSMODE, reset device-mode state
 * (ep0 stage, wakeup/HNP flags), and set the OTG state depending on
 * whether we are the B-device or (after HNP) an A-peripheral.
 * Releases and reacquires musb->lock via musb_g_disconnect()'s callback
 * path (see __releases/__acquires annotations).
 */
1951 void musb_g_reset(struct musb *musb)
1952 __releases(musb->lock)
1953 __acquires(musb->lock)
1955 void __iomem *mbase = musb->mregs;
1956 u8 devctl = musb_readb(mbase, MUSB_DEVCTL);
1959 DBG(3, "<== %s addr=%x driver '%s'\n",
1960 (devctl & MUSB_DEVCTL_BDEVICE)
1961 ? "B-Device" : "A-Device",
1962 musb_readb(mbase, MUSB_FADDR),
1964 ? musb->gadget_driver->driver.name
1968 /* report disconnect, if we didn't already (flushing EP state) */
1969 if (musb->g.speed != USB_SPEED_UNKNOWN)
1970 musb_g_disconnect(musb);
/* host-request bit set: keep the session alive for HNP */
1973 else if (devctl & MUSB_DEVCTL_HR)
1974 musb_writeb(mbase, MUSB_DEVCTL, MUSB_DEVCTL_SESSION);
1977 /* what speed did we negotiate? */
1978 power = musb_readb(mbase, MUSB_POWER);
1979 musb->g.speed = (power & MUSB_POWER_HSMODE)
1980 ? USB_SPEED_HIGH : USB_SPEED_FULL;
1982 /* start in USB_STATE_DEFAULT */
1983 musb->is_active = 1;
1984 musb->is_suspended = 0;
1985 MUSB_DEV_MODE(musb);
1987 musb->ep0_state = MUSB_EP0_STAGE_SETUP;
/* a reset clears remote-wakeup permission and all HNP flags */
1989 musb->may_wakeup = 0;
1990 musb->g.b_hnp_enable = 0;
1991 musb->g.a_alt_hnp_support = 0;
1992 musb->g.a_hnp_support = 0;
1994 /* Normal reset, as B-Device;
1995 * or else after HNP, as A-Device
1997 if (devctl & MUSB_DEVCTL_BDEVICE) {
1998 musb->xceiv->state = OTG_STATE_B_PERIPHERAL;
1999 musb->g.is_a_peripheral = 0;
2000 } else if (is_otg_enabled(musb)) {
2001 musb->xceiv->state = OTG_STATE_A_PERIPHERAL;
2002 musb->g.is_a_peripheral = 1;
2006 /* start with default limits on VBUS power draw */
2007 (void) musb_gadget_vbus_draw(&musb->g,
2008 is_otg_enabled(musb) ? 8 : 100);