2 * f_raw.c -- USB Raw Access Function Driver
4 * Copyright (C) 2009 Nokia Corporation
5 * Contact: Felipe Balbi <felipe.balbi@nokia.com>
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
22 /* #define VERBOSE_DEBUG */
24 #include <linux/kernel.h>
25 #include <linux/utsname.h>
28 #include <linux/device.h>
29 #include <linux/wait.h>
30 #include <linux/list.h>
31 #include <linux/cdev.h>
32 #include <linux/poll.h>
33 #include <linux/dma-mapping.h>
34 #include <linux/uaccess.h>
36 #include <linux/usb/ch9.h>
37 #include <linux/usb/gadget.h>
38 #include <linux/usb/raw.h>
40 #include "gadget_chips.h"
	/* NOTE(review): extraction fragment — the enclosing struct
	 * definitions (struct graw, struct raw_request, struct f_raw)
	 * are only partly visible here; field lines below are kept
	 * verbatim from the excerpt.
	 */
	struct usb_gadget	*gadget;	/* bound UDC gadget (set in raw_bind) */
	struct usb_function	func;		/* composite-framework function glue */

/* single-instance driver: one chardev state, one function instance */
static struct graw	*the_graw;
static struct f_raw	*the_raw;

	struct usb_request	*req;		/* underlying UDC request */
	struct list_head	list;		/* link in f_raw->read_pool */
	wait_queue_head_t	wait;		/* woken by raw_complete() */
	/* queued: transfer submitted; completed: raw_complete() ran */
	unsigned		queued:1, completed:1;

	struct usb_endpoint_descriptor	*raw_out;	/* per-speed OUT ep copy */

	/* pool of read requests */
	struct list_head	read_pool;

	/* synchronize with userland access */
	/* request handed out by RAW_ALLOC_REQUEST, consumed by mmap() */
	struct raw_request	*allocated_req;

	struct raw_ep_descs	fs;	/* full-speed descriptor copies */
	struct raw_ep_descs	hs;	/* high-speed descriptor copies */

	unsigned		can_activate:1;
/* Map a composite usb_function back to its containing f_raw instance. */
static inline struct f_raw *func_to_raw(struct usb_function *f)
	/* NOTE(review): surrounding braces lost in extraction */
	return container_of(f, struct f_raw, graw.func);
/* 64-bit DMA mask installed on the gadget device in raw_bind() */
static u64 raw_dmamask = DMA_BIT_MASK(64);

/*-------------------------------------------------------------------------*/

#define RAW_INTF_IDX	1

static struct usb_string raw_string_defs[] = {
	[RAW_INTF_IDX].s = "Device Upgrade Interface",
	{  },	/* end of list */

static struct usb_gadget_strings raw_string_table = {
	.language	= 0x0409,	/* en-US */
	.strings	= raw_string_defs,

static struct usb_gadget_strings *raw_strings[] = {
/* one vendor-specific interface, no alternate settings; the interface
 * number is patched at bind time from usb_interface_id()
 */
static struct usb_interface_descriptor raw_intf __initdata = {
	.bLength		= sizeof(raw_intf),
	.bDescriptorType	= USB_DT_INTERFACE,
	.bInterfaceNumber	= 0,
	.bAlternateSetting	= 0,
	.bInterfaceClass	= USB_CLASS_VENDOR_SPEC,
131 /* High-Speed Support */
133 static struct usb_endpoint_descriptor raw_hs_ep_out_desc = {
134 .bLength = USB_DT_ENDPOINT_SIZE,
135 .bDescriptorType = USB_DT_ENDPOINT,
137 .bEndpointAddress = USB_DIR_OUT,
138 .bmAttributes = USB_ENDPOINT_XFER_BULK,
139 .wMaxPacketSize = __constant_cpu_to_le16(512),
142 static struct usb_descriptor_header *hs_function[] __initdata = {
143 (struct usb_descriptor_header *) &raw_intf,
144 (struct usb_descriptor_header *) &raw_hs_ep_out_desc,
/* Full-Speed Support */

/* bulk OUT endpoint; wMaxPacketSize is not set here — NOTE(review):
 * presumably filled by ep autoconfig, the initializer line is missing
 * from this excerpt
 */
static struct usb_endpoint_descriptor raw_fs_ep_out_desc = {
	.bLength		= USB_DT_ENDPOINT_SIZE,
	.bDescriptorType	= USB_DT_ENDPOINT,
	.bEndpointAddress	= USB_DIR_OUT,
	.bmAttributes		= USB_ENDPOINT_XFER_BULK,

static struct usb_descriptor_header *fs_function[] __initdata = {
	(struct usb_descriptor_header *) &raw_intf,
	(struct usb_descriptor_header *) &raw_fs_ep_out_desc,

/*-------------------------------------------------------------------------*/

static void raw_complete(struct usb_ep *ep, struct usb_request *req);
/* Allocate one raw_request (wrapper + usb_request + coherent DMA buffer
 * of @buflen bytes), number it, and add it to raw->read_pool.
 * Returns the new request; error-path lines freeing in reverse order are
 * partly missing from this excerpt.
 */
static struct raw_request *raw_alloc_request(struct f_raw *raw, unsigned buflen)
	struct list_head *pool = &raw->read_pool;
	struct usb_request *req;
	struct raw_request *raw_req;

	raw_req = kzalloc(sizeof(*raw_req), GFP_KERNEL);

	INIT_LIST_HEAD(&raw_req->list);

	req = usb_ep_alloc_request(raw->out, GFP_KERNEL);

	req->length = buflen;
	req->complete = raw_complete;
	req->context = raw_req;	/* lets raw_complete() find the wrapper */

	/* coherent buffer so userland can mmap() it via dma_mmap_coherent() */
	buf = dma_alloc_coherent(&raw->graw.gadget->dev, buflen,
			&req->dma, GFP_KERNEL);

	raw_req->len = buflen;

	/* NOTE(review): pool is capped; bail-out branch body not visible */
	if (raw->nr_reqs == MAX_NR_REQUESTS)

	raw_req->nr = raw->nr_reqs;	/* userland refers to requests by nr */

	list_add_tail(&raw_req->list, pool);

	/* error unwind (fragmentary) */
	dma_free_coherent(&raw->graw.gadget->dev, buflen,

	usb_ep_free_request(raw->out, req);
/* OUT-endpoint completion callback: record status, mark the wrapper
 * completed and wake any poll()/wait sleepers.
 * NOTE(review): break statements between cases are missing from this
 * excerpt — confirm fallthrough behavior against the full source.
 */
static void raw_complete(struct usb_ep *ep, struct usb_request *req)
	struct f_raw *raw = ep->driver_data;
	struct raw_request *raw_req = req->context;
	struct usb_composite_dev *cdev = raw->graw.func.config->cdev;
	int status = req->status;

	case 0:			/* normal completion */

	case -ECONNABORTED:	/* hardware forced ep reset */
	case -ECONNRESET:	/* request dequeued */
	case -ESHUTDOWN:	/* disconnected from host */
		VDBG(cdev, "%s gone (%d), %d/%d\n", ep->name, status,
				req->actual, req->length);

	case -EOVERFLOW:	/* not big enough buffer */

		DBG(cdev, "%s complete --> %d, %d/%d\n", ep->name,
				status, req->actual, req->length);
	case -EREMOTEIO:	/* short read */

	raw_req->completed = 1;
	wake_up_interruptible(&raw_req->wait);
/* Look up a pooled request by its userland-visible number.
 * NOTE(review): the return statements are missing from this excerpt;
 * presumably returns the match or NULL.
 */
static struct raw_request *find_request(struct f_raw *raw, int value)
	struct raw_request *req;

	list_for_each_entry(req, &raw->read_pool, list)
		if (req->nr == value)
/* Enable the OUT endpoint with the speed-appropriate descriptor and
 * claim it for this function instance.
 */
static inline int enable_raw(struct usb_composite_dev *cdev, struct f_raw *raw)
	const struct usb_endpoint_descriptor *out_desc;

	/* choose endpoint */
	out_desc = ep_choose(cdev->gadget, &raw_hs_ep_out_desc,
			&raw_fs_ep_out_desc);

	status = usb_ep_enable(ep, out_desc);

	ep->driver_data = raw;	/* claim; also checked in raw_set_alt() */

	DBG(cdev, "%s enabled\n", raw->graw.func.name);
/* Disable the OUT endpoint if currently claimed (driver_data non-NULL
 * doubles as the "enabled" flag) and release the claim.
 */
static inline void disable_raw(struct f_raw *raw)
	struct usb_composite_dev *cdev;

	cdev = raw->graw.func.config->cdev;

	if (ep->driver_data) {
		status = usb_ep_disable(ep);

		DBG(cdev, "disable %s --> %d\n",

		ep->driver_data = NULL;

	VDBG(cdev, "%s disabled\n", raw->graw.func.name);
/* set_alt handler: re-enable the single altsetting, tearing the endpoint
 * down first if it was already active.
 */
static int raw_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
	struct usb_composite_dev *cdev = f->config->cdev;
	struct f_raw *raw = func_to_raw(f);

	/* we know alt is zero */
	if (raw->out->driver_data)	/* already enabled? */

	return enable_raw(cdev, raw);
/* disable handler: host deconfigured us — shut the endpoint down. */
static void raw_disable(struct usb_function *f)
	struct f_raw *raw = func_to_raw(f);
/* Queue a pooled request for an OUT transfer of qr->nr_bytes bytes.
 * Rejects unknown request numbers and lengths beyond the allocated
 * buffer. Returns negative errno lines are partly missing here.
 */
static int raw_queue_request(struct f_raw *raw, struct raw_queue_request *qr)
	struct usb_ep *ep = raw->out;
	struct raw_request *raw_req;

	raw_req = find_request(raw, qr->nr);

	if (qr->nr_bytes > raw_req->len)	/* can't exceed the DMA buffer */

	/* FIXME: lock with irqsave and check if transfer already in progress,

	raw_req->req->length = qr->nr_bytes;

	init_waitqueue_head(&raw_req->wait);
	raw_req->completed = 0;	/* armed; raw_complete() sets it back */

	status = usb_ep_queue(ep, raw_req->req, GFP_KERNEL);

		struct usb_composite_dev *cdev;

		cdev = raw->graw.func.config->cdev;
		ERROR(cdev, "start %s %s --> %d\n", "OUT", ep->name, status);
/* Tear down one pooled request: dequeue if pending, free the coherent
 * buffer and usb_request, and unlink it from the pool.
 * NOTE(review): the line assigning req = raw_req->req is missing from
 * this excerpt.
 */
static int raw_free_request(struct f_raw *raw, int nr)
	struct raw_request *raw_req;
	struct usb_request *req;

	raw_req = find_request(raw, nr);

	/* drop the mmap hand-off reference if it points here */
	if (raw->allocated_req == raw_req)
		raw->allocated_req = NULL;

	/* FIXME: spinlocking? */

	usb_ep_dequeue(raw->out, req);

	dma_free_coherent(&raw->graw.gadget->dev, raw_req->len, req->buf,

	usb_ep_free_request(raw->out, req);
	list_del(&raw_req->list);
/* Fill @st with the status/actual byte count of request st->nr and
 * clear its completed flag (one-shot readout).
 * NOTE(review): the !queued guard suggests only idle requests report
 * here — confirm against the missing else-branch lines.
 */
static int raw_get_request_status(struct f_raw *raw,
		struct raw_request_status *st)
	struct raw_request *raw_req;

	raw_req = find_request(raw, st->nr);

	if (!raw_req->queued) {
		st->status = raw_req->req->status;
		st->nr_bytes = raw_req->req->actual;
		raw_req->completed = 0;
/* Build a bitmask of pooled request numbers for userland
 * (RAW_GET_COMPLETION_MAP). NOTE(review): the line guarding on
 * req->completed appears to be missing from this excerpt — presumably
 * only completed requests set their bit; confirm with the full source.
 */
static void get_completion_map(struct f_raw *raw, unsigned int *mask_out)
	struct raw_request *req;
	unsigned int mask = 0;

	list_for_each_entry(req, &raw->read_pool, list)

			mask |= (1 << req->nr);
/* Character-device ioctl dispatcher; serialized by raw->mutex.
 * NOTE(review): copy_from_user()/copy_to_user() return the number of
 * bytes NOT copied (a positive count), not -errno — if the missing
 * lines propagate `status` directly to userspace, failures should be
 * converted to -EFAULT; confirm against the full source.
 */
static long fraw_ioctl(struct file *filp, unsigned code, unsigned long value)
	struct f_raw *raw = filp->private_data;
	struct usb_ep *ep = raw->out;

	struct raw_request_status req_st;
	struct raw_queue_request que_req;

	mutex_lock(&raw->mutex);

	case RAW_FIFO_STATUS:
		status = usb_ep_fifo_status(ep);

		usb_ep_fifo_flush(ep);

		status = usb_ep_clear_halt(ep);

	case RAW_ALLOC_REQUEST:
		/* only one request may await mmap() at a time */
		if (raw->allocated_req != NULL) {

		/* value is the buffer length: page-aligned, bounded */
		if (value > MAX_REQUEST_LEN || (value % PAGE_SIZE) != 0) {

		raw->allocated_req = raw_alloc_request(raw, value);
		if (raw->allocated_req == NULL) {

		status = raw->allocated_req->nr;	/* handle for userland */

	case RAW_QUEUE_REQUEST:
		status = copy_from_user(&que_req, (void __user *) value,

		status = raw_queue_request(raw, &que_req);

	case RAW_FREE_REQUEST:
		status = raw_free_request(raw, value);

	case RAW_GET_COMPLETION_MAP:
		get_completion_map(raw, &map);
		status = put_user(map, (unsigned int __user *) value);

	case RAW_GET_REQUEST_STATUS:
		status = copy_from_user(&req_st, (void __user *) value,

		status = raw_get_request_status(raw, &req_st);

		status = copy_to_user((void __user *) value, &req_st,

	mutex_unlock(&raw->mutex);
/* mmap() the buffer of the request previously handed out by
 * RAW_ALLOC_REQUEST; the mapping must cover exactly the allocated
 * length. On success the hand-off slot is cleared.
 * NOTE(review): the line assigning req (presumably raw_req->req) is
 * missing from this excerpt.
 */
static int fraw_mmap(struct file *filp, struct vm_area_struct *vma)
	size_t size = vma->vm_end - vma->vm_start;
	struct f_raw *raw = filp->private_data;
	struct raw_request *raw_req;
	struct usb_request *req;

	mutex_lock(&raw->mutex);
	raw_req = raw->allocated_req;
	if (raw_req == NULL) {

	/* whole-buffer mappings only */
	if (size != raw_req->len) {

	vma->vm_private_data = raw;

	ret = dma_mmap_coherent(&raw->graw.gadget->dev, vma, req->buf,
			req->dma, raw_req->len);

	raw->allocated_req = NULL;	/* consumed */

	mutex_unlock(&raw->mutex);
/* open(): bind the file to the singleton function instance.
 * NOTE(review): no NULL check of the_raw is visible in this excerpt —
 * opening before the function is bound may hand out a NULL
 * private_data; confirm against the full source.
 */
static int fraw_open(struct inode *inode, struct file *filp)
	filp->private_data = the_raw;
/* release(): free every pooled request and detach the file. */
static int fraw_release(struct inode *inode, struct file *filp)
	struct f_raw *raw = filp->private_data;

	/* drain the pool; raw_free_request() unlinks each entry */
	while (!list_empty(&raw->read_pool)) {
		struct raw_request *req;

		req = list_first_entry(&raw->read_pool, struct raw_request,

		raw_free_request(raw, req->nr);

	filp->private_data = NULL;
/* poll(): register on every pooled request's waitqueue and report
 * readable as soon as any request has completed.
 */
static unsigned int fraw_poll(struct file *filp, struct poll_table_struct *pt)
	struct f_raw *raw = filp->private_data;
	struct raw_request *req;

	mutex_lock(&raw->mutex);
	list_for_each_entry(req, &raw->read_pool, list) {
		poll_wait(filp, &req->wait, pt);

		if (req->completed) {
			ret = POLLIN | POLLRDNORM;

	mutex_unlock(&raw->mutex);
/* chardev entry points; .open/.mmap/.poll initializer lines are missing
 * from this excerpt. NOTE(review): should be `static const struct
 * file_operations` per current kernel convention.
 */
static struct file_operations fraw_fops = {
	.owner		= THIS_MODULE,
	.release	= fraw_release,
	.unlocked_ioctl	= fraw_ioctl,
580 /*-------------------------------------------------------------------------*/
582 static int __init raw_bind(struct usb_configuration *c, struct usb_function *f)
584 struct usb_composite_dev *cdev = c->cdev;
585 struct f_raw *raw = func_to_raw(f);
590 /* allocate instance-specific interface IDs and patch descriptors */
592 status = usb_interface_id(c, f);
595 raw->intf_id = status;
597 raw_intf.bInterfaceNumber = status;
599 /* allocate instance-specific endpoints */
601 ep = usb_ep_autoconfig(cdev->gadget, &raw_fs_ep_out_desc);
605 ep->driver_data = cdev; /* claim */
607 /* copy descriptors and track endpoint copies */
608 f->descriptors = usb_copy_descriptors(fs_function);
610 raw->fs.raw_out = usb_find_endpoint(fs_function,
611 f->descriptors, &raw_fs_ep_out_desc);
613 /* support all relevant hardware speeds... we expect that when
614 * hardware is dual speed, all bulk-capable endpoints work at
617 if (gadget_is_dualspeed(c->cdev->gadget)) {
618 raw_hs_ep_out_desc.bEndpointAddress =
619 raw_fs_ep_out_desc.bEndpointAddress;
621 /* copy descriptors and track endpoint copies */
622 f->hs_descriptors = usb_copy_descriptors(hs_function);
624 raw->hs.raw_out = usb_find_endpoint(hs_function,
625 f->hs_descriptors, &raw_hs_ep_out_desc);
628 INIT_LIST_HEAD(&raw->read_pool);
629 mutex_init(&raw->mutex);
631 /* create device nodes */
632 raw->class = class_create(THIS_MODULE, "fraw");
633 device_create(raw->class, &cdev->gadget->dev,
634 MKDEV(raw->graw.major, 0), raw, "%s", f->name);
636 cdev->gadget->dev.dma_mask = &raw_dmamask;
637 cdev->gadget->dev.coherent_dma_mask = DMA_64BIT_MASK;
638 raw->graw.gadget = cdev->gadget;
641 DBG(cdev, "raw: %s speed OUT/%s\n",
642 gadget_is_dualspeed(c->cdev->gadget) ? "dual" : "full",
649 class_destroy(raw->class);
652 raw->out->driver_data = NULL;
654 ERROR(cdev, "%s/%p: can't bind, err %d\n", f->name, f, status);
/* unbind: free the descriptor copies made in raw_bind() and remove the
 * device node and class.
 */
static void raw_unbind(struct usb_configuration *c, struct usb_function *f)
	struct f_raw *raw = func_to_raw(f);

	if (gadget_is_dualspeed(c->cdev->gadget))
		usb_free_descriptors(f->hs_descriptors);
	usb_free_descriptors(f->descriptors);
	device_destroy(raw->class, MKDEV(raw->graw.major, 0));
	class_destroy(raw->class);
 * raw_bind_config - add a RAW function to a configuration
 * @c: the configuration to support the RAW instance
 * Context: single threaded during gadget setup
 *
 * Returns zero on success, else negative errno.
 */
static int __init raw_bind_config(struct usb_configuration *c)
	/* allocate string IDs only once across instances */
	if (raw_string_defs[RAW_INTF_IDX].id == 0) {
		status = usb_string_id(c->cdev);

		raw_string_defs[RAW_INTF_IDX].id = status;
		raw_intf.iInterface = status;

	/* allocate and initialize one new instance */
	raw = kzalloc(sizeof(*raw), GFP_KERNEL);

	raw->graw.func.name = "raw";
	raw->graw.func.strings = raw_strings;
	/* descriptors are per-instance copies */
	raw->graw.func.bind = raw_bind;
	raw->graw.func.unbind = raw_unbind;
	raw->graw.func.set_alt = raw_set_alt;
	raw->graw.func.disable = raw_disable;

	status = usb_add_function(c, &raw->graw.func);
 * graw_setup - initialize character driver for one rx
 * @g: gadget to associate with
 *
 * Returns negative errno or zero.
 */
static int __init graw_setup(struct usb_gadget *g)
	graw = kzalloc(sizeof(*graw), GFP_KERNEL);

	/* one dynamic chrdev region, one cdev */
	status = alloc_chrdev_region(&dev, 0, 1, "fraw");

	cdev_init(&graw->chdev, &fraw_fops);
	graw->chdev.owner = THIS_MODULE;

	status = cdev_add(&graw->chdev, dev, 1);

	/* error unwind (fragmentary) */
	/* cdev_put(&graw->cdev); */
	unregister_chrdev_region(graw->dev, 1);
/* module exit: remove the cdev and release the chrdev region —
 * mirrors graw_setup() in reverse order.
 */
static void __exit graw_cleanup(void)
	struct graw *graw = the_graw;

	cdev_del(&graw->chdev);
	/* cdev_put(&graw->chdev); */
	unregister_chrdev_region(graw->dev, 1);