Diffstat (limited to 'drivers/usb/gadget/function/f_fs.c')
-rw-r--r--  drivers/usb/gadget/function/f_fs.c  |  323
1 file changed, 178 insertions(+), 145 deletions(-)
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
index 63314ede7ba6..175c9956cbe3 100644
--- a/drivers/usb/gadget/function/f_fs.c
+++ b/drivers/usb/gadget/function/f_fs.c
@@ -31,6 +31,7 @@
#include <linux/aio.h>
#include <linux/mmu_context.h>
#include <linux/poll.h>
+#include <linux/eventfd.h>
#include "u_fs.h"
#include "u_f.h"
@@ -143,16 +144,17 @@ struct ffs_io_data {
bool read;
struct kiocb *kiocb;
- const struct iovec *iovec;
- unsigned long nr_segs;
- char __user *buf;
- size_t len;
+ struct iov_iter data;
+ const void *to_free;
+ char *buf;
struct mm_struct *mm;
struct work_struct work;
struct usb_ep *ep;
struct usb_request *req;
+
+ struct ffs_data *ffs;
};
struct ffs_desc_helper {
@@ -390,17 +392,20 @@ done_spin:
return ret;
}
+/* Called with ffs->ev.waitq.lock and ffs->mutex held, both released on exit. */
static ssize_t __ffs_ep0_read_events(struct ffs_data *ffs, char __user *buf,
size_t n)
{
/*
- * We are holding ffs->ev.waitq.lock and ffs->mutex and we need
- * to release them.
+ * n cannot be bigger than ffs->ev.count, which cannot be bigger than the
+ * size of the ffs->ev.types array (which is four), so that's how much
+ * space we reserve.
*/
- struct usb_functionfs_event events[n];
+ struct usb_functionfs_event events[ARRAY_SIZE(ffs->ev.types)];
+ const size_t size = n * sizeof *events;
unsigned i = 0;
- memset(events, 0, sizeof events);
+ memset(events, 0, size);
do {
events[i].type = ffs->ev.types[i];
@@ -410,19 +415,15 @@ static ssize_t __ffs_ep0_read_events(struct ffs_data *ffs, char __user *buf,
}
} while (++i < n);
- if (n < ffs->ev.count) {
- ffs->ev.count -= n;
+ ffs->ev.count -= n;
+ if (ffs->ev.count)
memmove(ffs->ev.types, ffs->ev.types + n,
ffs->ev.count * sizeof *ffs->ev.types);
- } else {
- ffs->ev.count = 0;
- }
spin_unlock_irq(&ffs->ev.waitq.lock);
mutex_unlock(&ffs->mutex);
- return unlikely(__copy_to_user(buf, events, sizeof events))
- ? -EFAULT : sizeof events;
+ return unlikely(__copy_to_user(buf, events, size)) ? -EFAULT : size;
}
static ssize_t ffs_ep0_read(struct file *file, char __user *buf,
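A hedged userspace sketch (illustrative, not taken from this patch): __ffs_ep0_read_events() copies out at most four struct usb_functionfs_event records per read, so a reader can drain the queue with a four-element buffer. ep0_fd is a hypothetical descriptor for an already-opened ep0 file.

#include <stdio.h>
#include <unistd.h>
#include <linux/usb/functionfs.h>

/* Read whatever events are currently queued on ep0; the kernel hands
 * back whole struct usb_functionfs_event records, at most four. */
static void drain_ep0_events(int ep0_fd)
{
	struct usb_functionfs_event ev[4];
	ssize_t n = read(ep0_fd, ev, sizeof(ev));
	ssize_t i;

	for (i = 0; n > 0 && i < n / (ssize_t)sizeof(ev[0]); i++)
		printf("event type %u\n", (unsigned)ev[i].type);
}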
@@ -606,6 +607,8 @@ static unsigned int ffs_ep0_poll(struct file *file, poll_table *wait)
}
case FFS_CLOSING:
break;
+ case FFS_DEACTIVATED:
+ break;
}
mutex_unlock(&ffs->mutex);
@@ -645,39 +648,23 @@ static void ffs_user_copy_worker(struct work_struct *work)
io_data->req->actual;
if (io_data->read && ret > 0) {
- int i;
- size_t pos = 0;
-
- /*
- * Since req->length may be bigger than io_data->len (after
- * being rounded up to maxpacketsize), we may end up with more
- * data then user space has space for.
- */
- ret = min_t(int, ret, io_data->len);
-
use_mm(io_data->mm);
- for (i = 0; i < io_data->nr_segs; i++) {
- size_t len = min_t(size_t, ret - pos,
- io_data->iovec[i].iov_len);
- if (!len)
- break;
- if (unlikely(copy_to_user(io_data->iovec[i].iov_base,
- &io_data->buf[pos], len))) {
- ret = -EFAULT;
- break;
- }
- pos += len;
- }
+ ret = copy_to_iter(io_data->buf, ret, &io_data->data);
+ if (iov_iter_count(&io_data->data))
+ ret = -EFAULT;
unuse_mm(io_data->mm);
}
aio_complete(io_data->kiocb, ret, ret);
+ if (io_data->ffs->ffs_eventfd && !io_data->kiocb->ki_eventfd)
+ eventfd_signal(io_data->ffs->ffs_eventfd, 1);
+
usb_ep_free_request(io_data->ep, io_data->req);
io_data->kiocb->private = NULL;
if (io_data->read)
- kfree(io_data->iovec);
+ kfree(io_data->to_free);
kfree(io_data->buf);
kfree(io_data);
}
@@ -736,6 +723,7 @@ static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data)
* before the waiting completes, so do not assign to 'gadget' earlier
*/
struct usb_gadget *gadget = epfile->ffs->gadget;
+ size_t copied;
spin_lock_irq(&epfile->ffs->eps_lock);
/* In the meantime, endpoint got disabled or changed. */
@@ -743,34 +731,21 @@ static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data)
spin_unlock_irq(&epfile->ffs->eps_lock);
return -ESHUTDOWN;
}
+ data_len = iov_iter_count(&io_data->data);
/*
* Controller may require buffer size to be aligned to
* maxpacketsize of an out endpoint.
*/
- data_len = io_data->read ?
- usb_ep_align_maybe(gadget, ep->ep, io_data->len) :
- io_data->len;
+ if (io_data->read)
+ data_len = usb_ep_align_maybe(gadget, ep->ep, data_len);
spin_unlock_irq(&epfile->ffs->eps_lock);
data = kmalloc(data_len, GFP_KERNEL);
if (unlikely(!data))
return -ENOMEM;
- if (io_data->aio && !io_data->read) {
- int i;
- size_t pos = 0;
- for (i = 0; i < io_data->nr_segs; i++) {
- if (unlikely(copy_from_user(&data[pos],
- io_data->iovec[i].iov_base,
- io_data->iovec[i].iov_len))) {
- ret = -EFAULT;
- goto error;
- }
- pos += io_data->iovec[i].iov_len;
- }
- } else {
- if (!io_data->read &&
- unlikely(__copy_from_user(data, io_data->buf,
- io_data->len))) {
+ if (!io_data->read) {
+ copied = copy_from_iter(data, data_len, &io_data->data);
+ if (copied != data_len) {
ret = -EFAULT;
goto error;
}
@@ -826,6 +801,7 @@ static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data)
io_data->buf = data;
io_data->ep = ep->ep;
io_data->req = req;
+ io_data->ffs = epfile->ffs;
req->context = io_data;
req->complete = ffs_epfile_async_io_complete;
@@ -868,10 +844,8 @@ static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data)
*/
ret = ep->status;
if (io_data->read && ret > 0) {
- ret = min_t(size_t, ret, io_data->len);
-
- if (unlikely(copy_to_user(io_data->buf,
- data, ret)))
+ ret = copy_to_iter(data, ret, &io_data->data);
+ if (unlikely(iov_iter_count(&io_data->data)))
ret = -EFAULT;
}
}
@@ -890,37 +864,6 @@ error:
return ret;
}
-static ssize_t
-ffs_epfile_write(struct file *file, const char __user *buf, size_t len,
- loff_t *ptr)
-{
- struct ffs_io_data io_data;
-
- ENTER();
-
- io_data.aio = false;
- io_data.read = false;
- io_data.buf = (char * __user)buf;
- io_data.len = len;
-
- return ffs_epfile_io(file, &io_data);
-}
-
-static ssize_t
-ffs_epfile_read(struct file *file, char __user *buf, size_t len, loff_t *ptr)
-{
- struct ffs_io_data io_data;
-
- ENTER();
-
- io_data.aio = false;
- io_data.read = true;
- io_data.buf = buf;
- io_data.len = len;
-
- return ffs_epfile_io(file, &io_data);
-}
-
static int
ffs_epfile_open(struct inode *inode, struct file *file)
{
@@ -957,67 +900,86 @@ static int ffs_aio_cancel(struct kiocb *kiocb)
return value;
}
-static ssize_t ffs_epfile_aio_write(struct kiocb *kiocb,
- const struct iovec *iovec,
- unsigned long nr_segs, loff_t loff)
+static ssize_t ffs_epfile_write_iter(struct kiocb *kiocb, struct iov_iter *from)
{
- struct ffs_io_data *io_data;
+ struct ffs_io_data io_data, *p = &io_data;
+ ssize_t res;
ENTER();
- io_data = kmalloc(sizeof(*io_data), GFP_KERNEL);
- if (unlikely(!io_data))
- return -ENOMEM;
+ if (!is_sync_kiocb(kiocb)) {
+ p = kmalloc(sizeof(io_data), GFP_KERNEL);
+ if (unlikely(!p))
+ return -ENOMEM;
+ p->aio = true;
+ } else {
+ p->aio = false;
+ }
- io_data->aio = true;
- io_data->read = false;
- io_data->kiocb = kiocb;
- io_data->iovec = iovec;
- io_data->nr_segs = nr_segs;
- io_data->len = kiocb->ki_nbytes;
- io_data->mm = current->mm;
+ p->read = false;
+ p->kiocb = kiocb;
+ p->data = *from;
+ p->mm = current->mm;
- kiocb->private = io_data;
+ kiocb->private = p;
kiocb_set_cancel_fn(kiocb, ffs_aio_cancel);
- return ffs_epfile_io(kiocb->ki_filp, io_data);
+ res = ffs_epfile_io(kiocb->ki_filp, p);
+ if (res == -EIOCBQUEUED)
+ return res;
+ if (p->aio)
+ kfree(p);
+ else
+ *from = p->data;
+ return res;
}
-static ssize_t ffs_epfile_aio_read(struct kiocb *kiocb,
- const struct iovec *iovec,
- unsigned long nr_segs, loff_t loff)
+static ssize_t ffs_epfile_read_iter(struct kiocb *kiocb, struct iov_iter *to)
{
- struct ffs_io_data *io_data;
- struct iovec *iovec_copy;
+ struct ffs_io_data io_data, *p = &io_data;
+ ssize_t res;
ENTER();
- iovec_copy = kmalloc_array(nr_segs, sizeof(*iovec_copy), GFP_KERNEL);
- if (unlikely(!iovec_copy))
- return -ENOMEM;
-
- memcpy(iovec_copy, iovec, sizeof(struct iovec)*nr_segs);
-
- io_data = kmalloc(sizeof(*io_data), GFP_KERNEL);
- if (unlikely(!io_data)) {
- kfree(iovec_copy);
- return -ENOMEM;
+ if (!is_sync_kiocb(kiocb)) {
+ p = kmalloc(sizeof(io_data), GFP_KERNEL);
+ if (unlikely(!p))
+ return -ENOMEM;
+ p->aio = true;
+ } else {
+ p->aio = false;
}
- io_data->aio = true;
- io_data->read = true;
- io_data->kiocb = kiocb;
- io_data->iovec = iovec_copy;
- io_data->nr_segs = nr_segs;
- io_data->len = kiocb->ki_nbytes;
- io_data->mm = current->mm;
+ p->read = true;
+ p->kiocb = kiocb;
+ if (p->aio) {
+ p->to_free = dup_iter(&p->data, to, GFP_KERNEL);
+ if (!p->to_free) {
+ kfree(p);
+ return -ENOMEM;
+ }
+ } else {
+ p->data = *to;
+ p->to_free = NULL;
+ }
+ p->mm = current->mm;
- kiocb->private = io_data;
+ kiocb->private = p;
kiocb_set_cancel_fn(kiocb, ffs_aio_cancel);
- return ffs_epfile_io(kiocb->ki_filp, io_data);
+ res = ffs_epfile_io(kiocb->ki_filp, p);
+ if (res == -EIOCBQUEUED)
+ return res;
+
+ if (p->aio) {
+ kfree(p->to_free);
+ kfree(p);
+ } else {
+ *to = p->data;
+ }
+ return res;
}
static int
@@ -1097,10 +1059,10 @@ static const struct file_operations ffs_epfile_operations = {
.llseek = no_llseek,
.open = ffs_epfile_open,
- .write = ffs_epfile_write,
- .read = ffs_epfile_read,
- .aio_write = ffs_epfile_aio_write,
- .aio_read = ffs_epfile_aio_read,
+ .write = new_sync_write,
+ .read = new_sync_read,
+ .write_iter = ffs_epfile_write_iter,
+ .read_iter = ffs_epfile_read_iter,
.release = ffs_epfile_release,
.unlocked_ioctl = ffs_epfile_ioctl,
};
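A small usage note (editorial): with the switch to .read_iter/.write_iter above, plain read(2)/write(2) on the endpoint files are routed through the generic new_sync_read()/new_sync_write() helpers, so nothing changes from userspace. A minimal sketch, assuming ep_fd is a hypothetical, already-opened epN file:

#include <stdio.h>
#include <unistd.h>

/* A plain read() on an OUT endpoint file now ends up in
 * ffs_epfile_read_iter() via new_sync_read(). */
static ssize_t read_one_chunk(int ep_fd, void *buf, size_t len)
{
	ssize_t n = read(ep_fd, buf, len);

	if (n < 0)
		perror("read");
	return n;
}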
@@ -1180,6 +1142,7 @@ struct ffs_sb_fill_data {
struct ffs_file_perms perms;
umode_t root_mode;
const char *dev_name;
+ bool no_disconnect;
struct ffs_data *ffs_data;
};
@@ -1250,6 +1213,12 @@ static int ffs_fs_parse_opts(struct ffs_sb_fill_data *data, char *opts)
/* Interpret option */
switch (eq - opts) {
+ case 13:
+ if (!memcmp(opts, "no_disconnect", 13))
+ data->no_disconnect = !!value;
+ else
+ goto invalid;
+ break;
case 5:
if (!memcmp(opts, "rmode", 5))
data->root_mode = (value & 0555) | S_IFDIR;
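A hedged usage sketch (not from this patch): "no_disconnect" is parsed above as a boolean mount option, so a functionfs instance can be mounted with it enabled roughly like this (the device name and mount point are hypothetical):

#include <stdio.h>
#include <sys/mount.h>

/* Keep the ffs instance in FFS_DEACTIVATED instead of closing it when
 * the last endpoint file is released (see ffs_data_closed() below). */
static int mount_ffs_no_disconnect(void)
{
	if (mount("my-ffs", "/dev/ffs-my-ffs", "functionfs", 0,
		  "no_disconnect=1")) {
		perror("mount functionfs");
		return -1;
	}
	return 0;
}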
@@ -1314,6 +1283,7 @@ ffs_fs_mount(struct file_system_type *t, int flags,
.gid = GLOBAL_ROOT_GID,
},
.root_mode = S_IFDIR | 0500,
+ .no_disconnect = false,
};
struct dentry *rv;
int ret;
@@ -1330,6 +1300,7 @@ ffs_fs_mount(struct file_system_type *t, int flags,
if (unlikely(!ffs))
return ERR_PTR(-ENOMEM);
ffs->file_perms = data.perms;
+ ffs->no_disconnect = data.no_disconnect;
ffs->dev_name = kstrdup(dev_name, GFP_KERNEL);
if (unlikely(!ffs->dev_name)) {
@@ -1361,6 +1332,7 @@ ffs_fs_kill_sb(struct super_block *sb)
kill_litter_super(sb);
if (sb->s_fs_info) {
ffs_release_dev(sb->s_fs_info);
+ ffs_data_closed(sb->s_fs_info);
ffs_data_put(sb->s_fs_info);
}
}
@@ -1417,7 +1389,11 @@ static void ffs_data_opened(struct ffs_data *ffs)
ENTER();
atomic_inc(&ffs->ref);
- atomic_inc(&ffs->opened);
+ if (atomic_add_return(1, &ffs->opened) == 1 &&
+ ffs->state == FFS_DEACTIVATED) {
+ ffs->state = FFS_CLOSING;
+ ffs_data_reset(ffs);
+ }
}
static void ffs_data_put(struct ffs_data *ffs)
@@ -1439,6 +1415,21 @@ static void ffs_data_closed(struct ffs_data *ffs)
ENTER();
if (atomic_dec_and_test(&ffs->opened)) {
+ if (ffs->no_disconnect) {
+ ffs->state = FFS_DEACTIVATED;
+ if (ffs->epfiles) {
+ ffs_epfiles_destroy(ffs->epfiles,
+ ffs->eps_count);
+ ffs->epfiles = NULL;
+ }
+ if (ffs->setup_state == FFS_SETUP_PENDING)
+ __ffs_ep0_stall(ffs);
+ } else {
+ ffs->state = FFS_CLOSING;
+ ffs_data_reset(ffs);
+ }
+ }
+ if (atomic_read(&ffs->opened) < 0) {
ffs->state = FFS_CLOSING;
ffs_data_reset(ffs);
}
@@ -1480,6 +1471,9 @@ static void ffs_data_clear(struct ffs_data *ffs)
if (ffs->epfiles)
ffs_epfiles_destroy(ffs->epfiles, ffs->eps_count);
+ if (ffs->ffs_eventfd)
+ eventfd_ctx_put(ffs->ffs_eventfd);
+
kfree(ffs->raw_descs_data);
kfree(ffs->raw_strings);
kfree(ffs->stringtabs);
@@ -1581,10 +1575,10 @@ static int ffs_epfiles_create(struct ffs_data *ffs)
mutex_init(&epfile->mutex);
init_waitqueue_head(&epfile->wait);
if (ffs->user_flags & FUNCTIONFS_VIRTUAL_ADDR)
- sprintf(epfiles->name, "ep%02x", ffs->eps_addrmap[i]);
+ sprintf(epfile->name, "ep%02x", ffs->eps_addrmap[i]);
else
- sprintf(epfiles->name, "ep%u", i);
- epfile->dentry = ffs_sb_create_file(ffs->sb, epfiles->name,
+ sprintf(epfile->name, "ep%u", i);
+ epfile->dentry = ffs_sb_create_file(ffs->sb, epfile->name,
epfile,
&ffs_epfile_operations);
if (unlikely(!epfile->dentry)) {
@@ -1616,7 +1610,6 @@ static void ffs_epfiles_destroy(struct ffs_epfile *epfiles, unsigned count)
kfree(epfiles);
}
-
static void ffs_func_eps_disable(struct ffs_function *func)
{
struct ffs_ep *ep = func->eps;
@@ -1629,10 +1622,12 @@ static void ffs_func_eps_disable(struct ffs_function *func)
/* pending requests get nuked */
if (likely(ep->ep))
usb_ep_disable(ep->ep);
- epfile->ep = NULL;
-
++ep;
- ++epfile;
+
+ if (epfile) {
+ epfile->ep = NULL;
+ ++epfile;
+ }
} while (--count);
spin_unlock_irqrestore(&func->ffs->eps_lock, flags);
}
@@ -2138,7 +2133,8 @@ static int __ffs_data_got_descs(struct ffs_data *ffs,
FUNCTIONFS_HAS_HS_DESC |
FUNCTIONFS_HAS_SS_DESC |
FUNCTIONFS_HAS_MS_OS_DESC |
- FUNCTIONFS_VIRTUAL_ADDR)) {
+ FUNCTIONFS_VIRTUAL_ADDR |
+ FUNCTIONFS_EVENTFD)) {
ret = -ENOSYS;
goto error;
}
@@ -2149,6 +2145,20 @@ static int __ffs_data_got_descs(struct ffs_data *ffs,
goto error;
}
+ if (flags & FUNCTIONFS_EVENTFD) {
+ if (len < 4)
+ goto error;
+ ffs->ffs_eventfd =
+ eventfd_ctx_fdget((int)get_unaligned_le32(data));
+ if (IS_ERR(ffs->ffs_eventfd)) {
+ ret = PTR_ERR(ffs->ffs_eventfd);
+ ffs->ffs_eventfd = NULL;
+ goto error;
+ }
+ data += 4;
+ len -= 4;
+ }
+
/* Read fs_count, hs_count and ss_count (if present) */
for (i = 0; i < 3; ++i) {
if (!(flags & (1 << i))) {
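A hedged sketch of the userspace side (not part of the patch): when FUNCTIONFS_EVENTFD is set, the code above expects a little-endian 32-bit eventfd file descriptor immediately after the flags word and before the fs/hs/ss counts. Assuming the usual v2 descriptors header from uapi <linux/usb/functionfs.h>, the extra field could be appended like this:

#include <endian.h>
#include <stdint.h>
#include <string.h>
#include <sys/eventfd.h>

/* Append the eventfd (e.g. from eventfd(0, 0)) right after the header's
 * flags field; the kernel reads it back with get_unaligned_le32(). */
static size_t put_ffs_eventfd(uint8_t *blob, size_t off, int efd)
{
	uint32_t le = htole32((uint32_t)efd);

	memcpy(blob + off, &le, sizeof(le));
	return off + sizeof(le);
}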
@@ -2377,6 +2387,13 @@ static void __ffs_event_add(struct ffs_data *ffs,
if (ffs->setup_state == FFS_SETUP_PENDING)
ffs->setup_state = FFS_SETUP_CANCELLED;
+ /*
+ * Logic of this function guarantees that there are at most four pending
+ * events on the ffs->ev.types queue. This is important because the queue
+ * has space for four elements only and __ffs_ep0_read_events() depends
+ * on that limit as well. If more event types are added, those
+ * limits have to be revisited or guaranteed to still hold.
+ */
switch (type) {
case FUNCTIONFS_RESUME:
rem_type2 = FUNCTIONFS_SUSPEND;
@@ -2416,6 +2433,8 @@ static void __ffs_event_add(struct ffs_data *ffs,
pr_vdebug("adding event %d\n", type);
ffs->ev.types[ffs->ev.count++] = type;
wake_up_locked(&ffs->ev.waitq);
+ if (ffs->ffs_eventfd)
+ eventfd_signal(ffs->ffs_eventfd, 1);
}
static void ffs_event_add(struct ffs_data *ffs,
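A hedged userspace sketch (not from this patch): each queued event, and each AIO completion that has no per-kiocb eventfd (see ffs_user_copy_worker() above), signals the FUNCTIONFS_EVENTFD counter by one. A reader can poll that eventfd and drain its 8-byte counter before fetching the actual events from ep0; efd is assumed to be the descriptor passed in the descriptors blob:

#include <poll.h>
#include <stdint.h>
#include <unistd.h>

/* Block until the functionfs eventfd fires, then drain its counter.
 * Returns the number of accumulated signals, or -1 on error. */
static int64_t wait_ffs_signal(int efd)
{
	struct pollfd pfd = { .fd = efd, .events = POLLIN };
	uint64_t count;

	if (poll(&pfd, 1, -1) < 0)
		return -1;
	if (read(efd, &count, sizeof(count)) != sizeof(count))
		return -1;
	return (int64_t)count;	/* the events themselves still come from ep0 */
}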
@@ -2888,6 +2907,13 @@ static int ffs_func_bind(struct usb_configuration *c,
/* Other USB function hooks *************************************************/
+static void ffs_reset_work(struct work_struct *work)
+{
+ struct ffs_data *ffs = container_of(work,
+ struct ffs_data, reset_work);
+ ffs_data_reset(ffs);
+}
+
static int ffs_func_set_alt(struct usb_function *f,
unsigned interface, unsigned alt)
{
@@ -2904,6 +2930,13 @@ static int ffs_func_set_alt(struct usb_function *f,
if (ffs->func)
ffs_func_eps_disable(ffs->func);
+ if (ffs->state == FFS_DEACTIVATED) {
+ ffs->state = FFS_CLOSING;
+ INIT_WORK(&ffs->reset_work, ffs_reset_work);
+ schedule_work(&ffs->reset_work);
+ return -ENODEV;
+ }
+
if (ffs->state != FFS_ACTIVE)
return -ENODEV;