Diffstat (limited to 'drivers/char/mem.c')
-rw-r--r--   drivers/char/mem.c   161
1 file changed, 59 insertions(+), 102 deletions(-)
diff --git a/drivers/char/mem.c b/drivers/char/mem.c
index fba76fb55abf..be832b6f8279 100644
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
@@ -34,6 +34,16 @@
# include <linux/efi.h>
#endif
+static inline unsigned long size_inside_page(unsigned long start,
+ unsigned long size)
+{
+ unsigned long sz;
+
+ sz = PAGE_SIZE - (start & (PAGE_SIZE - 1));
+
+ return min(sz, size);
+}
+
/*
* Architectures vary in how they handle caching for addresses
* outside of main memory.
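For orientation, size_inside_page() returns how many of the requested bytes fit between start and the end of the page that contains it. A minimal userspace sketch of the same arithmetic, with PAGE_SIZE hard-coded to 4096 purely for illustration:

#include <stdio.h>

#define PAGE_SIZE 4096UL        /* assumed page size, demo only */

/* same arithmetic as the helper above, without the kernel's min() macro */
static unsigned long size_inside_page(unsigned long start, unsigned long size)
{
        unsigned long sz = PAGE_SIZE - (start & (PAGE_SIZE - 1));

        return sz < size ? sz : size;
}

int main(void)
{
        /* 100 bytes into a page, 8000 requested: only 3996 fit before the boundary */
        printf("%lu\n", size_inside_page(100, 8000));   /* prints 3996 */
        /* page-aligned start, small request: the request itself is the limit */
        printf("%lu\n", size_inside_page(8192, 123));   /* prints 123 */
        return 0;
}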
@@ -126,9 +136,7 @@ static ssize_t read_mem(struct file * file, char __user * buf,
#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
/* we don't have page 0 mapped on sparc and m68k.. */
if (p < PAGE_SIZE) {
- sz = PAGE_SIZE - p;
- if (sz > count)
- sz = count;
+ sz = size_inside_page(p, count);
if (sz > 0) {
if (clear_user(buf, sz))
return -EFAULT;
@@ -141,15 +149,9 @@ static ssize_t read_mem(struct file * file, char __user * buf,
#endif
while (count > 0) {
- /*
- * Handle first page in case it's not aligned
- */
- if (-p & (PAGE_SIZE - 1))
- sz = -p & (PAGE_SIZE - 1);
- else
- sz = PAGE_SIZE;
+ unsigned long remaining;
- sz = min_t(unsigned long, sz, count);
+ sz = size_inside_page(p, count);
if (!range_is_allowed(p >> PAGE_SHIFT, count))
return -EPERM;
@@ -163,12 +165,10 @@ static ssize_t read_mem(struct file * file, char __user * buf,
if (!ptr)
return -EFAULT;
- if (copy_to_user(buf, ptr, sz)) {
- unxlate_dev_mem_ptr(p, ptr);
- return -EFAULT;
- }
-
+ remaining = copy_to_user(buf, ptr, sz);
unxlate_dev_mem_ptr(p, ptr);
+ if (remaining)
+ return -EFAULT;
buf += sz;
p += sz;
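The net effect in read_mem() is that the copy_to_user() result is latched in the new remaining variable and unxlate_dev_mem_ptr() now runs unconditionally, so the temporary mapping is released on the fault path as well as on success. A sketch of the loop body after the change, reconstructed from the hunks above (not the verbatim file; unshown lines elided):

        sz = size_inside_page(p, count);
        if (!range_is_allowed(p >> PAGE_SHIFT, count))
                return -EPERM;

        /* ... temporary mapping set up, ptr points at the /dev/mem page ... */
        if (!ptr)
                return -EFAULT;

        remaining = copy_to_user(buf, ptr, sz);
        unxlate_dev_mem_ptr(p, ptr);    /* always drop the mapping, even if the copy faulted */
        if (remaining)
                return -EFAULT;

        buf += sz;
        p += sz;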
@@ -196,9 +196,7 @@ static ssize_t write_mem(struct file * file, const char __user * buf,
#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
/* we don't have page 0 mapped on sparc and m68k.. */
if (p < PAGE_SIZE) {
- unsigned long sz = PAGE_SIZE - p;
- if (sz > count)
- sz = count;
+ sz = size_inside_page(p, count);
/* Hmm. Do something? */
buf += sz;
p += sz;
@@ -208,15 +206,7 @@ static ssize_t write_mem(struct file * file, const char __user * buf,
#endif
while (count > 0) {
- /*
- * Handle first page in case it's not aligned
- */
- if (-p & (PAGE_SIZE - 1))
- sz = -p & (PAGE_SIZE - 1);
- else
- sz = PAGE_SIZE;
-
- sz = min_t(unsigned long, sz, count);
+ sz = size_inside_page(p, count);
if (!range_is_allowed(p >> PAGE_SHIFT, sz))
return -EPERM;
@@ -234,16 +224,14 @@ static ssize_t write_mem(struct file * file, const char __user * buf,
}
copied = copy_from_user(ptr, buf, sz);
+ unxlate_dev_mem_ptr(p, ptr);
if (copied) {
written += sz - copied;
- unxlate_dev_mem_ptr(p, ptr);
if (written)
break;
return -EFAULT;
}
- unxlate_dev_mem_ptr(p, ptr);
-
buf += sz;
p += sz;
count -= sz;
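write_mem() gets the same treatment, with one extra wrinkle: on a partial copy_from_user() the bytes that did land are still credited, and -EFAULT is returned only if nothing has been written at all. The resulting fragment, again a sketch reconstructed from the hunk above:

        copied = copy_from_user(ptr, buf, sz);
        unxlate_dev_mem_ptr(p, ptr);            /* release the mapping before deciding what to return */
        if (copied) {
                written += sz - copied;         /* credit whatever did make it */
                if (written)
                        break;                  /* report partial progress */
                return -EFAULT;                 /* nothing written at all: hard failure */
        }
        buf += sz;
        p += sz;
        count -= sz;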
@@ -417,27 +405,18 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
/* we don't have page 0 mapped on sparc and m68k.. */
if (p < PAGE_SIZE && low_count > 0) {
- size_t tmp = PAGE_SIZE - p;
- if (tmp > low_count) tmp = low_count;
- if (clear_user(buf, tmp))
+ sz = size_inside_page(p, low_count);
+ if (clear_user(buf, sz))
return -EFAULT;
- buf += tmp;
- p += tmp;
- read += tmp;
- low_count -= tmp;
- count -= tmp;
+ buf += sz;
+ p += sz;
+ read += sz;
+ low_count -= sz;
+ count -= sz;
}
#endif
while (low_count > 0) {
- /*
- * Handle first page in case it's not aligned
- */
- if (-p & (PAGE_SIZE - 1))
- sz = -p & (PAGE_SIZE - 1);
- else
- sz = PAGE_SIZE;
-
- sz = min_t(unsigned long, sz, low_count);
+ sz = size_inside_page(p, low_count);
/*
* On ia64 if a page has been mapped somewhere as
@@ -461,21 +440,18 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
if (!kbuf)
return -ENOMEM;
while (count > 0) {
- int len = count;
-
- if (len > PAGE_SIZE)
- len = PAGE_SIZE;
- len = vread(kbuf, (char *)p, len);
- if (!len)
+ sz = size_inside_page(p, count);
+ sz = vread(kbuf, (char *)p, sz);
+ if (!sz)
break;
- if (copy_to_user(buf, kbuf, len)) {
+ if (copy_to_user(buf, kbuf, sz)) {
free_page((unsigned long)kbuf);
return -EFAULT;
}
- count -= len;
- buf += len;
- read += len;
- p += len;
+ count -= sz;
+ buf += sz;
+ read += sz;
+ p += sz;
}
free_page((unsigned long)kbuf);
}
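In the vmalloc half of read_kmem(), the local int len is replaced by the shared sz, and size_inside_page() keeps every vread() call within a single page. Roughly, the loop after the change (assembled from the hunk above):

        while (count > 0) {
                sz = size_inside_page(p, count);
                sz = vread(kbuf, (char *)p, sz);
                if (!sz)                        /* nothing more to read here: stop */
                        break;
                if (copy_to_user(buf, kbuf, sz)) {
                        free_page((unsigned long)kbuf);
                        return -EFAULT;
                }
                count -= sz;
                buf += sz;
                read += sz;
                p += sz;
        }
        free_page((unsigned long)kbuf);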
@@ -485,7 +461,7 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
static inline ssize_t
-do_write_kmem(void *p, unsigned long realp, const char __user * buf,
+do_write_kmem(unsigned long p, const char __user *buf,
size_t count, loff_t *ppos)
{
ssize_t written, sz;
@@ -494,14 +470,11 @@ do_write_kmem(void *p, unsigned long realp, const char __user * buf,
written = 0;
#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
/* we don't have page 0 mapped on sparc and m68k.. */
- if (realp < PAGE_SIZE) {
- unsigned long sz = PAGE_SIZE - realp;
- if (sz > count)
- sz = count;
+ if (p < PAGE_SIZE) {
+ sz = size_inside_page(p, count);
/* Hmm. Do something? */
buf += sz;
p += sz;
- realp += sz;
count -= sz;
written += sz;
}
@@ -509,22 +482,15 @@ do_write_kmem(void *p, unsigned long realp, const char __user * buf,
while (count > 0) {
char *ptr;
- /*
- * Handle first page in case it's not aligned
- */
- if (-realp & (PAGE_SIZE - 1))
- sz = -realp & (PAGE_SIZE - 1);
- else
- sz = PAGE_SIZE;
- sz = min_t(unsigned long, sz, count);
+ sz = size_inside_page(p, count);
/*
* On ia64 if a page has been mapped somewhere as
* uncached, then it must also be accessed uncached
* by the kernel or data corruption may occur
*/
- ptr = xlate_dev_kmem_ptr(p);
+ ptr = xlate_dev_kmem_ptr((char *)p);
copied = copy_from_user(ptr, buf, sz);
if (copied) {
@@ -535,7 +501,6 @@ do_write_kmem(void *p, unsigned long realp, const char __user * buf,
}
buf += sz;
p += sz;
- realp += sz;
count -= sz;
written += sz;
}
@@ -554,19 +519,14 @@ static ssize_t write_kmem(struct file * file, const char __user * buf,
unsigned long p = *ppos;
ssize_t wrote = 0;
ssize_t virtr = 0;
- ssize_t written;
char * kbuf; /* k-addr because vwrite() takes vmlist_lock rwlock */
if (p < (unsigned long) high_memory) {
-
- wrote = count;
- if (count > (unsigned long) high_memory - p)
- wrote = (unsigned long) high_memory - p;
-
- written = do_write_kmem((void*)p, p, buf, wrote, ppos);
- if (written != wrote)
- return written;
- wrote = written;
+ unsigned long to_write = min_t(unsigned long, count,
+ (unsigned long)high_memory - p);
+ wrote = do_write_kmem(p, buf, to_write, ppos);
+ if (wrote != to_write)
+ return wrote;
p += wrote;
buf += wrote;
count -= wrote;
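In write_kmem() the open-coded clamp against high_memory collapses into a single min_t(), and the intermediate written variable disappears: do_write_kmem()'s return value is checked directly. As a worked example, with only 16 bytes of low memory left below high_memory and a 4096-byte request, to_write is 16; after the low-memory write, count drops to 4080 and the remainder falls through to the vmalloc path below.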
@@ -577,24 +537,21 @@ static ssize_t write_kmem(struct file * file, const char __user * buf,
if (!kbuf)
return wrote ? wrote : -ENOMEM;
while (count > 0) {
- int len = count;
-
- if (len > PAGE_SIZE)
- len = PAGE_SIZE;
- if (len) {
- written = copy_from_user(kbuf, buf, len);
- if (written) {
- if (wrote + virtr)
- break;
- free_page((unsigned long)kbuf);
- return -EFAULT;
- }
+ unsigned long sz = size_inside_page(p, count);
+ unsigned long n;
+
+ n = copy_from_user(kbuf, buf, sz);
+ if (n) {
+ if (wrote + virtr)
+ break;
+ free_page((unsigned long)kbuf);
+ return -EFAULT;
}
- len = vwrite(kbuf, (char *)p, len);
- count -= len;
- buf += len;
- virtr += len;
- p += len;
+ sz = vwrite(kbuf, (char *)p, sz);
+ count -= sz;
+ buf += sz;
+ virtr += sz;
+ p += sz;
}
free_page((unsigned long)kbuf);
}
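One subtlety: the old if (len) guard around copy_from_user() can be dropped safely because size_inside_page() never returns 0 while count > 0 (PAGE_SIZE - (p & (PAGE_SIZE - 1)) is at least 1), so sz here is always a positive, page-bounded chunk.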