author     Jean Delvare <jdelvare@suse.de>       2007-04-26 00:44:22 -0700
committer  David S. Miller <davem@davemloft.net> 2007-04-26 00:44:22 -0700
commit     eefa3906283a2b60a6d02a2cda593a7d7d7946c5 (patch)
tree       a4e1f3b8dca04b8dff3cd99dc43f771f798558fb /net/core
parent     28d8909bc790d936ce33f4402adf7577533bbd4b (diff)
[NET]: Clean up sk_buff walkers.
I noticed recently that, in skb_checksum(), "offset" and "start" are
essentially the same thing and have the same value throughout the
function, despite being computed differently. Using a single variable
allows some cleanups and makes the skb_checksum() function smaller,
more readable, and presumably marginally faster.

We appear to have many other "sk_buff walker" functions built on the
exact same model, so the cleanup applies to them, too. Here is a list
of the functions I found to be affected:

net/appletalk/ddp.c:atalk_sum_skb()
net/core/datagram.c:skb_copy_datagram_iovec()
net/core/datagram.c:skb_copy_and_csum_datagram()
net/core/skbuff.c:skb_copy_bits()
net/core/skbuff.c:skb_store_bits()
net/core/skbuff.c:skb_checksum()
net/core/skbuff.c:skb_copy_and_csum_bits()
net/core/user_dma.c:dma_skb_copy_datagram_iovec()
net/xfrm/xfrm_algo.c:skb_icv_walk()
net/xfrm/xfrm_algo.c:skb_to_sgvec()

OTOH, I admit I'm a bit surprised: the cleanup is rather obvious, so I'm
really wondering if I am missing something. Can anyone please comment on
this?

Signed-off-by: Jean Delvare <jdelvare@suse.de>
Signed-off-by: David S. Miller <davem@davemloft.net>
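To make the pattern concrete, the following is a minimal, standalone
userspace sketch of the walker shape these functions share. The fake_skb
and fake_frag types and fake_skb_copy_bits() are hypothetical names used
only for illustration (this is not kernel code); the function mimics the
bookkeeping of the pre-patch skb_copy_bits(), where "start" counts the
bytes that precede the current block and "offset - start" is the position
inside it.

/*
 * Illustration only: a simplified, standalone model of the sk_buff
 * walker pattern.  fake_skb/fake_frag are hypothetical stand-ins for
 * struct sk_buff and skb_frag_t; no kernel headers are involved.
 */
#include <stdio.h>
#include <string.h>

struct fake_frag { const char *data; int size; };

struct fake_skb {
        const char *head;               /* linear header data */
        int headlen;                    /* bytes in the linear header */
        struct fake_frag frags[4];      /* paged fragments */
        int nr_frags;
};

/* Copy len bytes starting at offset, in the pre-patch walker style. */
static int fake_skb_copy_bits(const struct fake_skb *skb, int offset,
                              char *to, int len)
{
        int start = skb->headlen;
        int i, copy = start - offset;

        /* Copy header. */
        if (copy > 0) {
                if (copy > len)
                        copy = len;
                memcpy(to, skb->head + offset, copy);
                if ((len -= copy) == 0)
                        return 0;
                offset += copy;
                to += copy;
        }

        /* Walk the paged fragments. */
        for (i = 0; i < skb->nr_frags; i++) {
                int end = start + skb->frags[i].size;

                if ((copy = end - offset) > 0) {
                        if (copy > len)
                                copy = len;
                        /* offset - start locates us inside this frag */
                        memcpy(to, skb->frags[i].data + offset - start, copy);
                        if ((len -= copy) == 0)
                                return 0;
                        offset += copy;
                        to += copy;
                }
                start = end;
        }
        return len ? -1 : 0;    /* -1: walked past the end */
}

int main(void)
{
        struct fake_skb skb = {
                .head = "hdr-", .headlen = 4,
                .frags = { { "frag0-", 6 }, { "frag1", 5 } },
                .nr_frags = 2,
        };
        char out[16] = { 0 };

        /* Copy 10 bytes starting inside the header (offset 2). */
        if (fake_skb_copy_bits(&skb, 2, out, 10) == 0)
                printf("%s\n", out);    /* prints "r-frag0-fr" */
        return 0;
}

In this walk, once the header copy has run, offset has caught up with
start, so offset - start is zero in each fragment iteration; this is the
observation the commit message describes.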
Diffstat (limited to 'net/core')
-rw-r--r--  net/core/datagram.c   50
-rw-r--r--  net/core/skbuff.c    122
-rw-r--r--  net/core/user_dma.c   25
3 files changed, 69 insertions, 128 deletions
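For contrast, here is the same sketch with the transformation from the
hunks below applied by hand: "start" is dropped, "end" is recomputed from
"offset", and the intra-fragment offset disappears. It reuses the
hypothetical fake_skb/fake_frag types defined in the sketch above and only
mirrors the shape of the patched skb_copy_bits(); it is an illustration,
not the kernel code itself.

/* Post-patch shape of the walker; reuses fake_skb/fake_frag from above. */
static int fake_skb_copy_bits_clean(const struct fake_skb *skb, int offset,
                                    char *to, int len)
{
        int end = skb->headlen;
        int i, copy = end - offset;

        /* Copy header. */
        if (copy > 0) {
                if (copy > len)
                        copy = len;
                memcpy(to, skb->head + offset, copy);
                if ((len -= copy) == 0)
                        return 0;
                offset += copy;
                to += copy;
        }

        /* Walk the paged fragments. */
        for (i = 0; i < skb->nr_frags; i++) {
                end = offset + skb->frags[i].size;
                if ((copy = end - offset) > 0) {
                        if (copy > len)
                                copy = len;
                        /* no offset - start: each frag is read from 0 */
                        memcpy(to, skb->frags[i].data, copy);
                        if ((len -= copy) == 0)
                                return 0;
                        offset += copy;
                        to += copy;
                }
        }
        return len ? -1 : 0;
}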
diff --git a/net/core/datagram.c b/net/core/datagram.c
index cb056f476126..e1afa7679445 100644
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -247,8 +247,8 @@ EXPORT_SYMBOL(skb_kill_datagram);
int skb_copy_datagram_iovec(const struct sk_buff *skb, int offset,
struct iovec *to, int len)
{
- int start = skb_headlen(skb);
- int i, copy = start - offset;
+ int end = skb_headlen(skb);
+ int i, copy = end - offset;
/* Copy header. */
if (copy > 0) {
@@ -263,11 +263,9 @@ int skb_copy_datagram_iovec(const struct sk_buff *skb, int offset,
/* Copy paged appendix. Hmm... why does this look so complicated? */
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
- int end;
+ BUG_TRAP(len >= 0);
- BUG_TRAP(start <= offset + len);
-
- end = start + skb_shinfo(skb)->frags[i].size;
+ end = offset + skb_shinfo(skb)->frags[i].size;
if ((copy = end - offset) > 0) {
int err;
u8 *vaddr;
@@ -277,8 +275,8 @@ int skb_copy_datagram_iovec(const struct sk_buff *skb, int offset,
if (copy > len)
copy = len;
vaddr = kmap(page);
- err = memcpy_toiovec(to, vaddr + frag->page_offset +
- offset - start, copy);
+ err = memcpy_toiovec(to, vaddr + frag->page_offset,
+ copy);
kunmap(page);
if (err)
goto fault;
@@ -286,30 +284,24 @@ int skb_copy_datagram_iovec(const struct sk_buff *skb, int offset,
return 0;
offset += copy;
}
- start = end;
}
if (skb_shinfo(skb)->frag_list) {
struct sk_buff *list = skb_shinfo(skb)->frag_list;
for (; list; list = list->next) {
- int end;
-
- BUG_TRAP(start <= offset + len);
+ BUG_TRAP(len >= 0);
- end = start + list->len;
+ end = offset + list->len;
if ((copy = end - offset) > 0) {
if (copy > len)
copy = len;
- if (skb_copy_datagram_iovec(list,
- offset - start,
- to, copy))
+ if (skb_copy_datagram_iovec(list, 0, to, copy))
goto fault;
if ((len -= copy) == 0)
return 0;
offset += copy;
}
- start = end;
}
}
if (!len)
@@ -323,9 +315,9 @@ static int skb_copy_and_csum_datagram(const struct sk_buff *skb, int offset,
u8 __user *to, int len,
__wsum *csump)
{
- int start = skb_headlen(skb);
+ int end = skb_headlen(skb);
int pos = 0;
- int i, copy = start - offset;
+ int i, copy = end - offset;
/* Copy header. */
if (copy > 0) {
@@ -344,11 +336,9 @@ static int skb_copy_and_csum_datagram(const struct sk_buff *skb, int offset,
}
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
- int end;
+ BUG_TRAP(len >= 0);
- BUG_TRAP(start <= offset + len);
-
- end = start + skb_shinfo(skb)->frags[i].size;
+ end = offset + skb_shinfo(skb)->frags[i].size;
if ((copy = end - offset) > 0) {
__wsum csum2;
int err = 0;
@@ -360,8 +350,7 @@ static int skb_copy_and_csum_datagram(const struct sk_buff *skb, int offset,
copy = len;
vaddr = kmap(page);
csum2 = csum_and_copy_to_user(vaddr +
- frag->page_offset +
- offset - start,
+ frag->page_offset,
to, copy, 0, &err);
kunmap(page);
if (err)
@@ -373,24 +362,20 @@ static int skb_copy_and_csum_datagram(const struct sk_buff *skb, int offset,
to += copy;
pos += copy;
}
- start = end;
}
if (skb_shinfo(skb)->frag_list) {
struct sk_buff *list = skb_shinfo(skb)->frag_list;
for (; list; list=list->next) {
- int end;
-
- BUG_TRAP(start <= offset + len);
+ BUG_TRAP(len >= 0);
- end = start + list->len;
+ end = offset + list->len;
if ((copy = end - offset) > 0) {
__wsum csum2 = 0;
if (copy > len)
copy = len;
- if (skb_copy_and_csum_datagram(list,
- offset - start,
+ if (skb_copy_and_csum_datagram(list, 0,
to, copy,
&csum2))
goto fault;
@@ -401,7 +386,6 @@ static int skb_copy_and_csum_datagram(const struct sk_buff *skb, int offset,
to += copy;
pos += copy;
}
- start = end;
}
}
if (!len)
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 142257307fa2..32f087b5233e 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -1045,13 +1045,13 @@ pull_pages:
int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len)
{
int i, copy;
- int start = skb_headlen(skb);
+ int end = skb_headlen(skb);
if (offset > (int)skb->len - len)
goto fault;
/* Copy header. */
- if ((copy = start - offset) > 0) {
+ if ((copy = end - offset) > 0) {
if (copy > len)
copy = len;
skb_copy_from_linear_data_offset(skb, offset, to, copy);
@@ -1062,11 +1062,9 @@ int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len)
}
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
- int end;
+ BUG_TRAP(len >= 0);
- BUG_TRAP(start <= offset + len);
-
- end = start + skb_shinfo(skb)->frags[i].size;
+ end = offset + skb_shinfo(skb)->frags[i].size;
if ((copy = end - offset) > 0) {
u8 *vaddr;
@@ -1075,8 +1073,8 @@ int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len)
vaddr = kmap_skb_frag(&skb_shinfo(skb)->frags[i]);
memcpy(to,
- vaddr + skb_shinfo(skb)->frags[i].page_offset+
- offset - start, copy);
+ vaddr + skb_shinfo(skb)->frags[i].page_offset,
+ copy);
kunmap_skb_frag(vaddr);
if ((len -= copy) == 0)
@@ -1084,30 +1082,25 @@ int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len)
offset += copy;
to += copy;
}
- start = end;
}
if (skb_shinfo(skb)->frag_list) {
struct sk_buff *list = skb_shinfo(skb)->frag_list;
for (; list; list = list->next) {
- int end;
-
- BUG_TRAP(start <= offset + len);
+ BUG_TRAP(len >= 0);
- end = start + list->len;
+ end = offset + list->len;
if ((copy = end - offset) > 0) {
if (copy > len)
copy = len;
- if (skb_copy_bits(list, offset - start,
- to, copy))
+ if (skb_copy_bits(list, 0, to, copy))
goto fault;
if ((len -= copy) == 0)
return 0;
offset += copy;
to += copy;
}
- start = end;
}
}
if (!len)
@@ -1132,12 +1125,12 @@ fault:
int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len)
{
int i, copy;
- int start = skb_headlen(skb);
+ int end = skb_headlen(skb);
if (offset > (int)skb->len - len)
goto fault;
- if ((copy = start - offset) > 0) {
+ if ((copy = end - offset) > 0) {
if (copy > len)
copy = len;
skb_copy_to_linear_data_offset(skb, offset, from, copy);
@@ -1149,11 +1142,9 @@ int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len)
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
- int end;
-
- BUG_TRAP(start <= offset + len);
+ BUG_TRAP(len >= 0);
- end = start + frag->size;
+ end = offset + frag->size;
if ((copy = end - offset) > 0) {
u8 *vaddr;
@@ -1161,8 +1152,7 @@ int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len)
copy = len;
vaddr = kmap_skb_frag(frag);
- memcpy(vaddr + frag->page_offset + offset - start,
- from, copy);
+ memcpy(vaddr + frag->page_offset, from, copy);
kunmap_skb_frag(vaddr);
if ((len -= copy) == 0)
@@ -1170,30 +1160,25 @@ int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len)
offset += copy;
from += copy;
}
- start = end;
}
if (skb_shinfo(skb)->frag_list) {
struct sk_buff *list = skb_shinfo(skb)->frag_list;
for (; list; list = list->next) {
- int end;
+ BUG_TRAP(len >= 0);
- BUG_TRAP(start <= offset + len);
-
- end = start + list->len;
+ end = offset + list->len;
if ((copy = end - offset) > 0) {
if (copy > len)
copy = len;
- if (skb_store_bits(list, offset - start,
- from, copy))
+ if (skb_store_bits(list, 0, from, copy))
goto fault;
if ((len -= copy) == 0)
return 0;
offset += copy;
from += copy;
}
- start = end;
}
}
if (!len)
@@ -1210,8 +1195,8 @@ EXPORT_SYMBOL(skb_store_bits);
__wsum skb_checksum(const struct sk_buff *skb, int offset,
int len, __wsum csum)
{
- int start = skb_headlen(skb);
- int i, copy = start - offset;
+ int end = skb_headlen(skb);
+ int i, copy = end - offset;
int pos = 0;
/* Checksum header. */
@@ -1226,11 +1211,9 @@ __wsum skb_checksum(const struct sk_buff *skb, int offset,
}
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
- int end;
-
- BUG_TRAP(start <= offset + len);
+ BUG_TRAP(len >= 0);
- end = start + skb_shinfo(skb)->frags[i].size;
+ end = offset + skb_shinfo(skb)->frags[i].size;
if ((copy = end - offset) > 0) {
__wsum csum2;
u8 *vaddr;
@@ -1239,8 +1222,8 @@ __wsum skb_checksum(const struct sk_buff *skb, int offset,
if (copy > len)
copy = len;
vaddr = kmap_skb_frag(frag);
- csum2 = csum_partial(vaddr + frag->page_offset +
- offset - start, copy, 0);
+ csum2 = csum_partial(vaddr + frag->page_offset,
+ copy, 0);
kunmap_skb_frag(vaddr);
csum = csum_block_add(csum, csum2, pos);
if (!(len -= copy))
@@ -1248,31 +1231,26 @@ __wsum skb_checksum(const struct sk_buff *skb, int offset,
offset += copy;
pos += copy;
}
- start = end;
}
if (skb_shinfo(skb)->frag_list) {
struct sk_buff *list = skb_shinfo(skb)->frag_list;
for (; list; list = list->next) {
- int end;
+ BUG_TRAP(len >= 0);
- BUG_TRAP(start <= offset + len);
-
- end = start + list->len;
+ end = offset + list->len;
if ((copy = end - offset) > 0) {
__wsum csum2;
if (copy > len)
copy = len;
- csum2 = skb_checksum(list, offset - start,
- copy, 0);
+ csum2 = skb_checksum(list, 0, copy, 0);
csum = csum_block_add(csum, csum2, pos);
if ((len -= copy) == 0)
return csum;
offset += copy;
pos += copy;
}
- start = end;
}
}
BUG_ON(len);
@@ -1285,8 +1263,8 @@ __wsum skb_checksum(const struct sk_buff *skb, int offset,
__wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset,
u8 *to, int len, __wsum csum)
{
- int start = skb_headlen(skb);
- int i, copy = start - offset;
+ int end = skb_headlen(skb);
+ int i, copy = end - offset;
int pos = 0;
/* Copy header. */
@@ -1303,11 +1281,9 @@ __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset,
}
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
- int end;
-
- BUG_TRAP(start <= offset + len);
+ BUG_TRAP(len >= 0);
- end = start + skb_shinfo(skb)->frags[i].size;
+ end = offset + skb_shinfo(skb)->frags[i].size;
if ((copy = end - offset) > 0) {
__wsum csum2;
u8 *vaddr;
@@ -1317,9 +1293,8 @@ __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset,
copy = len;
vaddr = kmap_skb_frag(frag);
csum2 = csum_partial_copy_nocheck(vaddr +
- frag->page_offset +
- offset - start, to,
- copy, 0);
+ frag->page_offset,
+ to, copy, 0);
kunmap_skb_frag(vaddr);
csum = csum_block_add(csum, csum2, pos);
if (!(len -= copy))
@@ -1328,7 +1303,6 @@ __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset,
to += copy;
pos += copy;
}
- start = end;
}
if (skb_shinfo(skb)->frag_list) {
@@ -1336,16 +1310,13 @@ __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset,
for (; list; list = list->next) {
__wsum csum2;
- int end;
-
- BUG_TRAP(start <= offset + len);
+ BUG_TRAP(len >= 0);
- end = start + list->len;
+ end = offset + list->len;
if ((copy = end - offset) > 0) {
if (copy > len)
copy = len;
- csum2 = skb_copy_and_csum_bits(list,
- offset - start,
+ csum2 = skb_copy_and_csum_bits(list, 0,
to, copy, 0);
csum = csum_block_add(csum, csum2, pos);
if ((len -= copy) == 0)
@@ -1354,7 +1325,6 @@ __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset,
to += copy;
pos += copy;
}
- start = end;
}
}
BUG_ON(len);
@@ -2026,8 +1996,8 @@ void __init skb_init(void)
int
skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
{
- int start = skb_headlen(skb);
- int i, copy = start - offset;
+ int end = skb_headlen(skb);
+ int i, copy = end - offset;
int elt = 0;
if (copy > 0) {
@@ -2043,45 +2013,39 @@ skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
}
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
- int end;
+ BUG_TRAP(len >= 0);
- BUG_TRAP(start <= offset + len);
-
- end = start + skb_shinfo(skb)->frags[i].size;
+ end = offset + skb_shinfo(skb)->frags[i].size;
if ((copy = end - offset) > 0) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
if (copy > len)
copy = len;
sg[elt].page = frag->page;
- sg[elt].offset = frag->page_offset+offset-start;
+ sg[elt].offset = frag->page_offset;
sg[elt].length = copy;
elt++;
if (!(len -= copy))
return elt;
offset += copy;
}
- start = end;
}
if (skb_shinfo(skb)->frag_list) {
struct sk_buff *list = skb_shinfo(skb)->frag_list;
for (; list; list = list->next) {
- int end;
-
- BUG_TRAP(start <= offset + len);
+ BUG_TRAP(len >= 0);
- end = start + list->len;
+ end = offset + list->len;
if ((copy = end - offset) > 0) {
if (copy > len)
copy = len;
- elt += skb_to_sgvec(list, sg+elt, offset - start, copy);
+ elt += skb_to_sgvec(list, sg+elt, 0, copy);
if ((len -= copy) == 0)
return elt;
offset += copy;
}
- start = end;
}
}
BUG_ON(len);
diff --git a/net/core/user_dma.c b/net/core/user_dma.c
index 0ad1cd57bc39..89241cdeea3f 100644
--- a/net/core/user_dma.c
+++ b/net/core/user_dma.c
@@ -49,8 +49,8 @@ int dma_skb_copy_datagram_iovec(struct dma_chan *chan,
struct sk_buff *skb, int offset, struct iovec *to,
size_t len, struct dma_pinned_list *pinned_list)
{
- int start = skb_headlen(skb);
- int i, copy = start - offset;
+ int end = skb_headlen(skb);
+ int i, copy = end - offset;
dma_cookie_t cookie = 0;
/* Copy header. */
@@ -69,11 +69,9 @@ int dma_skb_copy_datagram_iovec(struct dma_chan *chan,
/* Copy paged appendix. Hmm... why does this look so complicated? */
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
- int end;
+ BUG_TRAP(len >= 0);
- BUG_TRAP(start <= offset + len);
-
- end = start + skb_shinfo(skb)->frags[i].size;
+ end = offset + skb_shinfo(skb)->frags[i].size;
copy = end - offset;
if (copy > 0) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
@@ -82,8 +80,8 @@ int dma_skb_copy_datagram_iovec(struct dma_chan *chan,
if (copy > len)
copy = len;
- cookie = dma_memcpy_pg_to_iovec(chan, to, pinned_list, page,
- frag->page_offset + offset - start, copy);
+ cookie = dma_memcpy_pg_to_iovec(chan, to, pinned_list,
+ page, frag->page_offset, copy);
if (cookie < 0)
goto fault;
len -= copy;
@@ -91,25 +89,21 @@ int dma_skb_copy_datagram_iovec(struct dma_chan *chan,
goto end;
offset += copy;
}
- start = end;
}
if (skb_shinfo(skb)->frag_list) {
struct sk_buff *list = skb_shinfo(skb)->frag_list;
for (; list; list = list->next) {
- int end;
-
- BUG_TRAP(start <= offset + len);
+ BUG_TRAP(len >= 0);
- end = start + list->len;
+ end = offset + list->len;
copy = end - offset;
if (copy > 0) {
if (copy > len)
copy = len;
cookie = dma_skb_copy_datagram_iovec(chan, list,
- offset - start, to, copy,
- pinned_list);
+ 0, to, copy, pinned_list);
if (cookie < 0)
goto fault;
len -= copy;
@@ -117,7 +111,6 @@ int dma_skb_copy_datagram_iovec(struct dma_chan *chan,
goto end;
offset += copy;
}
- start = end;
}
}