author     Bob Peterson <rpeterso@redhat.com>    2010-11-12 11:15:20 -0600
committer  David Teigland <teigland@redhat.com>  2010-11-12 11:15:20 -0600
commit     f92c8dd7a0eb18124521e2b549f88422e17f707b (patch)
tree       a93e644b31f9c8860a6dd513abfc0005bb602c0f /fs/dlm/lowcomms.c
parent     cb2d45da81c86d5191b19d0f67732a854bc0253c (diff)
dlm: reduce cond_resched during send
Calling cond_resched() after every send can unnecessarily
degrade performance. Go back to an old method of scheduling
after 25 messages.
Signed-off-by: Bob Peterson <rpeterso@redhat.com>
Signed-off-by: David Teigland <teigland@redhat.com>
Diffstat (limited to 'fs/dlm/lowcomms.c')
-rw-r--r--  fs/dlm/lowcomms.c | 10 +++++++++-
1 file changed, 9 insertions(+), 1 deletion(-)
```diff
diff --git a/fs/dlm/lowcomms.c b/fs/dlm/lowcomms.c
index 2bedb0ac5f92..0e75f152eac2 100644
--- a/fs/dlm/lowcomms.c
+++ b/fs/dlm/lowcomms.c
@@ -63,6 +63,9 @@
 #define NEEDED_RMEM (4*1024*1024)
 #define CONN_HASH_SIZE 32
 
+/* Number of messages to send before rescheduling */
+#define MAX_SEND_MSG_COUNT 25
+
 struct cbuf {
 	unsigned int base;
 	unsigned int len;
@@ -1318,6 +1321,7 @@ static void send_to_sock(struct connection *con)
 	const int msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL;
 	struct writequeue_entry *e;
 	int len, offset;
+	int count = 0;
 
 	mutex_lock(&con->sock_mutex);
 	if (con->sock == NULL)
@@ -1355,8 +1359,12 @@ static void send_to_sock(struct connection *con)
 			if (ret <= 0)
 				goto send_error;
 		}
-		/* Don't starve people filling buffers */
+
+		/* Don't starve people filling buffers */
+		if (++count >= MAX_SEND_MSG_COUNT) {
 			cond_resched();
+			count = 0;
+		}
 
 		spin_lock(&con->writequeue_lock);
 		e->offset += ret;
```
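As an aside (not part of the commit), the batching pattern this hunk introduces can be sketched as a standalone userspace program: sched_yield() stands in for the in-kernel cond_resched(), and send_one_message() is a made-up placeholder for the real writequeue send loop in send_to_sock(); only the MAX_SEND_MSG_COUNT name and the value 25 come from the patch itself.

```c
/*
 * Illustrative userspace sketch of the "yield every N messages" pattern.
 * sched_yield() stands in for cond_resched(); send_one_message() is a
 * hypothetical placeholder for the real socket send in send_to_sock().
 */
#include <sched.h>
#include <stdio.h>

#define MAX_SEND_MSG_COUNT 25	/* same threshold the patch picks */

/* Pretend send: always reports one message written. */
static int send_one_message(int msgnum)
{
	(void)msgnum;
	return 1;
}

int main(void)
{
	int count = 0;
	int sent = 0;

	for (int i = 0; i < 1000; i++) {
		if (send_one_message(i) <= 0)
			break;
		sent++;

		/*
		 * Don't starve other runnable tasks, but don't yield after
		 * every single send either: only once per 25 messages.
		 */
		if (++count >= MAX_SEND_MSG_COUNT) {
			sched_yield();
			count = 0;
		}
	}

	printf("sent %d messages\n", sent);
	return 0;
}
```

The counter simply amortizes the rescheduling cost over 25 sends instead of paying it on every message, which is the performance concern the commit message describes.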