-rw-r--r--   include/linux/sunrpc/cache.h |  3
-rw-r--r--   net/sunrpc/cache.c           | 59
-rw-r--r--   net/sunrpc/svc_xprt.c        | 11

3 files changed, 72 insertions(+), 1 deletion(-)
diff --git a/include/linux/sunrpc/cache.h b/include/linux/sunrpc/cache.h
index ece432b7f87f..52a7d7224e90 100644
--- a/include/linux/sunrpc/cache.h
+++ b/include/linux/sunrpc/cache.h
@@ -125,6 +125,9 @@ struct cache_detail {
  */
 struct cache_req {
         struct cache_deferred_req *(*defer)(struct cache_req *req);
+        int thread_wait;  /* How long (jiffies) we can block the
+                           * current thread to wait for updates.
+                           */
 };
 /* this must be embedded in a deferred_request that is being
  * delayed awaiting cache-fill
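The new thread_wait field is filled in by whichever code embeds a struct cache_req in its request handle. Below is a hedged sketch, not part of the patch, of how a hypothetical cache user might set it before a lookup; my_handle, my_defer and my_lookup are invented names, while cache_check() and the 5*HZ value mirror what svc_recv() does further down via rqstp->rq_chandle.thread_wait.

#include <linux/sunrpc/cache.h>
#include <linux/jiffies.h>

/* Hypothetical request handle embedding the sunrpc cache_req. */
struct my_handle {
        struct cache_req chandle;
        /* ... per-request state ... */
};

/* No async deferral mechanism here: returning NULL means the request
 * cannot be queued for later revisiting.
 */
static struct cache_deferred_req *my_defer(struct cache_req *req)
{
        return NULL;
}

static int my_lookup(struct cache_detail *cd, struct cache_head *h,
                     struct my_handle *mh)
{
        mh->chandle.defer = my_defer;
        /* With thread_wait set, cache_defer_req() blocks this thread
         * (up to ~5 seconds) for the cache to be filled, and only falls
         * back to ->defer() if the item is still pending afterwards.
         */
        mh->chandle.thread_wait = 5*HZ;
        return cache_check(cd, h, &mh->chandle);
}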
diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
index 8dc121955fdc..2c5297f245b4 100644
--- a/net/sunrpc/cache.c
+++ b/net/sunrpc/cache.c
@@ -509,10 +509,22 @@ static LIST_HEAD(cache_defer_list);
 static struct list_head cache_defer_hash[DFR_HASHSIZE];
 static int cache_defer_cnt;
 
+struct thread_deferred_req {
+        struct cache_deferred_req handle;
+        struct completion completion;
+};
+static void cache_restart_thread(struct cache_deferred_req *dreq, int too_many)
+{
+        struct thread_deferred_req *dr =
+                container_of(dreq, struct thread_deferred_req, handle);
+        complete(&dr->completion);
+}
+
 static int cache_defer_req(struct cache_req *req, struct cache_head *item)
 {
         struct cache_deferred_req *dreq, *discard;
         int hash = DFR_HASH(item);
+        struct thread_deferred_req sleeper;
 
         if (cache_defer_cnt >= DFR_MAX) {
                 /* too much in the cache, randomly drop this one,
@@ -521,7 +533,15 @@ static int cache_defer_req(struct cache_req *req, struct cache_head *item)
                 if (net_random()&1)
                         return -ENOMEM;
         }
-        dreq = req->defer(req);
+        if (req->thread_wait) {
+                dreq = &sleeper.handle;
+                sleeper.completion =
+                        COMPLETION_INITIALIZER_ONSTACK(sleeper.completion);
+                dreq->revisit = cache_restart_thread;
+        } else
+                dreq = req->defer(req);
+
+ retry:
         if (dreq == NULL)
                 return -ENOMEM;
@@ -555,6 +575,43 @@ static int cache_defer_req(struct cache_req *req, struct cache_head *item)
                 cache_revisit_request(item);
                 return -EAGAIN;
         }
+
+        if (dreq == &sleeper.handle) {
+                if (wait_for_completion_interruptible_timeout(
+                            &sleeper.completion, req->thread_wait) <= 0) {
+                        /* The completion wasn't completed, so we need
+                         * to clean up
+                         */
+                        spin_lock(&cache_defer_lock);
+                        if (!list_empty(&sleeper.handle.hash)) {
+                                list_del_init(&sleeper.handle.recent);
+                                list_del_init(&sleeper.handle.hash);
+                                cache_defer_cnt--;
+                                spin_unlock(&cache_defer_lock);
+                        } else {
+                                /* cache_revisit_request already removed
+                                 * this from the hash table, but hasn't
+                                 * called ->revisit yet.  It will very soon
+                                 * and we need to wait for it.
+                                 */
+                                spin_unlock(&cache_defer_lock);
+                                wait_for_completion(&sleeper.completion);
+                        }
+                }
+                if (test_bit(CACHE_PENDING, &item->flags)) {
+                        /* item is still pending, try request
+                         * deferral
+                         */
+                        dreq = req->defer(req);
+                        goto retry;
+                }
+                /* only return success if we actually deferred the
+                 * request.  In this case we waited until it was
+                 * answered so no deferral has happened - rather
+                 * an answer already exists.
+                 */
+                return -EEXIST;
+        }
         return 0;
 }
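The sleeper path above is the standard Linux on-stack completion pattern: embed a struct completion in a stack object, hand a pointer to it to another context, do a timed interruptible wait, and make sure the completion has actually fired before the stack frame goes away. The following kernel-module-style sketch is purely illustrative (all names are hypothetical and not part of the patch); it exercises the same calls the patch relies on.

#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/jiffies.h>
#include <linux/err.h>

struct demo_waiter {
        struct completion done;         /* plays the role of sleeper.completion */
};

/* Plays the role of cache_restart_thread(): wake the sleeping thread. */
static int demo_worker(void *data)
{
        struct demo_waiter *w = data;

        msleep(100);                    /* pretend an upcall fills the cache */
        complete(&w->done);
        return 0;
}

static int __init demo_init(void)
{
        struct demo_waiter w;           /* on-stack, like "sleeper" above */
        struct task_struct *t;
        long left;

        init_completion(&w.done);
        t = kthread_run(demo_worker, &w, "demo_worker");
        if (IS_ERR(t))
                return PTR_ERR(t);

        /* Same call the patch uses: <= 0 means timeout or signal. */
        left = wait_for_completion_interruptible_timeout(&w.done, 5*HZ);
        if (left <= 0) {
                /* The worker still holds a pointer to our stack, so wait
                 * unconditionally before letting this frame disappear,
                 * much like the fallback wait in cache_defer_req().
                 */
                wait_for_completion(&w.done);
                pr_info("demo: timed out or interrupted\n");
        } else {
                pr_info("demo: woken with %ld jiffies to spare\n", left);
        }
        return 0;
}

static void __exit demo_exit(void)
{
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");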
diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
index cbc084939dd8..8ff6840866fa 100644
--- a/net/sunrpc/svc_xprt.c
+++ b/net/sunrpc/svc_xprt.c
@@ -651,6 +651,11 @@ int svc_recv(struct svc_rqst *rqstp, long timeout)
         if (signalled() || kthread_should_stop())
                 return -EINTR;
 
+        /* Normally we will wait up to 5 seconds for any required
+         * cache information to be provided.
+         */
+        rqstp->rq_chandle.thread_wait = 5*HZ;
+
         spin_lock_bh(&pool->sp_lock);
         xprt = svc_xprt_dequeue(pool);
         if (xprt) {
@@ -658,6 +663,12 @@ int svc_recv(struct svc_rqst *rqstp, long timeout)
                 svc_xprt_get(xprt);
                 rqstp->rq_reserved = serv->sv_max_mesg;
                 atomic_add(rqstp->rq_reserved, &xprt->xpt_reserved);
+
+                /* As there is a shortage of threads and this request
+                 * had to be queued, don't allow the thread to wait so
+                 * long for cache updates.
+                 */
+                rqstp->rq_chandle.thread_wait = 1*HZ;
         } else {
                 /* No data pending. Go to sleep */
                 svc_thread_enqueue(pool, rqstp);
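The two assignments to rq_chandle.thread_wait encode a simple policy: an idle server thread can afford to block for about five seconds waiting for a cache fill, while a thread that picked up an already-queued transport (a sign of thread shortage) should give up after about one second. A hypothetical helper, not in the patch, expressing the same choice:

/* Hypothetical helper: pick how long a server thread may block in
 * cache_defer_req(), mirroring the 5s/1s policy svc_recv() sets above.
 */
static inline int svc_cache_wait(int work_already_queued)
{
        return work_already_queued ? 1*HZ : 5*HZ;
}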