author     Hongbo Li <lihongbo22@huawei.com>            2024-08-27 17:52:18 +0800
committer  Anna Schumaker <anna.schumaker@oracle.com>   2024-09-23 15:03:13 -0400
commit     64a3ab99675ef7b833f64cd3b84db66b6a351529 (patch)
tree       08c6a4cf8d6ae31a60e6a1fe0063861f5ccb8974 /net/sunrpc
parent     2e001972e8ebc318de3b5542887ac06ea309aff8 (diff)
net/sunrpc: make use of the helper macro LIST_HEAD()
A list_head can be initialized at declaration time with the LIST_HEAD() macro instead of calling INIT_LIST_HEAD() at run time. This lets us simplify the code.

Signed-off-by: Hongbo Li <lihongbo22@huawei.com>
Signed-off-by: Anna Schumaker <anna.schumaker@oracle.com>
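For readers unfamiliar with the two macros, below is a minimal user-space sketch of the pattern the patch applies. The struct and macro definitions are simplified stand-ins for what <linux/list.h> provides (the in-kernel INIT_LIST_HEAD carries extra details such as WRITE_ONCE annotations), and the names pending_old and pending_new are illustrative only.

#include <stdio.h>

/*
 * Simplified stand-ins for the kernel's <linux/list.h> primitives,
 * included only so this example is self-contained.
 */
struct list_head {
	struct list_head *next, *prev;
};

#define LIST_HEAD_INIT(name) { &(name), &(name) }

/* Declare and initialize an empty list in a single statement. */
#define LIST_HEAD(name) \
	struct list_head name = LIST_HEAD_INIT(name)

/* Initialize an already-declared list head at run time. */
static inline void INIT_LIST_HEAD(struct list_head *list)
{
	list->next = list;
	list->prev = list;
}

int main(void)
{
	/* Old pattern: separate declaration and runtime initialization. */
	struct list_head pending_old;
	INIT_LIST_HEAD(&pending_old);

	/* New pattern: LIST_HEAD() does both in the declaration. */
	LIST_HEAD(pending_new);

	/* Both produce an empty list whose head points back to itself. */
	printf("old is empty: %d\n", pending_old.next == &pending_old);
	printf("new is empty: %d\n", pending_new.next == &pending_new);
	return 0;
}

Either form yields an empty, self-referencing list head; the macro simply removes the separate initialization call, which is the entire content of the change below.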
Diffstat (limited to 'net/sunrpc')
-rw-r--r--  net/sunrpc/cache.c  10
1 file changed, 3 insertions(+), 7 deletions(-)
diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
index 95ff74706104..4f31e73dc34d 100644
--- a/net/sunrpc/cache.c
+++ b/net/sunrpc/cache.c
@@ -731,11 +731,10 @@ static bool cache_defer_req(struct cache_req *req, struct cache_head *item)
 static void cache_revisit_request(struct cache_head *item)
 {
 	struct cache_deferred_req *dreq;
-	struct list_head pending;
 	struct hlist_node *tmp;
 	int hash = DFR_HASH(item);
+	LIST_HEAD(pending);
 
-	INIT_LIST_HEAD(&pending);
 	spin_lock(&cache_defer_lock);
 
 	hlist_for_each_entry_safe(dreq, tmp, &cache_defer_hash[hash], hash)
@@ -756,10 +755,8 @@ static void cache_revisit_request(struct cache_head *item)
 void cache_clean_deferred(void *owner)
 {
 	struct cache_deferred_req *dreq, *tmp;
-	struct list_head pending;
+	LIST_HEAD(pending);
 
-
-	INIT_LIST_HEAD(&pending);
 	spin_lock(&cache_defer_lock);
 
 	list_for_each_entry_safe(dreq, tmp, &cache_defer_list, recent) {
@@ -1085,9 +1082,8 @@ static void cache_dequeue(struct cache_detail *detail, struct cache_head *ch)
 {
 	struct cache_queue *cq, *tmp;
 	struct cache_request *cr;
-	struct list_head dequeued;
+	LIST_HEAD(dequeued);
 
-	INIT_LIST_HEAD(&dequeued);
 	spin_lock(&queue_lock);
 	list_for_each_entry_safe(cq, tmp, &detail->queue, list)
 		if (!cq->reader) {