author		Kuniyuki Iwashima <kuniyu@amazon.co.jp>	2021-11-24 11:14:29 +0900
committer	Jakub Kicinski <kuba@kernel.org>	2021-11-26 18:01:57 -0800
commit		e6b4b873896f0e9298f70d25726f4bb1e1b265ba (patch)
tree		28cc19f5e0cb930e261ec0ab463c2232370f4dd2 /net/unix
parent		f452be496a5c8f58b1a67cde79e89b9f1cfde31c (diff)
af_unix: Save hash in sk_hash.
To replace unix_table_lock with per-hash locks in the next patch, we need to save a hash in each socket, because /proc/net/unix and BPF programs iterate sockets while holding a hash table lock and release it later in a different function.

Currently, we store a real/pseudo hash in struct unix_address. However, we do not allocate it for unbound sockets, nor should we do so just for this purpose. Instead, we can use sk_hash. Then we no longer use the hash field in struct unix_address and can remove it.

Also, this patch does:

  - rename unix_insert_socket() to unix_insert_unbound_socket()
  - remove the redundant list argument from __unix_insert_socket() and unix_insert_unbound_socket()
  - use 'unsigned int' instead of 'unsigned' in __unix_set_addr_hash()
  - remove 'inline' from unix_remove_socket() and unix_insert_unbound_socket()

Signed-off-by: Kuniyuki Iwashima <kuniyu@amazon.co.jp>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
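The pattern this commit prepares for can be shown with a small, self-contained user-space sketch: each object caches the index of its hash bucket so that a later, separate function can still find (and drop) the correct per-bucket lock. All names below (struct obj, bucket_lock, obj_lock(), obj_unlock()) are hypothetical, and pthread mutexes stand in for the kernel's spinlocks; this is only an illustration of the idea, not kernel code.

    /* Sketch: per-bucket locks keyed by a hash cached on the object,
     * analogous to saving the hash in sk->sk_hash. */
    #include <pthread.h>
    #include <stdio.h>

    #define HASH_SIZE 16

    static pthread_mutex_t bucket_lock[HASH_SIZE];

    struct obj {
        unsigned int hash;  /* cached bucket index, like sk->sk_hash */
        int value;
    };

    /* Lock the bucket the object lives in. */
    static void obj_lock(struct obj *o)
    {
        pthread_mutex_lock(&bucket_lock[o->hash]);
    }

    /* A different function can unlock it later using only the cached hash. */
    static void obj_unlock(struct obj *o)
    {
        pthread_mutex_unlock(&bucket_lock[o->hash]);
    }

    int main(void)
    {
        struct obj o = { .hash = 5 % HASH_SIZE, .value = 42 };

        for (int i = 0; i < HASH_SIZE; i++)
            pthread_mutex_init(&bucket_lock[i], NULL);

        obj_lock(&o);       /* e.g. taken when an iteration starts */
        printf("value in bucket %u: %d\n", o.hash, o.value);
        obj_unlock(&o);     /* dropped later, possibly in another function */

        return 0;
    }

This is why the hash has to live on the socket itself: with per-hash locks, whichever function eventually releases the lock (such as the /proc/net/unix or BPF iterator paths mentioned above) needs the bucket index without re-deriving it from the address.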
Diffstat (limited to 'net/unix')
-rw-r--r--	net/unix/af_unix.c	42
1 file changed, 23 insertions(+), 19 deletions(-)
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 076d0297450d..bd9fbfe0e7bb 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -280,31 +280,33 @@ static void __unix_remove_socket(struct sock *sk)
sk_del_node_init(sk);
}
-static void __unix_insert_socket(struct hlist_head *list, struct sock *sk)
+static void __unix_insert_socket(struct sock *sk)
{
WARN_ON(!sk_unhashed(sk));
- sk_add_node(sk, list);
+ sk_add_node(sk, &unix_socket_table[sk->sk_hash]);
}
-static void __unix_set_addr(struct sock *sk, struct unix_address *addr,
- unsigned hash)
+static void __unix_set_addr_hash(struct sock *sk, struct unix_address *addr,
+ unsigned int hash)
{
__unix_remove_socket(sk);
smp_store_release(&unix_sk(sk)->addr, addr);
- __unix_insert_socket(&unix_socket_table[hash], sk);
+
+ sk->sk_hash = hash;
+ __unix_insert_socket(sk);
}
-static inline void unix_remove_socket(struct sock *sk)
+static void unix_remove_socket(struct sock *sk)
{
spin_lock(&unix_table_lock);
__unix_remove_socket(sk);
spin_unlock(&unix_table_lock);
}
-static inline void unix_insert_socket(struct hlist_head *list, struct sock *sk)
+static void unix_insert_unbound_socket(struct sock *sk)
{
spin_lock(&unix_table_lock);
- __unix_insert_socket(list, sk);
+ __unix_insert_socket(sk);
spin_unlock(&unix_table_lock);
}
@@ -893,6 +895,7 @@ static struct sock *unix_create1(struct net *net, struct socket *sock, int kern,
sock_init_data(sock, sk);
+ sk->sk_hash = unix_unbound_hash(sk);
sk->sk_allocation = GFP_KERNEL_ACCOUNT;
sk->sk_write_space = unix_write_space;
sk->sk_max_ack_backlog = net->unx.sysctl_max_dgram_qlen;
@@ -908,7 +911,7 @@ static struct sock *unix_create1(struct net *net, struct socket *sock, int kern,
init_waitqueue_head(&u->peer_wait);
init_waitqueue_func_entry(&u->peer_wake, unix_dgram_peer_wake_relay);
memset(&u->scm_stat, 0, sizeof(struct scm_stat));
- unix_insert_socket(&unix_socket_table[unix_unbound_hash(sk)], sk);
+ unix_insert_unbound_socket(sk);
sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
@@ -1054,6 +1057,7 @@ static int unix_autobind(struct sock *sk)
struct unix_address *addr;
unsigned int retries = 0;
static u32 ordernum = 1;
+ unsigned int new_hash;
int err;
err = mutex_lock_interruptible(&u->bindlock);
@@ -1075,13 +1079,13 @@ static int unix_autobind(struct sock *sk)
retry:
addr->len = sprintf(addr->name->sun_path + 1, "%05x", ordernum) +
offsetof(struct sockaddr_un, sun_path) + 1;
- addr->hash = unix_abstract_hash(addr->name, addr->len, sk->sk_type);
+ new_hash = unix_abstract_hash(addr->name, addr->len, sk->sk_type);
spin_lock(&unix_table_lock);
ordernum = (ordernum+1)&0xFFFFF;
if (__unix_find_socket_byname(sock_net(sk), addr->name, addr->len,
- addr->hash)) {
+ new_hash)) {
spin_unlock(&unix_table_lock);
/*
* __unix_find_socket_byname() may take long time if many names
@@ -1097,7 +1101,7 @@ retry:
goto retry;
}
- __unix_set_addr(sk, addr, addr->hash);
+ __unix_set_addr_hash(sk, addr, new_hash);
spin_unlock(&unix_table_lock);
err = 0;
@@ -1113,9 +1117,9 @@ static int unix_bind_bsd(struct sock *sk, struct sockaddr_un *sunaddr,
struct unix_sock *u = unix_sk(sk);
struct user_namespace *ns; // barf...
struct unix_address *addr;
+ unsigned int new_hash;
struct dentry *dentry;
struct path parent;
- unsigned int hash;
int err;
unix_mkname_bsd(sunaddr, addr_len);
@@ -1151,12 +1155,11 @@ static int unix_bind_bsd(struct sock *sk, struct sockaddr_un *sunaddr,
if (u->addr)
goto out_unlock;
- addr->hash = UNIX_HASH_SIZE;
- hash = unix_bsd_hash(d_backing_inode(dentry));
+ new_hash = unix_bsd_hash(d_backing_inode(dentry));
spin_lock(&unix_table_lock);
u->path.mnt = mntget(parent.mnt);
u->path.dentry = dget(dentry);
- __unix_set_addr(sk, addr, hash);
+ __unix_set_addr_hash(sk, addr, new_hash);
spin_unlock(&unix_table_lock);
mutex_unlock(&u->bindlock);
done_path_create(&parent, dentry);
@@ -1180,6 +1183,7 @@ static int unix_bind_abstract(struct sock *sk, struct sockaddr_un *sunaddr,
{
struct unix_sock *u = unix_sk(sk);
struct unix_address *addr;
+ unsigned int new_hash;
int err;
addr = unix_create_addr(sunaddr, addr_len);
@@ -1195,14 +1199,14 @@ static int unix_bind_abstract(struct sock *sk, struct sockaddr_un *sunaddr,
goto out_mutex;
}
- addr->hash = unix_abstract_hash(addr->name, addr->len, sk->sk_type);
+ new_hash = unix_abstract_hash(addr->name, addr->len, sk->sk_type);
spin_lock(&unix_table_lock);
if (__unix_find_socket_byname(sock_net(sk), addr->name, addr->len,
- addr->hash))
+ new_hash))
goto out_spin;
- __unix_set_addr(sk, addr, addr->hash);
+ __unix_set_addr_hash(sk, addr, new_hash);
spin_unlock(&unix_table_lock);
mutex_unlock(&u->bindlock);
return 0;