author     Arnaldo Carvalho de Melo <acme@ghostprotocols.net>   2005-08-09 20:10:12 -0700
committer  David S. Miller <davem@sunset.davemloft.net>         2005-08-29 15:42:36 -0700
commit     87d11ceb9deb7a3f13fdee6e89d9bb6be7d27a71 (patch)
tree       3b5f6f15a76e6d58e5088cad707859390757181a
parent     c676270bcd25015b978722ec0352c330dcc87883 (diff)
[SOCK]: Introduce sk_clone
Factored out of tcp_create_openreq_child; it will be used in dccp_create_openreq_child, and it is a generally useful sock function anyway.

Signed-off-by: Arnaldo Carvalho de Melo <acme@ghostprotocols.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--  include/net/sock.h       |  2
-rw-r--r--  net/core/sock.c          | 74
-rw-r--r--  net/ipv4/tcp_minisocks.c | 70
3 files changed, 79 insertions(+), 67 deletions(-)
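The commit message above says sk_clone() is being split out of tcp_create_openreq_child() so that dccp_create_openreq_child() can reuse it. The fragment below is a minimal, hypothetical caller sketch and is not part of this patch: example_create_openreq_child() and its body are made up for illustration, but the sk_clone(sk, GFP_ATOMIC) call and the state it leaves the child in (generic fields reinitialised, bh_lock_sock() held, sk_refcnt set to 2) follow from the sk_clone() implementation added in net/core/sock.c below.

/* Hypothetical caller sketch, not part of this patch: a protocol's
 * *_create_openreq_child() built on top of the new sk_clone() helper. */
#include <net/sock.h>
#include <net/request_sock.h>

struct sock *example_create_openreq_child(struct sock *sk,
                                          struct request_sock *req)
{
        /* Called from the softirq receive path, hence GFP_ATOMIC. */
        struct sock *newsk = sk_clone(sk, GFP_ATOMIC);

        if (newsk != NULL) {
                /* sk_clone() already copied sk_prot->obj_size bytes from the
                 * parent, reinitialised the generic struct sock fields, took
                 * bh_lock_sock(newsk) and set sk_refcnt to 2; only the
                 * protocol-private state from 'req' remains to be filled in
                 * (the TCP code, for instance, sets sk_state to TCP_SYN_RECV). */

                /* ... copy ports, sequence numbers and options from req ... */
        }
        return newsk;
}

As a usage note, the second argument is the allocation priority forwarded to sk_alloc(); a caller running in process context could pass GFP_KERNEL instead.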
diff --git a/include/net/sock.h b/include/net/sock.h
index bdae0a5eadf5..828dc082fcb7 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -737,6 +737,8 @@ extern struct sock *sk_alloc(int family,
                              unsigned int __nocast priority,
                              struct proto *prot, int zero_it);
 extern void sk_free(struct sock *sk);
+extern struct sock *sk_clone(const struct sock *sk,
+                             const unsigned int __nocast priority);
 extern struct sk_buff *sock_wmalloc(struct sock *sk,
                                     unsigned long size, int force,
                                     unsigned int __nocast priority);
diff --git a/net/core/sock.c b/net/core/sock.c
index aba31fedf2ac..ccd10fd65682 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -700,6 +700,80 @@ void sk_free(struct sock *sk)
         module_put(owner);
 }
+struct sock *sk_clone(const struct sock *sk, const unsigned int __nocast priority)
+{
+        struct sock *newsk = sk_alloc(sk->sk_family, priority, sk->sk_prot, 0);
+
+        if (newsk != NULL) {
+                struct sk_filter *filter;
+
+                memcpy(newsk, sk, sk->sk_prot->obj_size);
+
+                /* SANITY */
+                sk_node_init(&newsk->sk_node);
+                sock_lock_init(newsk);
+                bh_lock_sock(newsk);
+
+                atomic_set(&newsk->sk_rmem_alloc, 0);
+                atomic_set(&newsk->sk_wmem_alloc, 0);
+                atomic_set(&newsk->sk_omem_alloc, 0);
+                skb_queue_head_init(&newsk->sk_receive_queue);
+                skb_queue_head_init(&newsk->sk_write_queue);
+
+                rwlock_init(&newsk->sk_dst_lock);
+                rwlock_init(&newsk->sk_callback_lock);
+
+                newsk->sk_dst_cache = NULL;
+                newsk->sk_wmem_queued = 0;
+                newsk->sk_forward_alloc = 0;
+                newsk->sk_send_head = NULL;
+                newsk->sk_backlog.head = newsk->sk_backlog.tail = NULL;
+                newsk->sk_userlocks = sk->sk_userlocks & ~SOCK_BINDPORT_LOCK;
+
+                sock_reset_flag(newsk, SOCK_DONE);
+                skb_queue_head_init(&newsk->sk_error_queue);
+
+                filter = newsk->sk_filter;
+                if (filter != NULL)
+                        sk_filter_charge(newsk, filter);
+
+                if (unlikely(xfrm_sk_clone_policy(newsk))) {
+                        /* It is still a raw copy of the parent, so invalidate
+                         * the destructor and do a plain sk_free() */
+                        newsk->sk_destruct = NULL;
+                        sk_free(newsk);
+                        newsk = NULL;
+                        goto out;
+                }
+
+                newsk->sk_err = 0;
+                newsk->sk_priority = 0;
+                atomic_set(&newsk->sk_refcnt, 2);
+
+                /*
+                 * Increment the counter in the same struct proto as the master
+                 * sock (sk_refcnt_debug_inc uses newsk->sk_prot->socks, that
+                 * is the same as sk->sk_prot->socks, as this field was copied
+                 * with memcpy).
+                 *
+                 * This _changes_ the previous behaviour, where
+                 * tcp_create_openreq_child was always incrementing the
+                 * equivalent of tcp_prot->socks (inet_sock_nr), so this has
+                 * to be taken into account in all callers. -acme
+                 */
+                sk_refcnt_debug_inc(newsk);
+                newsk->sk_socket = NULL;
+                newsk->sk_sleep = NULL;
+
+                if (newsk->sk_prot->sockets_allocated)
+                        atomic_inc(newsk->sk_prot->sockets_allocated);
+        }
+out:
+        return newsk;
+}
+
+EXPORT_SYMBOL_GPL(sk_clone);
+
 void __init sk_init(void)
 {
         if (num_physpages <= 4096) {
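A note on the accounting comment in sk_clone() above: the helper now increments sk_prot->socks via sk_refcnt_debug_inc() and, when the protocol provides the pointer, sk_prot->sockets_allocated, so callers must not repeat either increment. The fragment below is an illustrative, abridged sketch: example_prot and example_sockets_allocated are made-up stand-ins for tcp_prot and tcp_sockets_allocated, and only the struct proto fields relevant here are shown. It is meant to show why the explicit atomic_inc(&tcp_sockets_allocated) can be deleted from tcp_create_openreq_child() in the next diff.

/* Illustrative stand-ins, not part of this patch: a protocol exposes its
 * per-protocol counter through struct proto so that the generic increment
 * in sk_clone() covers it. */
#include <net/sock.h>
#include <linux/tcp.h>

static atomic_t example_sockets_allocated = ATOMIC_INIT(0);

static struct proto example_prot = {
        .name              = "EXAMPLE",
        .obj_size          = sizeof(struct tcp_sock),    /* bytes copied by sk_clone() */
        .sockets_allocated = &example_sockets_allocated, /* bumped by sk_clone() */
        /* ... remaining ops and fields elided ... */
};

With the counter reachable as newsk->sk_prot->sockets_allocated, the conditional atomic_inc() at the end of sk_clone() does generically what the removed TCP-specific atomic_inc(&tcp_sockets_allocated) used to do.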
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index 66ce1790a94f..8b6cd8d80662 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -599,67 +599,26 @@ out:
  */
 struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req, struct sk_buff *skb)
 {
-        /* allocate the newsk from the same slab of the master sock,
-         * if not, at sk_free time we'll try to free it from the wrong
-         * slabcache (i.e. is it TCPv4 or v6?), this is handled thru sk->sk_prot -acme */
-        struct sock *newsk = sk_alloc(PF_INET, GFP_ATOMIC, sk->sk_prot, 0);
+        struct sock *newsk = sk_clone(sk, GFP_ATOMIC);
-        if(newsk != NULL) {
+        if (newsk != NULL) {
                 struct inet_request_sock *ireq = inet_rsk(req);
                 struct tcp_request_sock *treq = tcp_rsk(req);
                 struct inet_sock *newinet = inet_sk(newsk);
                 struct tcp_sock *newtp;
-                struct sk_filter *filter;
-                memcpy(newsk, sk, sizeof(struct tcp_sock));
                 newsk->sk_state = TCP_SYN_RECV;
-
-                /* SANITY */
-                sk_node_init(&newsk->sk_node);
                 newinet->bind_hash = NULL;
                 /* Clone the TCP header template */
                 newinet->dport = ireq->rmt_port;
-
-                sock_lock_init(newsk);
-                bh_lock_sock(newsk);
-
-                rwlock_init(&newsk->sk_dst_lock);
-                newsk->sk_dst_cache = NULL;
-                atomic_set(&newsk->sk_rmem_alloc, 0);
-                skb_queue_head_init(&newsk->sk_receive_queue);
-                atomic_set(&newsk->sk_wmem_alloc, 0);
-                skb_queue_head_init(&newsk->sk_write_queue);
-                atomic_set(&newsk->sk_omem_alloc, 0);
-                newsk->sk_wmem_queued = 0;
-                newsk->sk_forward_alloc = 0;
-
-                sock_reset_flag(newsk, SOCK_DONE);
-                newsk->sk_userlocks = sk->sk_userlocks & ~SOCK_BINDPORT_LOCK;
-                newsk->sk_backlog.head = newsk->sk_backlog.tail = NULL;
-                newsk->sk_send_head = NULL;
-                rwlock_init(&newsk->sk_callback_lock);
-                skb_queue_head_init(&newsk->sk_error_queue);
                 newsk->sk_write_space = sk_stream_write_space;
-                if ((filter = newsk->sk_filter) != NULL)
-                        sk_filter_charge(newsk, filter);
-
-                if (unlikely(xfrm_sk_clone_policy(newsk))) {
-                        /* It is still raw copy of parent, so invalidate
-                         * destructor and make plain sk_free() */
-                        newsk->sk_destruct = NULL;
-                        sk_free(newsk);
-                        return NULL;
-                }
-
                 /* Now setup tcp_sock */
                 newtp = tcp_sk(newsk);
                 newtp->pred_flags = 0;
                 newtp->rcv_nxt = treq->rcv_isn + 1;
-                newtp->snd_nxt = treq->snt_isn + 1;
-                newtp->snd_una = treq->snt_isn + 1;
-                newtp->snd_sml = treq->snt_isn + 1;
+                newtp->snd_nxt = newtp->snd_una = newtp->snd_sml = treq->snt_isn + 1;
                 tcp_prequeue_init(newtp);
@@ -710,32 +669,9 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req,
                 /* Deinitialize accept_queue to trap illegal accesses. */
                 memset(&newtp->accept_queue, 0, sizeof(newtp->accept_queue));
-                /* Back to base struct sock members. */
-                newsk->sk_err = 0;
-                newsk->sk_priority = 0;
-                atomic_set(&newsk->sk_refcnt, 2);
-
-                /*
-                 * Increment the counter in the same struct proto as the master
-                 * sock (sk_refcnt_debug_inc uses newsk->sk_prot->socks, that
-                 * is the same as sk->sk_prot->socks, as this field was copied
-                 * with memcpy), same rationale as the first comment in this
-                 * function.
-                 *
-                 * This _changes_ the previous behaviour, where
-                 * tcp_create_openreq_child always was incrementing the
-                 * equivalent to tcp_prot->socks (inet_sock_nr), so this have
-                 * to be taken into account in all callers. -acme
-                 */
-                sk_refcnt_debug_inc(newsk);
-
-                atomic_inc(&tcp_sockets_allocated);
-
                 if (sock_flag(newsk, SOCK_KEEPOPEN))
                         tcp_reset_keepalive_timer(newsk,
                                                   keepalive_time_when(newtp));
-                newsk->sk_socket = NULL;
-                newsk->sk_sleep = NULL;
                 newtp->rx_opt.tstamp_ok = ireq->tstamp_ok;
                 if((newtp->rx_opt.sack_ok = ireq->sack_ok) != 0) {