From 5d927eb0101eb791fb2d4f72b49a2da5faf01941 Mon Sep 17 00:00:00 2001
From: Harald Welte
Date: Wed, 22 Jun 2005 12:37:50 -0700
Subject: [NETFILTER]: Fix handling of ICMP packets (RELATED) in ipt_CLUSTERIP target.

Signed-off-by: Harald Welte
Signed-off-by: David S. Miller
---
 net/ipv4/netfilter/ipt_CLUSTERIP.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/net/ipv4/netfilter/ipt_CLUSTERIP.c b/net/ipv4/netfilter/ipt_CLUSTERIP.c
index dc4362b57cfa..9cde8c61f525 100644
--- a/net/ipv4/netfilter/ipt_CLUSTERIP.c
+++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c
@@ -339,7 +339,7 @@ target(struct sk_buff **pskb,
      * error messages (RELATED) and information requests (see below) */
     if ((*pskb)->nh.iph->protocol == IPPROTO_ICMP
         && (ctinfo == IP_CT_RELATED
-            || ctinfo == IP_CT_IS_REPLY+IP_CT_IS_REPLY))
+            || ctinfo == IP_CT_RELATED+IP_CT_IS_REPLY))
         return IPT_CONTINUE;
 
     /* ip_conntrack_icmp guarantees us that we only have ICMP_ECHO,
--
cgit v1.2.3

From f31f5f051269746179b01017fc5e3dcf6b37c67e Mon Sep 17 00:00:00 2001
From: Eric Dumazet
Date: Wed, 22 Jun 2005 14:32:51 -0700
Subject: [NET]: don't use strlen() but the result from a prior sprintf()

Small patch to save an unnecessary call to strlen(): sprintf() gave us the
length, just trust it.

Signed-off-by: Eric Dumazet
Signed-off-by: David S. Miller
---
 net/socket.c | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/net/socket.c b/net/socket.c
index 38729af09461..6f2a17881972 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -383,9 +383,8 @@ int sock_map_fd(struct socket *sock)
         goto out;
     }
 
-    sprintf(name, "[%lu]", SOCK_INODE(sock)->i_ino);
+    this.len = sprintf(name, "[%lu]", SOCK_INODE(sock)->i_ino);
     this.name = name;
-    this.len = strlen(name);
     this.hash = SOCK_INODE(sock)->i_ino;
 
     file->f_dentry = d_alloc(sock_mnt->mnt_sb->s_root, &this);
--
cgit v1.2.3
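The same pattern applies to ordinary userspace C: sprintf() (and snprintf())
return the number of characters written, so a separate strlen() on a buffer
that was just formatted is redundant. A minimal sketch, with made-up buffer
and variable names rather than the kernel code above:

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        char name[32];
        unsigned long ino = 12345;

        /* redundant: format the buffer, then measure what was just written */
        sprintf(name, "[%lu]", ino);
        size_t len_slow = strlen(name);

        /* better: sprintf() already reports how many characters it wrote */
        int len_fast = sprintf(name, "[%lu]", ino);

        return !(len_slow == (size_t)len_fast);
    }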
From 115c1d6e61b70851d9a363328c3b8d4c2559a1d3 Mon Sep 17 00:00:00 2001
From: Jeff Moyer
Date: Wed, 22 Jun 2005 22:05:31 -0700
Subject: [NETPOLL]: Introduce a netpoll_info struct

This patch introduces a netpoll_info structure, which the struct net_device
will now point to instead of pointing to a struct netpoll. The reason for
this is two-fold: 1) fields such as the rx_flags, poll_owner, and poll_lock
should be maintained per net_device, not per netpoll; and 2) this is a first
step in providing support for multiple netpoll clients to register against
the same net_device.

The struct netpoll is now pointed to by the netpoll_info structure. As such,
the previous behaviour of the code is preserved.

Signed-off-by: Jeff Moyer
Signed-off-by: David S. Miller
---
 net/core/netpoll.c | 57 ++++++++++++++++++++++++++++++++++++------------------
 1 file changed, 38 insertions(+), 19 deletions(-)

diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index a119696d5521..ab3c0c9713b0 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -130,19 +130,20 @@ static int checksum_udp(struct sk_buff *skb, struct udphdr *uh,
  */
 static void poll_napi(struct netpoll *np)
 {
+    struct netpoll_info *npinfo = np->dev->npinfo;
     int budget = 16;
 
     if (test_bit(__LINK_STATE_RX_SCHED, &np->dev->state) &&
-        np->poll_owner != smp_processor_id() &&
-        spin_trylock(&np->poll_lock)) {
-        np->rx_flags |= NETPOLL_RX_DROP;
+        npinfo->poll_owner != smp_processor_id() &&
+        spin_trylock(&npinfo->poll_lock)) {
+        npinfo->rx_flags |= NETPOLL_RX_DROP;
         atomic_inc(&trapped);
 
         np->dev->poll(np->dev, &budget);
 
         atomic_dec(&trapped);
-        np->rx_flags &= ~NETPOLL_RX_DROP;
-        spin_unlock(&np->poll_lock);
+        npinfo->rx_flags &= ~NETPOLL_RX_DROP;
+        spin_unlock(&npinfo->poll_lock);
     }
 }
 
@@ -245,6 +246,7 @@ repeat:
 static void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
 {
     int status;
+    struct netpoll_info *npinfo;
 
 repeat:
     if(!np || !np->dev || !netif_running(np->dev)) {
@@ -253,8 +255,9 @@ repeat:
     }
 
     /* avoid recursion */
-    if(np->poll_owner == smp_processor_id() ||
-       np->dev->xmit_lock_owner == smp_processor_id()) {
+    npinfo = np->dev->npinfo;
+    if (npinfo->poll_owner == smp_processor_id() ||
+        np->dev->xmit_lock_owner == smp_processor_id()) {
         if (np->drop)
             np->drop(skb);
         else
@@ -341,14 +344,18 @@ void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
 
 static void arp_reply(struct sk_buff *skb)
 {
+    struct netpoll_info *npinfo = skb->dev->npinfo;
     struct arphdr *arp;
     unsigned char *arp_ptr;
     int size, type = ARPOP_REPLY, ptype = ETH_P_ARP;
     u32 sip, tip;
     struct sk_buff *send_skb;
-    struct netpoll *np = skb->dev->np;
+    struct netpoll *np = NULL;
 
-    if (!np) return;
+    if (npinfo)
+        np = npinfo->np;
+    if (!np)
+        return;
 
     /* No arp on this interface */
     if (skb->dev->flags & IFF_NOARP)
@@ -429,7 +436,7 @@ int __netpoll_rx(struct sk_buff *skb)
     int proto, len, ulen;
     struct iphdr *iph;
     struct udphdr *uh;
-    struct netpoll *np = skb->dev->np;
+    struct netpoll *np = skb->dev->npinfo->np;
 
     if (!np->rx_hook)
         goto out;
@@ -611,9 +618,7 @@ int netpoll_setup(struct netpoll *np)
 {
     struct net_device *ndev = NULL;
     struct in_device *in_dev;
-
-    np->poll_lock = SPIN_LOCK_UNLOCKED;
-    np->poll_owner = -1;
+    struct netpoll_info *npinfo;
 
     if (np->dev_name)
         ndev = dev_get_by_name(np->dev_name);
@@ -624,7 +629,16 @@ int netpoll_setup(struct netpoll *np)
     }
 
     np->dev = ndev;
-    ndev->np = np;
+    if (!ndev->npinfo) {
+        npinfo = kmalloc(sizeof(*npinfo), GFP_KERNEL);
+        if (!npinfo)
+            goto release;
+
+        npinfo->np = NULL;
+        npinfo->poll_lock = SPIN_LOCK_UNLOCKED;
+        npinfo->poll_owner = -1;
+    } else
+        npinfo = ndev->npinfo;
 
     if (!ndev->poll_controller) {
         printk(KERN_ERR "%s: %s doesn't support polling, aborting.\n",
@@ -693,12 +707,15 @@ int netpoll_setup(struct netpoll *np)
     }
 
     if(np->rx_hook)
-        np->rx_flags = NETPOLL_RX_ENABLED;
+        npinfo->rx_flags = NETPOLL_RX_ENABLED;
 
+    npinfo->np = np;
+    ndev->npinfo = npinfo;
     return 0;
 
  release:
-    ndev->np = NULL;
+    if (!ndev->npinfo)
+        kfree(npinfo);
     np->dev = NULL;
     dev_put(ndev);
     return -1;
@@ -706,9 +723,11 @@ int netpoll_setup(struct netpoll *np)
 
 void netpoll_cleanup(struct netpoll *np)
 {
-    if (np->dev)
-        np->dev->np = NULL;
-    dev_put(np->dev);
+    if (np->dev) {
+        if (np->dev->npinfo)
+            np->dev->npinfo->np = NULL;
+        dev_put(np->dev);
+    }
 
     np->dev = NULL;
 }
--
cgit v1.2.3
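The diff touches only net/core/netpoll.c, so the new structure itself is not
shown here. As a reading aid, its shape can be inferred from the fields the
code accesses (npinfo->np, ->rx_flags, ->poll_owner, ->poll_lock). A hedged
sketch, reconstructed from those accesses rather than quoted from
include/linux/netpoll.h; field order and exact types are assumptions:

    #include <linux/spinlock.h>

    struct netpoll_info {
        struct netpoll *np;     /* the (single, for now) registered client */
        int rx_flags;           /* NETPOLL_RX_ENABLED / NETPOLL_RX_DROP */
        int poll_owner;         /* CPU currently in ->poll(), -1 if none */
        spinlock_t poll_lock;   /* serializes poll reentry per device */
    };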
From fbeec2e1552949002065435c9829dc244ad85407 Mon Sep 17 00:00:00 2001
From: Jeff Moyer
Date: Wed, 22 Jun 2005 22:05:59 -0700
Subject: [NETPOLL]: allow multiple netpoll_clients to register against one interface

This patch provides support for registering multiple netpoll clients to the
same network device. Only one of these clients may register an rx_hook,
however. In practice, this restriction has not been problematic. It is
worth mentioning, though, that the current design can be easily extended to
allow for the registration of multiple rx_hooks.

The basic idea of the patch is that the rx_np pointer in the netpoll_info
structure points to the struct netpoll that has rx_hook filled in. Aside
from this one case, there is no need for a pointer from the struct
net_device to an individual struct netpoll. A lock is introduced to protect
the setting and clearing of the rx_np pointer. The pointer will only be
cleared upon netpoll client module removal, and the lock should be
uncontested.

Signed-off-by: Jeff Moyer
Signed-off-by: David S. Miller
---
 net/core/netpoll.c | 39 +++++++++++++++++++++++++++++----------
 1 file changed, 29 insertions(+), 10 deletions(-)

diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index ab3c0c9713b0..c327c9edadc5 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -349,11 +349,15 @@ static void arp_reply(struct sk_buff *skb)
     unsigned char *arp_ptr;
     int size, type = ARPOP_REPLY, ptype = ETH_P_ARP;
     u32 sip, tip;
+    unsigned long flags;
     struct sk_buff *send_skb;
     struct netpoll *np = NULL;
 
-    if (npinfo)
-        np = npinfo->np;
+    spin_lock_irqsave(&npinfo->rx_lock, flags);
+    if (npinfo->rx_np && npinfo->rx_np->dev == skb->dev)
+        np = npinfo->rx_np;
+    spin_unlock_irqrestore(&npinfo->rx_lock, flags);
+
     if (!np)
         return;
 
@@ -436,9 +440,9 @@ int __netpoll_rx(struct sk_buff *skb)
     int proto, len, ulen;
     struct iphdr *iph;
     struct udphdr *uh;
-    struct netpoll *np = skb->dev->npinfo->np;
+    struct netpoll *np = skb->dev->npinfo->rx_np;
 
-    if (!np->rx_hook)
+    if (!np)
         goto out;
     if (skb->dev->type != ARPHRD_ETHER)
         goto out;
@@ -619,6 +623,7 @@ int netpoll_setup(struct netpoll *np)
     struct net_device *ndev = NULL;
     struct in_device *in_dev;
     struct netpoll_info *npinfo;
+    unsigned long flags;
 
     if (np->dev_name)
         ndev = dev_get_by_name(np->dev_name);
@@ -634,9 +639,10 @@ int netpoll_setup(struct netpoll *np)
         if (!npinfo)
             goto release;
 
-        npinfo->np = NULL;
+        npinfo->rx_np = NULL;
         npinfo->poll_lock = SPIN_LOCK_UNLOCKED;
         npinfo->poll_owner = -1;
+        npinfo->rx_lock = SPIN_LOCK_UNLOCKED;
     } else
         npinfo = ndev->npinfo;
 
@@ -706,9 +712,13 @@ int netpoll_setup(struct netpoll *np)
                np->name, HIPQUAD(np->local_ip));
     }
 
-    if(np->rx_hook)
-        npinfo->rx_flags = NETPOLL_RX_ENABLED;
-    npinfo->np = np;
+    if (np->rx_hook) {
+        spin_lock_irqsave(&npinfo->rx_lock, flags);
+        npinfo->rx_flags |= NETPOLL_RX_ENABLED;
+        npinfo->rx_np = np;
+        spin_unlock_irqrestore(&npinfo->rx_lock, flags);
+    }
+
+    /* last thing to do is link it to the net device structure */
     ndev->npinfo = npinfo;
     return 0;
 
@@ -723,11 +733,20 @@ int netpoll_setup(struct netpoll *np)
 
 void netpoll_cleanup(struct netpoll *np)
 {
+    struct netpoll_info *npinfo;
+    unsigned long flags;
+
     if (np->dev) {
-        if (np->dev->npinfo)
-            np->dev->npinfo->np = NULL;
+        npinfo = np->dev->npinfo;
+        if (npinfo && npinfo->rx_np == np) {
+            spin_lock_irqsave(&npinfo->rx_lock, flags);
+            npinfo->rx_np = NULL;
+            npinfo->rx_flags &= ~NETPOLL_RX_ENABLED;
+            spin_unlock_irqrestore(&npinfo->rx_lock, flags);
+        }
         dev_put(np->dev);
     }
+
     np->dev = NULL;
 }
--
cgit v1.2.3
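For orientation, a hedged sketch of what two co-existing clients might look
like after this change. netpoll_setup() and netpoll_cleanup() are the real
entry points; the module scaffolding, the device name and the rx_hook
signature (as it appears in the struct netpoll of this era) are illustrative
assumptions, and the usual local/remote address setup is elided:

    #include <linux/module.h>
    #include <linux/init.h>
    #include <linux/netpoll.h>

    /* Hypothetical receive handler; only one registered client may supply
     * one, and that client becomes npinfo->rx_np. */
    static void sample_rx(struct netpoll *np, int port, char *msg, int len)
    {
        /* consume the UDP payload delivered by __netpoll_rx() */
    }

    static struct netpoll tx_only_np = {
        .name     = "sample-tx",    /* transmit-only client: no rx_hook */
        .dev_name = "eth0",
    };

    static struct netpoll rx_client_np = {
        .name     = "sample-rx",
        .dev_name = "eth0",
        .rx_hook  = sample_rx,
    };

    static int __init sample_init(void)
    {
        /* Both registrations may now succeed on the same device; the second
         * one reuses the already-allocated netpoll_info. */
        if (netpoll_setup(&tx_only_np))
            return -ENODEV;
        if (netpoll_setup(&rx_client_np)) {
            netpoll_cleanup(&tx_only_np);
            return -ENODEV;
        }
        return 0;
    }

    static void __exit sample_exit(void)
    {
        netpoll_cleanup(&rx_client_np);
        netpoll_cleanup(&tx_only_np);
    }

    module_init(sample_init);
    module_exit(sample_exit);
    MODULE_LICENSE("GPL");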
From 7abaa27c1c54208bd76fa8bae55839c034aebfb2 Mon Sep 17 00:00:00 2001
From: Chuck Short
Date: Wed, 22 Jun 2005 22:10:23 -0700
Subject: [IPV4]: Fix route.c gcc4 warnings

Signed-off-by: Chuck Short
Signed-off-by: David S. Miller
---
 net/ipv4/route.c | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index f4d53c919869..80cf633d9f4a 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -1767,7 +1767,7 @@ static inline int ip_mkroute_input_def(struct sk_buff *skb,
                                        struct in_device *in_dev,
                                        u32 daddr, u32 saddr, u32 tos)
 {
-    struct rtable* rth;
+    struct rtable* rth = NULL;
     int err;
     unsigned hash;
 
@@ -1794,7 +1794,7 @@ static inline int ip_mkroute_input(struct sk_buff *skb,
                                    u32 daddr, u32 saddr, u32 tos)
 {
 #ifdef CONFIG_IP_ROUTE_MULTIPATH_CACHED
-    struct rtable* rth;
+    struct rtable* rth = NULL;
     unsigned char hop, hopcount, lasthop;
     int err = -EINVAL;
     unsigned int hash;
@@ -2239,7 +2239,7 @@ static inline int ip_mkroute_output_def(struct rtable **rp,
                                         struct net_device *dev_out,
                                         unsigned flags)
 {
-    struct rtable *rth;
+    struct rtable *rth = NULL;
     int err = __mkroute_output(&rth, res, fl, oldflp, dev_out, flags);
     unsigned hash;
     if (err == 0) {
@@ -2267,7 +2267,7 @@ static inline int ip_mkroute_output(struct rtable** rp,
     unsigned char hop;
     unsigned hash;
     int err = -EINVAL;
-    struct rtable *rth;
+    struct rtable *rth = NULL;
 
     if (res->fi && res->fi->fib_nhs > 1) {
         unsigned char hopcount = res->fi->fib_nhs;
--
cgit v1.2.3

From 285b3afefacff14bc98e5754b8b48a0a2b42f0df Mon Sep 17 00:00:00 2001
From: Nishanth Aravamudan
Date: Wed, 22 Jun 2005 22:11:44 -0700
Subject: [ATALK] aarp: replace schedule_timeout() with msleep()

From: Nishanth Aravamudan

Use msleep() instead of schedule_timeout() to guarantee the task delays as
expected. The current code is not wrong, but it does not account for early
return due to signals, so I think msleep() should be appropriate.

Signed-off-by: Nishanth Aravamudan
Signed-off-by: Domen Puncer
Signed-off-by: David S. Miller
---
 net/appletalk/aarp.c | 7 +++----
 1 file changed, 3 insertions(+), 4 deletions(-)

diff --git a/net/appletalk/aarp.c b/net/appletalk/aarp.c
index 10d040461021..c34614ea5fce 100644
--- a/net/appletalk/aarp.c
+++ b/net/appletalk/aarp.c
@@ -35,6 +35,7 @@
 #include
 #include
 #include
+#include <linux/delay.h>
 #include
 #include
 #include
@@ -462,8 +463,7 @@ void aarp_probe_network(struct atalk_iface *atif)
         aarp_send_probe(atif->dev, &atif->address);
 
         /* Defer 1/10th */
-        current->state = TASK_INTERRUPTIBLE;
-        schedule_timeout(HZ / 10);
+        msleep(100);
 
         if (atif->status & ATIF_PROBE_FAIL)
             break;
@@ -510,9 +510,8 @@ int aarp_proxy_probe_network(struct atalk_iface *atif, struct atalk_addr *sa)
         aarp_send_probe(atif->dev, sa);
 
         /* Defer 1/10th */
-        current->state = TASK_INTERRUPTIBLE;
         write_unlock_bh(&aarp_lock);
-        schedule_timeout(HZ / 10);
+        msleep(100);
         write_lock_bh(&aarp_lock);
 
         if (entry->status & ATIF_PROBE_FAIL)
--
cgit v1.2.3
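A minimal sketch of the two delay idioms the aarp patch swaps, not taken
verbatim from aarp.c:

    #include <linux/delay.h>
    #include <linux/sched.h>

    static void wait_a_tenth_old(void)
    {
        /* Sleeps *up to* HZ/10 ticks: a pending signal wakes the task
         * early, and forgetting to set the state first would not sleep
         * at all. */
        current->state = TASK_INTERRUPTIBLE;
        schedule_timeout(HZ / 10);
    }

    static void wait_a_tenth_new(void)
    {
        /* Uninterruptible and millisecond-based: always delays the full
         * 100 ms regardless of HZ or pending signals. */
        msleep(100);
    }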
From 68d318720052154bc6b2513b0f15d0d947cc53c9 Mon Sep 17 00:00:00 2001
From: James Lamanna
Date: Wed, 22 Jun 2005 22:12:57 -0700
Subject: [EBTABLES]: vfree() checking cleanups

From: jlamanna@gmail.com

ebtables.c vfree() checking cleanups.

Signed-off-by: James Lamanna
Signed-off-by: Domen Puncer
Signed-off-by: David S. Miller
---
 net/bridge/netfilter/ebtables.c | 21 +++++++-------------
 1 file changed, 7 insertions(+), 14 deletions(-)

diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
index 18ebc664769b..c4540144f0f4 100644
--- a/net/bridge/netfilter/ebtables.c
+++ b/net/bridge/netfilter/ebtables.c
@@ -859,8 +859,7 @@ static int translate_table(struct ebt_replace *repl,
         if (repl->valid_hooks & (1 << i))
             if (check_chainloops(newinfo->hook_entry[i],
                cl_s, udc_cnt, i, newinfo->entries)) {
-                if (cl_s)
-                    vfree(cl_s);
+                vfree(cl_s);
                 return -EINVAL;
             }
@@ -883,8 +882,7 @@ static int translate_table(struct ebt_replace *repl,
         EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size,
            ebt_cleanup_entry, &i);
     }
-    if (cl_s)
-        vfree(cl_s);
+    vfree(cl_s);
     return ret;
 }
@@ -1030,8 +1028,7 @@ static int do_replace(void __user *user, unsigned int len)
     }
     vfree(table);
 
-    if (counterstmp)
-        vfree(counterstmp);
+    vfree(counterstmp);
     return ret;
 
 free_unlock:
@@ -1040,8 +1037,7 @@ free_iterate:
     EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size,
        ebt_cleanup_entry, NULL);
 free_counterstmp:
-    if (counterstmp)
-        vfree(counterstmp);
+    vfree(counterstmp);
     /* can be initialized in translate_table() */
     if (newinfo->chainstack) {
         for (i = 0; i < num_possible_cpus(); i++)
@@ -1049,11 +1045,9 @@ free_counterstmp:
             vfree(newinfo->chainstack);
     }
 free_entries:
-    if (newinfo->entries)
-        vfree(newinfo->entries);
+    vfree(newinfo->entries);
 free_newinfo:
-    if (newinfo)
-        vfree(newinfo);
+    vfree(newinfo);
     return ret;
 }
@@ -1213,8 +1207,7 @@ void ebt_unregister_table(struct ebt_table *table)
     down(&ebt_mutex);
     LIST_DELETE(&ebt_tables, table);
     up(&ebt_mutex);
-    if (table->private->entries)
-        vfree(table->private->entries);
+    vfree(table->private->entries);
     if (table->private->chainstack) {
         for (i = 0; i < num_possible_cpus(); i++)
             vfree(table->private->chainstack[i]);
--
cgit v1.2.3
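The cleanup relies on vfree(NULL) (like kfree(NULL)) being a no-op, so the
NULL guard adds nothing. A minimal sketch of the pattern, with hypothetical
names:

    #include <linux/vmalloc.h>

    /* Before: the guard only protects against a case vfree() handles. */
    static void release_buf_old(void *buf)
    {
        if (buf)
            vfree(buf);
    }

    /* After: vfree(NULL) is a no-op, so call it unconditionally. */
    static void release_buf_new(void *buf)
    {
        vfree(buf);
    }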
From cb65d506c34c86df5bcef939ce5a8666a451bd8b Mon Sep 17 00:00:00 2001
From: Shaun Pereira
Date: Wed, 22 Jun 2005 22:15:01 -0700
Subject: [X25]: Selective sub-address matching with call user data.

From: Shaun Pereira

This is the first (independent of the second) patch of two that I am
working on with x25 on linux (tested with xot on a cisco router). Details
are as follows.

Current state of module:

A server using the current implementation (2.6.11.7) of the x25 module will
accept a call request/incoming call packet at the listening x.25 address,
from all callers to that address, as long as NO call user data is present
in the packet header.

If the server needs to choose to accept a particular call request/incoming
call packet arriving at its listening x25 address, then the kernel has to
allow a match of call user data present in the call request packet with its
own. This is required when multiple servers listen at the same x25 address
and device interface. The kernel currently matches ALL call user data, if
present.

Current changes:

This patch is a follow up to the patch submitted previously by Andrew
Hendry, and allows the user to selectively control the number of octets of
call user data in the call request packet that the kernel will match. By
default no call user data is matched, even if call user data is present.
To allow call user data matching, a cudmatchlength > 0 has to be passed
into the kernel, after which the passed number of octets will be matched.
Otherwise the kernel behavior is exactly as the original implementation.

This patch also ensures that, as is normally the case, no call user data
will be present in the call accepted / call connected packet sent back to
the caller.

Future changes on next patch:

There are cases however when call user data may be present in the call
accepted packet. According to the X.25 recommendation (ITU-T 10/96)
section 5.2.3.2, call user data may be present in the call accepted packet
provided the fast select facility is used. My next patch will include this
fast select facility and the ability to send up to 128 octets call user
data in the call accepted packet, provided the fast select facility is
used. I am currently testing this, again with xot on linux and cisco.

Signed-off-by: Shaun Pereira
(With a fix from Alexey Dobriyan)
Signed-off-by: Andrew Morton
Signed-off-by: David S. Miller
---
 net/x25/af_x25.c   | 73 +++++++++++++++++++++++++++++++++++-------------------
 net/x25/x25_subr.c | 18 --------------
 2 files changed, 48 insertions(+), 43 deletions(-)

diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c
index 2a24b243b841..e17d84a55d5e 100644
--- a/net/x25/af_x25.c
+++ b/net/x25/af_x25.c
@@ -29,6 +29,8 @@
  *    2000-11-14    Henner Eisen    Closing datalink from NETDEV_GOING_DOWN
  *    2002-10-06    Arnaldo C. Melo Get rid of cli/sti, move proc stuff to
  *                                  x25_proc.c, using seq_file
+ *    2005-04-02    Shaun Pereira   Selective sub address matching
+ *                                  with call user data
  */
 
 #include
@@ -219,7 +221,8 @@ static void x25_insert_socket(struct sock *sk)
  *    Note: if a listening socket has cud set it must only get calls
  *    with matching cud.
  */
-static struct sock *x25_find_listener(struct x25_address *addr, struct x25_calluserdata *calluserdata)
+static struct sock *x25_find_listener(struct x25_address *addr,
+                                      struct sk_buff *skb)
 {
     struct sock *s;
     struct sock *next_best;
@@ -230,22 +233,23 @@ static struct sock *x25_find_listener(struct x25_address *addr, struct x25_callu
 
     sk_for_each(s, node, &x25_list)
         if ((!strcmp(addr->x25_addr,
-            x25_sk(s)->source_addr.x25_addr) ||
-            !strcmp(addr->x25_addr,
-            null_x25_address.x25_addr)) &&
-            s->sk_state == TCP_LISTEN) {
-
+            x25_sk(s)->source_addr.x25_addr) ||
+            !strcmp(addr->x25_addr,
+            null_x25_address.x25_addr)) &&
+            s->sk_state == TCP_LISTEN) {
             /*
              * Found a listening socket, now check the incoming
              * call user data vs this sockets call user data
              */
-            if (x25_check_calluserdata(&x25_sk(s)->calluserdata, calluserdata)) {
-                sock_hold(s);
-                goto found;
-            }
-            if (x25_sk(s)->calluserdata.cudlength == 0) {
+            if(skb->len > 0 && x25_sk(s)->cudmatchlength > 0) {
+                if((memcmp(x25_sk(s)->calluserdata.cuddata,
+                    skb->data,
+                    x25_sk(s)->cudmatchlength)) == 0) {
+                    sock_hold(s);
+                    goto found;
+                }
+            } else
                 next_best = s;
-            }
         }
     if (next_best) {
         s = next_best;
@@ -497,6 +501,7 @@ static int x25_create(struct socket *sock, int protocol)
     x25->t23   = sysctl_x25_clear_request_timeout;
     x25->t2    = sysctl_x25_ack_holdback_timeout;
     x25->state = X25_STATE_0;
+    x25->cudmatchlength = 0;
 
     x25->facilities.winsize_in  = X25_DEFAULT_WINDOW_SIZE;
     x25->facilities.winsize_out = X25_DEFAULT_WINDOW_SIZE;
@@ -545,6 +550,7 @@ static struct sock *x25_make_new(struct sock *osk)
     x25->t2         = ox25->t2;
     x25->facilities = ox25->facilities;
     x25->qbitincl   = ox25->qbitincl;
+    x25->cudmatchlength = ox25->cudmatchlength;
 
     x25_init_timers(sk);
 out:
@@ -822,7 +828,6 @@ int x25_rx_call_request(struct sk_buff *skb, struct x25_neigh *nb,
     struct x25_sock *makex25;
     struct x25_address source_addr, dest_addr;
     struct x25_facilities facilities;
-    struct x25_calluserdata calluserdata;
     int len, rc;
 
     /*
@@ -844,20 +849,11 @@ int x25_rx_call_request(struct sk_buff *skb, struct x25_neigh *nb,
     len = skb->data[0] + 1;
     skb_pull(skb,len);
 
-    /*
-     *    Incoming Call User Data.
-     */
-    if (skb->len >= 0) {
-        memcpy(calluserdata.cuddata, skb->data, skb->len);
-        calluserdata.cudlength = skb->len;
-    }
-
-    skb_push(skb,len);
-
     /*
      *    Find a listener for the particular address/cud pair.
      */
-    sk = x25_find_listener(&source_addr,&calluserdata);
+    sk = x25_find_listener(&source_addr,skb);
+    skb_push(skb,len);
 
     /*
      *    We can't accept the Call Request.
@@ -900,12 +896,22 @@ int x25_rx_call_request(struct sk_buff *skb, struct x25_neigh *nb,
     makex25->neighbour     = nb;
     makex25->facilities    = facilities;
     makex25->vc_facil_mask = x25_sk(sk)->vc_facil_mask;
-    makex25->calluserdata  = calluserdata;
+    /* ensure no reverse facil on accept */
+    makex25->vc_facil_mask &= ~X25_MASK_REVERSE;
+    makex25->cudmatchlength = x25_sk(sk)->cudmatchlength;
 
     x25_write_internal(make, X25_CALL_ACCEPTED);
 
     makex25->state = X25_STATE_3;
 
+    /*
+     *    Incoming Call User Data.
+     */
+    if (skb->len >= 0) {
+        memcpy(makex25->calluserdata.cuddata, skb->data, skb->len);
+        makex25->calluserdata.cudlength = skb->len;
+    }
+
     sk->sk_ack_backlog++;
     x25_insert_socket(make);
@@ -1325,6 +1331,23 @@ static int x25_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
             break;
         }
 
+        case SIOCX25SCUDMATCHLEN: {
+            struct x25_subaddr sub_addr;
+            rc = -EINVAL;
+            if(sk->sk_state != TCP_CLOSE)
+                break;
+            rc = -EFAULT;
+            if (copy_from_user(&sub_addr, argp,
+                    sizeof(sub_addr)))
+                break;
+            rc = -EINVAL;
+            if(sub_addr.cudmatchlength > X25_MAX_CUD_LEN)
+                break;
+            x25->cudmatchlength = sub_addr.cudmatchlength;
+            rc = 0;
+            break;
+        }
+
         default:
             rc = dev_ioctl(cmd, argp);
             break;
diff --git a/net/x25/x25_subr.c b/net/x25/x25_subr.c
index 183fea3bba67..c349bbd61684 100644
--- a/net/x25/x25_subr.c
+++ b/net/x25/x25_subr.c
@@ -354,21 +354,3 @@ void x25_check_rbuf(struct sock *sk)
     }
 }
 
-/*
- *    Compare 2 calluserdata structures, used to find correct listening sockets
- *    when call user data is used.
- */
-int x25_check_calluserdata(struct x25_calluserdata *ours, struct x25_calluserdata *theirs)
-{
-    int i;
-    if (ours->cudlength != theirs->cudlength)
-        return 0;
-
-    for (i=0;i<ours->cudlength;i++) {
-        if (ours->cuddata[i] != theirs->cuddata[i]) {
-            return 0;
-        }
-    }
-    return 1;
-}
-
--
cgit v1.2.3
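A hedged userspace sketch of how a listener would opt into the new matching,
pieced together from the ioctl handler above. struct x25_subaddr,
SIOCX25SCUDMATCHLEN and the requirement that the socket still be closed are
taken from the diff (the constants themselves live in the header change that
accompanies this patch); the X.121 address is illustrative and error
handling is minimal:

    #include <sys/socket.h>
    #include <sys/ioctl.h>
    #include <string.h>
    #include <linux/x25.h>

    int listen_with_cud_match(void)
    {
        struct sockaddr_x25 addr;
        struct x25_subaddr match;
        int s = socket(AF_X25, SOCK_SEQPACKET, 0);

        if (s < 0)
            return -1;

        memset(&addr, 0, sizeof(addr));
        addr.sx25_family = AF_X25;
        strcpy(addr.sx25_addr.x25_addr, "12345678");  /* made-up address */
        if (bind(s, (struct sockaddr *)&addr, sizeof(addr)) < 0)
            return -1;

        /* match the first 4 octets of incoming call user data;
         * must be done before listen(), while the socket is closed */
        match.cudmatchlength = 4;
        if (ioctl(s, SIOCX25SCUDMATCHLEN, &match) < 0)
            return -1;

        return listen(s, 4) < 0 ? -1 : s;
    }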
From ebc3f64b864fc16a594c2e63bf55a55c7d42084b Mon Sep 17 00:00:00 2001
From: Shaun Pereira
Date: Wed, 22 Jun 2005 22:16:17 -0700
Subject: [X25]: Fast select with no restriction on response

This patch is a follow up to patch 1 regarding "Selective Sub Address
matching with call user data". It allows use of the Fast-Select-Acceptance
optional user facility for X.25.

This patch just implements fast select with no restriction on response
(NRR). What this means (according to ITU-T Recommendation 10/96 section
6.16) is that if, in an incoming call packet, the relevant facility bits
are set for fast-select-NRR, then the called DTE can issue a direct
response to the incoming packet using a call-accepted packet that contains
call-user-data. This patch allows such a response.

The called DTE can also respond with a clear-request packet that contains
call-user-data. However, this feature is currently not implemented by the
patch.

How is Fast Select Acceptance used?

By default, the system does not allow fast select acceptance (as before).
To enable a response to fast select acceptance, after a listen socket is
created and bound as follows

    socket(AF_X25, SOCK_SEQPACKET, 0);
    bind(call_soc, (struct sockaddr *)&locl_addr, sizeof(locl_addr));

but before a listen system call is made, the following ioctl should be used:

    ioctl(call_soc, SIOCX25CALLACCPTAPPRV);

Now the listen system call can be made:

    listen(call_soc, 4);

After this, an incoming-call packet will be accepted, but no call-accepted
packet will be sent back until the following system call is made on the
socket that accepts the call:

    ioctl(vc_soc, SIOCX25SENDCALLACCPT);

The network (or cisco xot router used for testing here) will allow the
application server's call-user-data in the call-accepted packet, provided
the call-request was made with Fast-select NRR.

Signed-off-by: Shaun Pereira
Signed-off-by: Andrew Morton
Signed-off-by: David S. Miller
---
 net/x25/af_x25.c         | 37 +++++++++++++++++++++++++++++----
 net/x25/x25_facilities.c | 34 +++++++++++++++++++++++++++++-----
 net/x25/x25_subr.c       | 23 ++++++++++++++++++-----
 3 files changed, 80 insertions(+), 14 deletions(-)

diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c
index e17d84a55d5e..04bec047fa9a 100644
--- a/net/x25/af_x25.c
+++ b/net/x25/af_x25.c
@@ -31,6 +31,8 @@
  *                                  x25_proc.c, using seq_file
  *    2005-04-02    Shaun Pereira   Selective sub address matching
  *                                  with call user data
+ *    2005-04-15    Shaun Pereira   Fast select with no restriction on
+ *                                  response
  */
 
 #include
@@ -502,6 +504,8 @@ static int x25_create(struct socket *sock, int protocol)
     x25->t2    = sysctl_x25_ack_holdback_timeout;
     x25->state = X25_STATE_0;
     x25->cudmatchlength = 0;
+    x25->accptapprv = X25_DENY_ACCPT_APPRV;    /* normally no cud */
+                                               /* on call accept  */
 
     x25->facilities.winsize_in  = X25_DEFAULT_WINDOW_SIZE;
     x25->facilities.winsize_out = X25_DEFAULT_WINDOW_SIZE;
@@ -551,6 +555,7 @@ static struct sock *x25_make_new(struct sock *osk)
     x25->facilities = ox25->facilities;
     x25->qbitincl   = ox25->qbitincl;
     x25->cudmatchlength = ox25->cudmatchlength;
+    x25->accptapprv = ox25->accptapprv;
 
     x25_init_timers(sk);
 out:
@@ -900,9 +905,11 @@ int x25_rx_call_request(struct sk_buff *skb, struct x25_neigh *nb,
     makex25->vc_facil_mask &= ~X25_MASK_REVERSE;
     makex25->cudmatchlength = x25_sk(sk)->cudmatchlength;
 
-    x25_write_internal(make, X25_CALL_ACCEPTED);
-
-    makex25->state = X25_STATE_3;
+    /* Normally all calls are accepted immediatly */
+    if(makex25->accptapprv & X25_DENY_ACCPT_APPRV) {
+        x25_write_internal(make, X25_CALL_ACCEPTED);
+        makex25->state = X25_STATE_3;
+    }
 
     /*
      *    Incoming Call User Data.
@@ -1294,7 +1301,8 @@ static int x25_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
             if (facilities.throughput < 0x03 ||
                 facilities.throughput > 0xDD)
                 break;
-            if (facilities.reverse && facilities.reverse != 1)
+            if (facilities.reverse &&
+                (facilities.reverse | 0x81)!= 0x81)
                 break;
             x25->facilities = facilities;
             rc = 0;
@@ -1348,6 +1356,27 @@ static int x25_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
             break;
         }
 
+        case SIOCX25CALLACCPTAPPRV: {
+            rc = -EINVAL;
+            if (sk->sk_state != TCP_CLOSE)
+                break;
+            x25->accptapprv = X25_ALLOW_ACCPT_APPRV;
+            rc = 0;
+            break;
+        }
+
+        case SIOCX25SENDCALLACCPT: {
+            rc = -EINVAL;
+            if (sk->sk_state != TCP_ESTABLISHED)
+                break;
+            if (x25->accptapprv)    /* must call accptapprv above */
+                break;
+            x25_write_internal(sk, X25_CALL_ACCEPTED);
+            x25->state = X25_STATE_3;
+            rc = 0;
+            break;
+        }
+
         default:
             rc = dev_ioctl(cmd, argp);
             break;
diff --git a/net/x25/x25_facilities.c b/net/x25/x25_facilities.c
index a21bdb95f9a8..54278b962f4c 100644
--- a/net/x25/x25_facilities.c
+++ b/net/x25/x25_facilities.c
@@ -17,6 +17,8 @@
  *    X.25 001      Split from x25_subr.c
  *    mar/20/00     Daniela Squassoni Disabling/enabling of facilities
  *                                    negotiation.
+ *    apr/14/05     Shaun Pereira - Allow fast select with no restriction
+ *                                  on response.
  */
 
 #include
@@ -43,9 +45,31 @@ int x25_parse_facilities(struct sk_buff *skb,
         case X25_FAC_CLASS_A:
             switch (*p) {
             case X25_FAC_REVERSE:
-                facilities->reverse = p[1] & 0x01;
-                *vc_fac_mask |= X25_MASK_REVERSE;
-                break;
+                if((p[1] & 0x81) == 0x81) {
+                    facilities->reverse = p[1] & 0x81;
+                    *vc_fac_mask |= X25_MASK_REVERSE;
+                    break;
+                }
+
+                if((p[1] & 0x01) == 0x01) {
+                    facilities->reverse = p[1] & 0x01;
+                    *vc_fac_mask |= X25_MASK_REVERSE;
+                    break;
+                }
+
+                if((p[1] & 0x80) == 0x80) {
+                    facilities->reverse = p[1] & 0x80;
+                    *vc_fac_mask |= X25_MASK_REVERSE;
+                    break;
+                }
+
+                if(p[1] == 0x00) {
+                    facilities->reverse
+                        = X25_DEFAULT_REVERSE;
+                    *vc_fac_mask |= X25_MASK_REVERSE;
+                    break;
+                }
+
             case X25_FAC_THROUGHPUT:
                 facilities->throughput = p[1];
                 *vc_fac_mask |= X25_MASK_THROUGHPUT;
@@ -122,7 +146,7 @@ int x25_create_facilities(unsigned char *buffer,
 
     if (facilities->reverse && (facil_mask & X25_MASK_REVERSE)) {
         *p++ = X25_FAC_REVERSE;
-        *p++ = !!facilities->reverse;
+        *p++ = facilities->reverse;
     }
 
     if (facilities->throughput && (facil_mask & X25_MASK_THROUGHPUT)) {
@@ -171,7 +195,7 @@ int x25_negotiate_facilities(struct sk_buff *skb, struct sock *sk,
     /*
      *    They want reverse charging, we won't accept it.
      */
-    if (theirs.reverse && ours->reverse) {
+    if ((theirs.reverse & 0x01 ) && (ours->reverse & 0x01)) {
         SOCK_DEBUG(sk, "X.25: rejecting reverse charging request");
         return -1;
     }
diff --git a/net/x25/x25_subr.c b/net/x25/x25_subr.c
index c349bbd61684..7fd872ad0c20 100644
--- a/net/x25/x25_subr.c
+++ b/net/x25/x25_subr.c
@@ -19,6 +19,8 @@
  *    mar/20/00     Daniela Squassoni Disabling/enabling of facilities
  *                                    negotiation.
  *    jun/24/01     Arnaldo C. Melo   use skb_queue_purge, cleanups
+ *    apr/04/15     Shaun Pereira     Fast select with no
+ *                                    restriction on response.
  */
 
 #include
@@ -127,8 +129,12 @@ void x25_write_internal(struct sock *sk, int frametype)
         len += 1 + X25_ADDR_LEN + X25_MAX_FAC_LEN +
                X25_MAX_CUD_LEN;
         break;
-    case X25_CALL_ACCEPTED:
-        len += 1 + X25_MAX_FAC_LEN + X25_MAX_CUD_LEN;
+    case X25_CALL_ACCEPTED: /* fast sel with no restr on resp */
+        if(x25->facilities.reverse & 0x80) {
+            len += 1 + X25_MAX_FAC_LEN + X25_MAX_CUD_LEN;
+        } else {
+            len += 1 + X25_MAX_FAC_LEN;
+        }
         break;
     case X25_CLEAR_REQUEST:
     case X25_RESET_REQUEST:
@@ -203,9 +209,16 @@ void x25_write_internal(struct sock *sk, int frametype)
                         x25->vc_facil_mask);
         dptr = skb_put(skb, len);
         memcpy(dptr, facilities, len);
-        dptr = skb_put(skb, x25->calluserdata.cudlength);
-        memcpy(dptr, x25->calluserdata.cuddata,
-               x25->calluserdata.cudlength);
+
+        /* fast select with no restriction on response
+           allows call user data. Userland must
+           ensure it is ours and not theirs */
+        if(x25->facilities.reverse & 0x80) {
+            dptr = skb_put(skb,
+                x25->calluserdata.cudlength);
+            memcpy(dptr, x25->calluserdata.cuddata,
+                x25->calluserdata.cudlength);
+        }
         x25->calluserdata.cudlength = 0;
         break;
--
cgit v1.2.3
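Pulling the commit message's fragments together, a hedged userspace sketch
of the deferred-accept flow. SIOCX25CALLACCPTAPPRV and SIOCX25SENDCALLACCPT
come from the header change that accompanies this patch; the listening
address is made up, most error handling is omitted, and the step that queues
the outgoing call user data is left as a placeholder:

    #include <sys/socket.h>
    #include <sys/ioctl.h>
    #include <string.h>
    #include <linux/x25.h>

    int serve_fast_select(void)
    {
        struct sockaddr_x25 locl_addr;
        int call_soc, vc_soc;

        call_soc = socket(AF_X25, SOCK_SEQPACKET, 0);
        if (call_soc < 0)
            return -1;

        memset(&locl_addr, 0, sizeof(locl_addr));
        locl_addr.sx25_family = AF_X25;
        strcpy(locl_addr.sx25_addr.x25_addr, "12345678");  /* made-up */
        bind(call_soc, (struct sockaddr *)&locl_addr, sizeof(locl_addr));

        /* opt in to call-accept approval before listen() */
        ioctl(call_soc, SIOCX25CALLACCPTAPPRV);
        listen(call_soc, 4);

        vc_soc = accept(call_soc, NULL, NULL);
        if (vc_soc < 0)
            return -1;

        /* ... set up the call user data for the accept packet here ... */

        /* only now is the call-accepted packet actually sent */
        ioctl(vc_soc, SIOCX25SENDCALLACCPT);
        return vc_soc;
    }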