Diffstat (limited to 'net/caif')
-rw-r--r--   net/caif/Makefile            |   2
-rw-r--r--   net/caif/caif_config_util.c  |  99
-rw-r--r--   net/caif/caif_dev.c          | 392
-rw-r--r--   net/caif/caif_socket.c       | 115
-rw-r--r--   net/caif/cfcnfg.c            | 531
-rw-r--r--   net/caif/cfctrl.c            | 234
-rw-r--r--   net/caif/cfdgml.c            |  13
-rw-r--r--   net/caif/cffrml.c            |  60
-rw-r--r--   net/caif/cfmuxl.c            | 197
-rw-r--r--   net/caif/cfpkt_skbuff.c      | 205
-rw-r--r--   net/caif/cfrfml.c            |   4
-rw-r--r--   net/caif/cfserl.c            |   7
-rw-r--r--   net/caif/cfsrvl.c            |  40
-rw-r--r--   net/caif/cfutill.c           |   7
-rw-r--r--   net/caif/cfveil.c            |  11
-rw-r--r--   net/caif/cfvidl.c            |   5
-rw-r--r--   net/caif/chnl_net.c          |  45
17 files changed, 968 insertions, 999 deletions
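
The bulk of the caif_dev.c changes below replace the old atomic state and wait-queue bookkeeping with an RCU-protected device list plus a per-CPU reference count that tracks packets in flight through the CAIF stack. As orientation before reading the diff, here is a trimmed sketch of that counting pattern, using the same helper names and per-CPU calls the patch itself introduces (the full struct, netdevice and RCU list handling are omitted):

	/*
	 * Trimmed illustration of the per-CPU refcount pattern added in
	 * caif_dev.c below; not a complete or buildable excerpt of the patch.
	 */
	#include <linux/percpu.h>
	#include <linux/slab.h>

	struct caif_device_entry {
		int __percpu *pcpu_refcnt;	/* "packets in flight" counter */
		/* ... net_device pointer, cflayer, RCU list node ... */
	};

	/* Taken under rcu_read_lock() before handing a packet up the stack. */
	static void caifd_hold(struct caif_device_entry *e)
	{
		irqsafe_cpu_inc(*e->pcpu_refcnt);
	}

	static void caifd_put(struct caif_device_entry *e)
	{
		irqsafe_cpu_dec(*e->pcpu_refcnt);
	}

	/*
	 * Only the slow path (NETDEV_UNREGISTER / namespace exit) needs the
	 * exact value, so the sum over all CPUs is computed there rather
	 * than on every hold/put.
	 */
	static int caifd_refcnt_read(struct caif_device_entry *e)
	{
		int i, refcnt = 0;

		for_each_possible_cpu(i)
			refcnt += *per_cpu_ptr(e->pcpu_refcnt, i);
		return refcnt;
	}

The device entry allocates the counter with alloc_percpu(int) at NETDEV_REGISTER and frees it with free_percpu() once the count has drained to zero, as shown in the diff that follows.
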
diff --git a/net/caif/Makefile b/net/caif/Makefile index 9d38e406e4a4..ebcd4e7e6f47 100644 --- a/net/caif/Makefile +++ b/net/caif/Makefile @@ -5,7 +5,7 @@ caif-y := caif_dev.o \ cffrml.o cfveil.o cfdbgl.o\ cfserl.o cfdgml.o \ cfrfml.o cfvidl.o cfutill.o \ - cfsrvl.o cfpkt_skbuff.o caif_config_util.o + cfsrvl.o cfpkt_skbuff.o obj-$(CONFIG_CAIF) += caif.o obj-$(CONFIG_CAIF_NETDEV) += chnl_net.o diff --git a/net/caif/caif_config_util.c b/net/caif/caif_config_util.c deleted file mode 100644 index d522d8c1703e..000000000000 --- a/net/caif/caif_config_util.c +++ /dev/null @@ -1,99 +0,0 @@ -/* - * Copyright (C) ST-Ericsson AB 2010 - * Author: Sjur Brendeland sjur.brandeland@stericsson.com - * License terms: GNU General Public License (GPL) version 2 - */ - -#include <linux/module.h> -#include <linux/spinlock.h> -#include <net/caif/cfctrl.h> -#include <net/caif/cfcnfg.h> -#include <net/caif/caif_dev.h> - -int connect_req_to_link_param(struct cfcnfg *cnfg, - struct caif_connect_request *s, - struct cfctrl_link_param *l) -{ - struct dev_info *dev_info; - enum cfcnfg_phy_preference pref; - int res; - - memset(l, 0, sizeof(*l)); - /* In caif protocol low value is high priority */ - l->priority = CAIF_PRIO_MAX - s->priority + 1; - - if (s->ifindex != 0){ - res = cfcnfg_get_id_from_ifi(cnfg, s->ifindex); - if (res < 0) - return res; - l->phyid = res; - } - else { - switch (s->link_selector) { - case CAIF_LINK_HIGH_BANDW: - pref = CFPHYPREF_HIGH_BW; - break; - case CAIF_LINK_LOW_LATENCY: - pref = CFPHYPREF_LOW_LAT; - break; - default: - return -EINVAL; - } - dev_info = cfcnfg_get_phyid(cnfg, pref); - if (dev_info == NULL) - return -ENODEV; - l->phyid = dev_info->id; - } - switch (s->protocol) { - case CAIFPROTO_AT: - l->linktype = CFCTRL_SRV_VEI; - if (s->sockaddr.u.at.type == CAIF_ATTYPE_PLAIN) - l->chtype = 0x02; - else - l->chtype = s->sockaddr.u.at.type; - l->endpoint = 0x00; - break; - case CAIFPROTO_DATAGRAM: - l->linktype = CFCTRL_SRV_DATAGRAM; - l->chtype = 0x00; - l->u.datagram.connid = s->sockaddr.u.dgm.connection_id; - break; - case CAIFPROTO_DATAGRAM_LOOP: - l->linktype = CFCTRL_SRV_DATAGRAM; - l->chtype = 0x03; - l->endpoint = 0x00; - l->u.datagram.connid = s->sockaddr.u.dgm.connection_id; - break; - case CAIFPROTO_RFM: - l->linktype = CFCTRL_SRV_RFM; - l->u.datagram.connid = s->sockaddr.u.rfm.connection_id; - strncpy(l->u.rfm.volume, s->sockaddr.u.rfm.volume, - sizeof(l->u.rfm.volume)-1); - l->u.rfm.volume[sizeof(l->u.rfm.volume)-1] = 0; - break; - case CAIFPROTO_UTIL: - l->linktype = CFCTRL_SRV_UTIL; - l->endpoint = 0x00; - l->chtype = 0x00; - strncpy(l->u.utility.name, s->sockaddr.u.util.service, - sizeof(l->u.utility.name)-1); - l->u.utility.name[sizeof(l->u.utility.name)-1] = 0; - caif_assert(sizeof(l->u.utility.name) > 10); - l->u.utility.paramlen = s->param.size; - if (l->u.utility.paramlen > sizeof(l->u.utility.params)) - l->u.utility.paramlen = sizeof(l->u.utility.params); - - memcpy(l->u.utility.params, s->param.data, - l->u.utility.paramlen); - - break; - case CAIFPROTO_DEBUG: - l->linktype = CFCTRL_SRV_DBG; - l->endpoint = s->sockaddr.u.dbg.service; - l->chtype = s->sockaddr.u.dbg.type; - break; - default: - return -EINVAL; - } - return 0; -} diff --git a/net/caif/caif_dev.c b/net/caif/caif_dev.c index a42a408306e4..682c0fedf360 100644 --- a/net/caif/caif_dev.c +++ b/net/caif/caif_dev.c @@ -12,49 +12,51 @@ #define pr_fmt(fmt) KBUILD_MODNAME ":%s(): " fmt, __func__ #include <linux/version.h> -#include <linux/module.h> #include <linux/kernel.h> #include <linux/if_arp.h> #include 
<linux/net.h> #include <linux/netdevice.h> -#include <linux/skbuff.h> -#include <linux/sched.h> -#include <linux/wait.h> +#include <linux/mutex.h> #include <net/netns/generic.h> #include <net/net_namespace.h> #include <net/pkt_sched.h> #include <net/caif/caif_device.h> -#include <net/caif/caif_dev.h> #include <net/caif/caif_layer.h> #include <net/caif/cfpkt.h> #include <net/caif/cfcnfg.h> MODULE_LICENSE("GPL"); -#define TIMEOUT (HZ*5) /* Used for local tracking of the CAIF net devices */ struct caif_device_entry { struct cflayer layer; struct list_head list; - atomic_t in_use; - atomic_t state; - u16 phyid; struct net_device *netdev; - wait_queue_head_t event; + int __percpu *pcpu_refcnt; }; struct caif_device_entry_list { struct list_head list; /* Protects simulanous deletes in list */ - spinlock_t lock; + struct mutex lock; }; struct caif_net { + struct cfcnfg *cfg; struct caif_device_entry_list caifdevs; }; static int caif_net_id; -static struct cfcnfg *cfg; + +struct cfcnfg *get_cfcnfg(struct net *net) +{ + struct caif_net *caifn; + BUG_ON(!net); + caifn = net_generic(net, caif_net_id); + BUG_ON(!caifn); + return caifn->cfg; +} +EXPORT_SYMBOL(get_cfcnfg); static struct caif_device_entry_list *caif_device_list(struct net *net) { @@ -65,19 +67,39 @@ static struct caif_device_entry_list *caif_device_list(struct net *net) return &caifn->caifdevs; } +static void caifd_put(struct caif_device_entry *e) +{ + irqsafe_cpu_dec(*e->pcpu_refcnt); +} + +static void caifd_hold(struct caif_device_entry *e) +{ + irqsafe_cpu_inc(*e->pcpu_refcnt); +} + +static int caifd_refcnt_read(struct caif_device_entry *e) +{ + int i, refcnt = 0; + for_each_possible_cpu(i) + refcnt += *per_cpu_ptr(e->pcpu_refcnt, i); + return refcnt; +} + /* Allocate new CAIF device. */ static struct caif_device_entry *caif_device_alloc(struct net_device *dev) { struct caif_device_entry_list *caifdevs; struct caif_device_entry *caifd; + caifdevs = caif_device_list(dev_net(dev)); BUG_ON(!caifdevs); + caifd = kzalloc(sizeof(*caifd), GFP_ATOMIC); if (!caifd) return NULL; + caifd->pcpu_refcnt = alloc_percpu(int); caifd->netdev = dev; - list_add(&caifd->list, &caifdevs->list); - init_waitqueue_head(&caifd->event); + dev_hold(dev); return caifd; } @@ -87,98 +109,65 @@ static struct caif_device_entry *caif_get(struct net_device *dev) caif_device_list(dev_net(dev)); struct caif_device_entry *caifd; BUG_ON(!caifdevs); - list_for_each_entry(caifd, &caifdevs->list, list) { + list_for_each_entry_rcu(caifd, &caifdevs->list, list) { if (caifd->netdev == dev) return caifd; } return NULL; } -static void caif_device_destroy(struct net_device *dev) -{ - struct caif_device_entry_list *caifdevs = - caif_device_list(dev_net(dev)); - struct caif_device_entry *caifd; - ASSERT_RTNL(); - if (dev->type != ARPHRD_CAIF) - return; - - spin_lock_bh(&caifdevs->lock); - caifd = caif_get(dev); - if (caifd == NULL) { - spin_unlock_bh(&caifdevs->lock); - return; - } - - list_del(&caifd->list); - spin_unlock_bh(&caifdevs->lock); - - kfree(caifd); -} - static int transmit(struct cflayer *layer, struct cfpkt *pkt) { + int err; struct caif_device_entry *caifd = container_of(layer, struct caif_device_entry, layer); - struct sk_buff *skb, *skb2; - int ret = -EINVAL; + struct sk_buff *skb; + skb = cfpkt_tonative(pkt); skb->dev = caifd->netdev; - /* - * Don't allow SKB to be destroyed upon error, but signal resend - * notification to clients. We can't rely on the return value as - * congestion (NET_XMIT_CN) sometimes drops the packet, sometimes don't. 
- */ - if (netif_queue_stopped(caifd->netdev)) - return -EAGAIN; - skb2 = skb_get(skb); - - ret = dev_queue_xmit(skb2); - - if (!ret) - kfree_skb(skb); - else - return -EAGAIN; - return 0; -} + err = dev_queue_xmit(skb); + if (err > 0) + err = -EIO; -static int modemcmd(struct cflayer *layr, enum caif_modemcmd ctrl) -{ - struct caif_device_entry *caifd; - struct caif_dev_common *caifdev; - caifd = container_of(layr, struct caif_device_entry, layer); - caifdev = netdev_priv(caifd->netdev); - if (ctrl == _CAIF_MODEMCMD_PHYIF_USEFULL) { - atomic_set(&caifd->in_use, 1); - wake_up_interruptible(&caifd->event); - - } else if (ctrl == _CAIF_MODEMCMD_PHYIF_USELESS) { - atomic_set(&caifd->in_use, 0); - wake_up_interruptible(&caifd->event); - } - return 0; + return err; } /* - * Stuff received packets to associated sockets. + * Stuff received packets into the CAIF stack. * On error, returns non-zero and releases the skb. */ static int receive(struct sk_buff *skb, struct net_device *dev, struct packet_type *pkttype, struct net_device *orig_dev) { - struct net *net; struct cfpkt *pkt; struct caif_device_entry *caifd; - net = dev_net(dev); + int err; + pkt = cfpkt_fromnative(CAIF_DIR_IN, skb); + + rcu_read_lock(); caifd = caif_get(dev); - if (!caifd || !caifd->layer.up || !caifd->layer.up->receive) - return NET_RX_DROP; - if (caifd->layer.up->receive(caifd->layer.up, pkt)) + if (!caifd || !caifd->layer.up || !caifd->layer.up->receive || + !netif_oper_up(caifd->netdev)) { + rcu_read_unlock(); + kfree_skb(skb); return NET_RX_DROP; + } + + /* Hold reference to netdevice while using CAIF stack */ + caifd_hold(caifd); + rcu_read_unlock(); + + err = caifd->layer.up->receive(caifd->layer.up, pkt); + /* For -EILSEQ the packet is not freed so so it now */ + if (err == -EILSEQ) + cfpkt_destroy(pkt); + + /* Release reference to stack upwards */ + caifd_put(caifd); return 0; } @@ -189,15 +178,25 @@ static struct packet_type caif_packet_type __read_mostly = { static void dev_flowctrl(struct net_device *dev, int on) { - struct caif_device_entry *caifd = caif_get(dev); - if (!caifd || !caifd->layer.up || !caifd->layer.up->ctrlcmd) + struct caif_device_entry *caifd; + + rcu_read_lock(); + + caifd = caif_get(dev); + if (!caifd || !caifd->layer.up || !caifd->layer.up->ctrlcmd) { + rcu_read_unlock(); return; + } + + caifd_hold(caifd); + rcu_read_unlock(); caifd->layer.up->ctrlcmd(caifd->layer.up, on ? 
_CAIF_CTRLCMD_PHYIF_FLOW_ON_IND : _CAIF_CTRLCMD_PHYIF_FLOW_OFF_IND, caifd->layer.id); + caifd_put(caifd); } /* notify Caif of device events */ @@ -208,37 +207,28 @@ static int caif_device_notify(struct notifier_block *me, unsigned long what, struct caif_device_entry *caifd = NULL; struct caif_dev_common *caifdev; enum cfcnfg_phy_preference pref; - int res = -EINVAL; enum cfcnfg_phy_type phy_type; + struct cfcnfg *cfg; + struct caif_device_entry_list *caifdevs = + caif_device_list(dev_net(dev)); if (dev->type != ARPHRD_CAIF) return 0; + cfg = get_cfcnfg(dev_net(dev)); + if (cfg == NULL) + return 0; + switch (what) { case NETDEV_REGISTER: - netdev_info(dev, "register\n"); caifd = caif_device_alloc(dev); - if (caifd == NULL) - break; + if (!caifd) + return 0; + caifdev = netdev_priv(dev); caifdev->flowctrl = dev_flowctrl; - atomic_set(&caifd->state, what); - res = 0; - break; - case NETDEV_UP: - netdev_info(dev, "up\n"); - caifd = caif_get(dev); - if (caifd == NULL) - break; - caifdev = netdev_priv(dev); - if (atomic_read(&caifd->state) == NETDEV_UP) { - netdev_info(dev, "already up\n"); - break; - } - atomic_set(&caifd->state, what); caifd->layer.transmit = transmit; - caifd->layer.modemcmd = modemcmd; if (caifdev->use_frag) phy_type = CFPHYTYPE_FRAG; @@ -256,62 +246,94 @@ static int caif_device_notify(struct notifier_block *me, unsigned long what, pref = CFPHYPREF_HIGH_BW; break; } - dev_hold(dev); - cfcnfg_add_phy_layer(get_caif_conf(), + strncpy(caifd->layer.name, dev->name, + sizeof(caifd->layer.name) - 1); + caifd->layer.name[sizeof(caifd->layer.name) - 1] = 0; + + mutex_lock(&caifdevs->lock); + list_add_rcu(&caifd->list, &caifdevs->list); + + cfcnfg_add_phy_layer(cfg, phy_type, dev, &caifd->layer, - &caifd->phyid, pref, caifdev->use_fcs, caifdev->use_stx); - strncpy(caifd->layer.name, dev->name, - sizeof(caifd->layer.name) - 1); - caifd->layer.name[sizeof(caifd->layer.name) - 1] = 0; + mutex_unlock(&caifdevs->lock); break; - case NETDEV_GOING_DOWN: + case NETDEV_UP: + rcu_read_lock(); + caifd = caif_get(dev); - if (caifd == NULL) + if (caifd == NULL) { + rcu_read_unlock(); break; - netdev_info(dev, "going down\n"); + } - if (atomic_read(&caifd->state) == NETDEV_GOING_DOWN || - atomic_read(&caifd->state) == NETDEV_DOWN) - break; + cfcnfg_set_phy_state(cfg, &caifd->layer, true); + rcu_read_unlock(); - atomic_set(&caifd->state, what); - if (!caifd || !caifd->layer.up || !caifd->layer.up->ctrlcmd) - return -EINVAL; - caifd->layer.up->ctrlcmd(caifd->layer.up, - _CAIF_CTRLCMD_PHYIF_DOWN_IND, - caifd->layer.id); - might_sleep(); - res = wait_event_interruptible_timeout(caifd->event, - atomic_read(&caifd->in_use) == 0, - TIMEOUT); break; case NETDEV_DOWN: + rcu_read_lock(); + caifd = caif_get(dev); - if (caifd == NULL) - break; - netdev_info(dev, "down\n"); - if (atomic_read(&caifd->in_use)) - netdev_warn(dev, - "Unregistering an active CAIF device\n"); - cfcnfg_del_phy_layer(get_caif_conf(), &caifd->layer); - dev_put(dev); - atomic_set(&caifd->state, what); + if (!caifd || !caifd->layer.up || !caifd->layer.up->ctrlcmd) { + rcu_read_unlock(); + return -EINVAL; + } + + cfcnfg_set_phy_state(cfg, &caifd->layer, false); + caifd_hold(caifd); + rcu_read_unlock(); + + caifd->layer.up->ctrlcmd(caifd->layer.up, + _CAIF_CTRLCMD_PHYIF_DOWN_IND, + caifd->layer.id); + caifd_put(caifd); break; case NETDEV_UNREGISTER: + mutex_lock(&caifdevs->lock); + caifd = caif_get(dev); - if (caifd == NULL) + if (caifd == NULL) { + mutex_unlock(&caifdevs->lock); + break; + } + list_del_rcu(&caifd->list); + + /* + * 
NETDEV_UNREGISTER is called repeatedly until all reference + * counts for the net-device are released. If references to + * caifd is taken, simply ignore NETDEV_UNREGISTER and wait for + * the next call to NETDEV_UNREGISTER. + * + * If any packets are in flight down the CAIF Stack, + * cfcnfg_del_phy_layer will return nonzero. + * If no packets are in flight, the CAIF Stack associated + * with the net-device un-registering is freed. + */ + + if (caifd_refcnt_read(caifd) != 0 || + cfcnfg_del_phy_layer(cfg, &caifd->layer) != 0) { + + pr_info("Wait for device inuse\n"); + /* Enrole device if CAIF Stack is still in use */ + list_add_rcu(&caifd->list, &caifdevs->list); + mutex_unlock(&caifdevs->lock); break; - netdev_info(dev, "unregister\n"); - atomic_set(&caifd->state, what); - caif_device_destroy(dev); + } + + synchronize_rcu(); + dev_put(caifd->netdev); + free_percpu(caifd->pcpu_refcnt); + kfree(caifd); + + mutex_unlock(&caifdevs->lock); break; } return 0; @@ -322,61 +344,60 @@ static struct notifier_block caif_device_notifier = { .priority = 0, }; - -struct cfcnfg *get_caif_conf(void) -{ - return cfg; -} -EXPORT_SYMBOL(get_caif_conf); - -int caif_connect_client(struct caif_connect_request *conn_req, - struct cflayer *client_layer, int *ifindex, - int *headroom, int *tailroom) -{ - struct cfctrl_link_param param; - int ret; - ret = connect_req_to_link_param(get_caif_conf(), conn_req, ¶m); - if (ret) - return ret; - /* Hook up the adaptation layer. */ - return cfcnfg_add_adaptation_layer(get_caif_conf(), ¶m, - client_layer, ifindex, - headroom, tailroom); -} -EXPORT_SYMBOL(caif_connect_client); - -int caif_disconnect_client(struct cflayer *adap_layer) -{ - return cfcnfg_disconn_adapt_layer(get_caif_conf(), adap_layer); -} -EXPORT_SYMBOL(caif_disconnect_client); - -void caif_release_client(struct cflayer *adap_layer) -{ - cfcnfg_release_adap_layer(adap_layer); -} -EXPORT_SYMBOL(caif_release_client); - /* Per-namespace Caif devices handling */ static int caif_init_net(struct net *net) { struct caif_net *caifn = net_generic(net, caif_net_id); + BUG_ON(!caifn); INIT_LIST_HEAD(&caifn->caifdevs.list); - spin_lock_init(&caifn->caifdevs.lock); + mutex_init(&caifn->caifdevs.lock); + + caifn->cfg = cfcnfg_create(); + if (!caifn->cfg) { + pr_warn("can't create cfcnfg\n"); + return -ENOMEM; + } + return 0; } static void caif_exit_net(struct net *net) { - struct net_device *dev; - int res; + struct caif_device_entry *caifd, *tmp; + struct caif_device_entry_list *caifdevs = + caif_device_list(net); + struct cfcnfg *cfg; + rtnl_lock(); - for_each_netdev(net, dev) { - if (dev->type != ARPHRD_CAIF) - continue; - res = dev_close(dev); - caif_device_destroy(dev); + mutex_lock(&caifdevs->lock); + + cfg = get_cfcnfg(net); + if (cfg == NULL) { + mutex_unlock(&caifdevs->lock); + return; } + + list_for_each_entry_safe(caifd, tmp, &caifdevs->list, list) { + int i = 0; + list_del_rcu(&caifd->list); + cfcnfg_set_phy_state(cfg, &caifd->layer, false); + + while (i < 10 && + (caifd_refcnt_read(caifd) != 0 || + cfcnfg_del_phy_layer(cfg, &caifd->layer) != 0)) { + + pr_info("Wait for device inuse\n"); + msleep(250); + i++; + } + synchronize_rcu(); + dev_put(caifd->netdev); + free_percpu(caifd->pcpu_refcnt); + kfree(caifd); + } + cfcnfg_remove(cfg); + + mutex_unlock(&caifdevs->lock); rtnl_unlock(); } @@ -391,32 +412,23 @@ static struct pernet_operations caif_net_ops = { static int __init caif_device_init(void) { int result; - cfg = cfcnfg_create(); - if (!cfg) { - pr_warn("can't create cfcnfg\n"); - goto 
err_cfcnfg_create_failed; - } + result = register_pernet_device(&caif_net_ops); - if (result) { - kfree(cfg); - cfg = NULL; + if (result) return result; - } - dev_add_pack(&caif_packet_type); + register_netdevice_notifier(&caif_device_notifier); + dev_add_pack(&caif_packet_type); return result; -err_cfcnfg_create_failed: - return -ENODEV; } static void __exit caif_device_exit(void) { - dev_remove_pack(&caif_packet_type); unregister_pernet_device(&caif_net_ops); unregister_netdevice_notifier(&caif_device_notifier); - cfcnfg_remove(cfg); + dev_remove_pack(&caif_packet_type); } module_init(caif_device_init); diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c index 8184c031d028..a98628086452 100644 --- a/net/caif/caif_socket.c +++ b/net/caif/caif_socket.c @@ -19,7 +19,7 @@ #include <linux/uaccess.h> #include <linux/debugfs.h> #include <linux/caif/caif_socket.h> -#include <asm/atomic.h> +#include <linux/atomic.h> #include <net/sock.h> #include <net/tcp_states.h> #include <net/caif/caif_layer.h> @@ -48,6 +48,7 @@ static struct dentry *debugfsdir; #ifdef CONFIG_DEBUG_FS struct debug_fs_counter { atomic_t caif_nr_socks; + atomic_t caif_sock_create; atomic_t num_connect_req; atomic_t num_connect_resp; atomic_t num_connect_fail_resp; @@ -59,11 +60,11 @@ struct debug_fs_counter { atomic_t num_rx_flow_on; }; static struct debug_fs_counter cnt; -#define dbfs_atomic_inc(v) atomic_inc(v) -#define dbfs_atomic_dec(v) atomic_dec(v) +#define dbfs_atomic_inc(v) atomic_inc_return(v) +#define dbfs_atomic_dec(v) atomic_dec_return(v) #else -#define dbfs_atomic_inc(v) -#define dbfs_atomic_dec(v) +#define dbfs_atomic_inc(v) 0 +#define dbfs_atomic_dec(v) 0 #endif struct caifsock { @@ -155,9 +156,10 @@ static int caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >= (unsigned)sk->sk_rcvbuf && rx_flow_is_on(cf_sk)) { - pr_debug("sending flow OFF (queue len = %d %d)\n", - atomic_read(&cf_sk->sk.sk_rmem_alloc), - sk_rcvbuf_lowwater(cf_sk)); + if (net_ratelimit()) + pr_debug("sending flow OFF (queue len = %d %d)\n", + atomic_read(&cf_sk->sk.sk_rmem_alloc), + sk_rcvbuf_lowwater(cf_sk)); set_rx_flow_off(cf_sk); dbfs_atomic_inc(&cnt.num_rx_flow_off); caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ); @@ -168,7 +170,8 @@ static int caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) return err; if (!sk_rmem_schedule(sk, skb->truesize) && rx_flow_is_on(cf_sk)) { set_rx_flow_off(cf_sk); - pr_debug("sending flow OFF due to rmem_schedule\n"); + if (net_ratelimit()) + pr_debug("sending flow OFF due to rmem_schedule\n"); dbfs_atomic_inc(&cnt.num_rx_flow_off); caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ); } @@ -202,13 +205,25 @@ static int caif_sktrecv_cb(struct cflayer *layr, struct cfpkt *pkt) skb = cfpkt_tonative(pkt); if (unlikely(cf_sk->sk.sk_state != CAIF_CONNECTED)) { - cfpkt_destroy(pkt); + kfree_skb(skb); return 0; } caif_queue_rcv_skb(&cf_sk->sk, skb); return 0; } +static void cfsk_hold(struct cflayer *layr) +{ + struct caifsock *cf_sk = container_of(layr, struct caifsock, layer); + sock_hold(&cf_sk->sk); +} + +static void cfsk_put(struct cflayer *layr) +{ + struct caifsock *cf_sk = container_of(layr, struct caifsock, layer); + sock_put(&cf_sk->sk); +} + /* Packet Control Callback function called from CAIF */ static void caif_ctrl_cb(struct cflayer *layr, enum caif_ctrlcmd flow, @@ -232,6 +247,8 @@ static void caif_ctrl_cb(struct cflayer *layr, case CAIF_CTRLCMD_INIT_RSP: /* We're now connected */ + caif_client_register_refcnt(&cf_sk->layer, + 
cfsk_hold, cfsk_put); dbfs_atomic_inc(&cnt.num_connect_resp); cf_sk->sk.sk_state = CAIF_CONNECTED; set_tx_flow_on(cf_sk); @@ -242,7 +259,6 @@ static void caif_ctrl_cb(struct cflayer *layr, /* We're now disconnected */ cf_sk->sk.sk_state = CAIF_DISCONNECTED; cf_sk->sk.sk_state_change(&cf_sk->sk); - cfcnfg_release_adap_layer(&cf_sk->layer); break; case CAIF_CTRLCMD_INIT_FAIL_RSP: @@ -519,43 +535,14 @@ static int transmit_skb(struct sk_buff *skb, struct caifsock *cf_sk, int noblock, long timeo) { struct cfpkt *pkt; - int ret, loopcnt = 0; pkt = cfpkt_fromnative(CAIF_DIR_OUT, skb); - memset(cfpkt_info(pkt), 0, sizeof(struct caif_payload_info)); - do { + memset(skb->cb, 0, sizeof(struct caif_payload_info)); - ret = -ETIMEDOUT; + if (cf_sk->layer.dn == NULL) + return -EINVAL; - /* Slight paranoia, probably not needed. */ - if (unlikely(loopcnt++ > 1000)) { - pr_warn("transmit retries failed, error = %d\n", ret); - break; - } - - if (cf_sk->layer.dn != NULL) - ret = cf_sk->layer.dn->transmit(cf_sk->layer.dn, pkt); - if (likely(ret >= 0)) - break; - /* if transmit return -EAGAIN, then retry */ - if (noblock && ret == -EAGAIN) - break; - timeo = caif_wait_for_flow_on(cf_sk, 0, timeo, &ret); - if (signal_pending(current)) { - ret = sock_intr_errno(timeo); - break; - } - if (ret) - break; - if (cf_sk->sk.sk_state != CAIF_CONNECTED || - sock_flag(&cf_sk->sk, SOCK_DEAD) || - (cf_sk->sk.sk_shutdown & RCV_SHUTDOWN)) { - ret = -EPIPE; - cf_sk->sk.sk_err = EPIPE; - break; - } - } while (ret == -EAGAIN); - return ret; + return cf_sk->layer.dn->transmit(cf_sk->layer.dn, pkt); } /* Copied from af_unix:unix_dgram_sendmsg, and adapted to CAIF */ @@ -620,7 +607,9 @@ static int caif_seqpkt_sendmsg(struct kiocb *kiocb, struct socket *sock, goto err; ret = transmit_skb(skb, cf_sk, noblock, timeo); if (ret < 0) - goto err; + /* skb is already freed */ + return ret; + return len; err: kfree_skb(skb); @@ -826,7 +815,8 @@ static int caif_connect(struct socket *sock, struct sockaddr *uaddr, sk->sk_state == CAIF_DISCONNECTED); if (sk->sk_shutdown & SHUTDOWN_MASK) { /* Allow re-connect after SHUTDOWN_IND */ - caif_disconnect_client(&cf_sk->layer); + caif_disconnect_client(sock_net(sk), &cf_sk->layer); + caif_free_client(&cf_sk->layer); break; } /* No reconnect on a seqpacket socket */ @@ -866,8 +856,10 @@ static int caif_connect(struct socket *sock, struct sockaddr *uaddr, dbfs_atomic_inc(&cnt.num_connect_req); cf_sk->layer.receive = caif_sktrecv_cb; - err = caif_connect_client(&cf_sk->conn_req, + + err = caif_connect_client(sock_net(sk), &cf_sk->conn_req, &cf_sk->layer, &ifindex, &headroom, &tailroom); + if (err < 0) { cf_sk->sk.sk_socket->state = SS_UNCONNECTED; cf_sk->sk.sk_state = CAIF_DISCONNECTED; @@ -935,7 +927,6 @@ static int caif_release(struct socket *sock) { struct sock *sk = sock->sk; struct caifsock *cf_sk = container_of(sk, struct caifsock, sk); - int res = 0; if (!sk) return 0; @@ -947,13 +938,14 @@ static int caif_release(struct socket *sock) * caif_queue_rcv_skb checks SOCK_DEAD holding the queue lock, * this ensures no packets when sock is dead. 
*/ - spin_lock(&sk->sk_receive_queue.lock); + spin_lock_bh(&sk->sk_receive_queue.lock); sock_set_flag(sk, SOCK_DEAD); - spin_unlock(&sk->sk_receive_queue.lock); + spin_unlock_bh(&sk->sk_receive_queue.lock); sock->sk = NULL; dbfs_atomic_inc(&cnt.num_disconnect); + WARN_ON(IS_ERR(cf_sk->debugfs_socket_dir)); if (cf_sk->debugfs_socket_dir != NULL) debugfs_remove_recursive(cf_sk->debugfs_socket_dir); @@ -961,19 +953,15 @@ static int caif_release(struct socket *sock) sk->sk_state = CAIF_DISCONNECTED; sk->sk_shutdown = SHUTDOWN_MASK; - if (cf_sk->sk.sk_socket->state == SS_CONNECTED || - cf_sk->sk.sk_socket->state == SS_CONNECTING) - res = caif_disconnect_client(&cf_sk->layer); - + caif_disconnect_client(sock_net(sk), &cf_sk->layer); cf_sk->sk.sk_socket->state = SS_DISCONNECTING; wake_up_interruptible_poll(sk_sleep(sk), POLLERR|POLLHUP); sock_orphan(sk); - cf_sk->layer.dn = NULL; sk_stream_kill_queues(&cf_sk->sk); release_sock(sk); sock_put(sk); - return res; + return 0; } /* Copied from af_unix.c:unix_poll(), added CAIF tx_flow handling */ @@ -1060,16 +1048,18 @@ static void caif_sock_destructor(struct sock *sk) caif_assert(sk_unhashed(sk)); caif_assert(!sk->sk_socket); if (!sock_flag(sk, SOCK_DEAD)) { - pr_info("Attempt to release alive CAIF socket: %p\n", sk); + pr_debug("Attempt to release alive CAIF socket: %p\n", sk); return; } sk_stream_kill_queues(&cf_sk->sk); dbfs_atomic_dec(&cnt.caif_nr_socks); + caif_free_client(&cf_sk->layer); } static int caif_create(struct net *net, struct socket *sock, int protocol, int kern) { + int num; struct sock *sk = NULL; struct caifsock *cf_sk = NULL; static struct proto prot = {.name = "PF_CAIF", @@ -1127,19 +1117,21 @@ static int caif_create(struct net *net, struct socket *sock, int protocol, set_rx_flow_on(cf_sk); /* Set default options on configuration */ - cf_sk->sk.sk_priority= CAIF_PRIO_NORMAL; + cf_sk->sk.sk_priority = CAIF_PRIO_NORMAL; cf_sk->conn_req.link_selector = CAIF_LINK_LOW_LATENCY; cf_sk->conn_req.protocol = protocol; /* Increase the number of sockets created. */ dbfs_atomic_inc(&cnt.caif_nr_socks); + num = dbfs_atomic_inc(&cnt.caif_sock_create); #ifdef CONFIG_DEBUG_FS if (!IS_ERR(debugfsdir)) { + /* Fill in some information concerning the misc socket. 
*/ - snprintf(cf_sk->name, sizeof(cf_sk->name), "cfsk%d", - atomic_read(&cnt.caif_nr_socks)); + snprintf(cf_sk->name, sizeof(cf_sk->name), "cfsk%d", num); cf_sk->debugfs_socket_dir = debugfs_create_dir(cf_sk->name, debugfsdir); + debugfs_create_u32("sk_state", S_IRUSR | S_IWUSR, cf_sk->debugfs_socket_dir, (u32 *) &cf_sk->sk.sk_state); @@ -1183,6 +1175,9 @@ static int __init caif_sktinit_module(void) debugfs_create_u32("num_sockets", S_IRUSR | S_IWUSR, debugfsdir, (u32 *) &cnt.caif_nr_socks); + debugfs_create_u32("num_create", S_IRUSR | S_IWUSR, + debugfsdir, + (u32 *) &cnt.caif_sock_create); debugfs_create_u32("num_connect_req", S_IRUSR | S_IWUSR, debugfsdir, (u32 *) &cnt.num_connect_req); diff --git a/net/caif/cfcnfg.c b/net/caif/cfcnfg.c index f1f98d967d8a..52fe33bee029 100644 --- a/net/caif/cfcnfg.c +++ b/net/caif/cfcnfg.c @@ -10,6 +10,7 @@ #include <linux/stddef.h> #include <linux/slab.h> #include <linux/netdevice.h> +#include <linux/module.h> #include <net/caif/caif_layer.h> #include <net/caif/cfpkt.h> #include <net/caif/cfcnfg.h> @@ -18,11 +19,7 @@ #include <net/caif/cffrml.h> #include <net/caif/cfserl.h> #include <net/caif/cfsrvl.h> - -#include <linux/module.h> -#include <asm/atomic.h> - -#define MAX_PHY_LAYERS 7 +#include <net/caif/caif_dev.h> #define container_obj(layr) container_of(layr, struct cfcnfg, layer) @@ -30,6 +27,9 @@ * to manage physical interfaces */ struct cfcnfg_phyinfo { + struct list_head node; + bool up; + /* Pointer to the layer below the MUX (framing layer) */ struct cflayer *frm_layer; /* Pointer to the lowest actual physical layer */ @@ -39,9 +39,6 @@ struct cfcnfg_phyinfo { /* Preference of the physical in interface */ enum cfcnfg_phy_preference pref; - /* Reference count, number of channels using the device */ - int phy_ref_count; - /* Information about the physical device */ struct dev_info dev_info; @@ -59,8 +56,8 @@ struct cfcnfg { struct cflayer layer; struct cflayer *ctrl; struct cflayer *mux; - u8 last_phyid; - struct cfcnfg_phyinfo phy_layers[MAX_PHY_LAYERS]; + struct list_head phys; + struct mutex lock; }; static void cfcnfg_linkup_rsp(struct cflayer *layer, u8 channel_id, @@ -76,6 +73,9 @@ struct cfcnfg *cfcnfg_create(void) { struct cfcnfg *this; struct cfctrl_rsp *resp; + + might_sleep(); + /* Initiate this layer */ this = kzalloc(sizeof(struct cfcnfg), GFP_ATOMIC); if (!this) { @@ -99,27 +99,33 @@ struct cfcnfg *cfcnfg_create(void) resp->radioset_rsp = cfctrl_resp_func; resp->linksetup_rsp = cfcnfg_linkup_rsp; resp->reject_rsp = cfcnfg_reject_rsp; - - this->last_phyid = 1; + INIT_LIST_HEAD(&this->phys); cfmuxl_set_uplayer(this->mux, this->ctrl, 0); layer_set_dn(this->ctrl, this->mux); layer_set_up(this->ctrl, this); + mutex_init(&this->lock); + return this; out_of_mem: pr_warn("Out of memory\n"); + + synchronize_rcu(); + kfree(this->mux); kfree(this->ctrl); kfree(this); return NULL; } -EXPORT_SYMBOL(cfcnfg_create); void cfcnfg_remove(struct cfcnfg *cfg) { + might_sleep(); if (cfg) { + synchronize_rcu(); + kfree(cfg->mux); - kfree(cfg->ctrl); + cfctrl_remove(cfg->ctrl); kfree(cfg); } } @@ -128,132 +134,83 @@ static void cfctrl_resp_func(void) { } +static struct cfcnfg_phyinfo *cfcnfg_get_phyinfo_rcu(struct cfcnfg *cnfg, + u8 phyid) +{ + struct cfcnfg_phyinfo *phy; + + list_for_each_entry_rcu(phy, &cnfg->phys, node) + if (phy->id == phyid) + return phy; + return NULL; +} + static void cfctrl_enum_resp(void) { } -struct dev_info *cfcnfg_get_phyid(struct cfcnfg *cnfg, +static struct dev_info *cfcnfg_get_phyid(struct cfcnfg *cnfg, enum 
cfcnfg_phy_preference phy_pref) { - u16 i; - /* Try to match with specified preference */ - for (i = 1; i < MAX_PHY_LAYERS; i++) { - if (cnfg->phy_layers[i].id == i && - cnfg->phy_layers[i].pref == phy_pref && - cnfg->phy_layers[i].frm_layer != NULL) { - caif_assert(cnfg->phy_layers != NULL); - caif_assert(cnfg->phy_layers[i].id == i); - return &cnfg->phy_layers[i].dev_info; - } + struct cfcnfg_phyinfo *phy; + + list_for_each_entry_rcu(phy, &cnfg->phys, node) { + if (phy->up && phy->pref == phy_pref && + phy->frm_layer != NULL) + + return &phy->dev_info; } + /* Otherwise just return something */ - for (i = 1; i < MAX_PHY_LAYERS; i++) { - if (cnfg->phy_layers[i].id == i) { - caif_assert(cnfg->phy_layers != NULL); - caif_assert(cnfg->phy_layers[i].id == i); - return &cnfg->phy_layers[i].dev_info; - } - } + list_for_each_entry_rcu(phy, &cnfg->phys, node) + if (phy->up) + return &phy->dev_info; return NULL; } -static struct cfcnfg_phyinfo *cfcnfg_get_phyinfo(struct cfcnfg *cnfg, - u8 phyid) +static int cfcnfg_get_id_from_ifi(struct cfcnfg *cnfg, int ifi) { - int i; - /* Try to match with specified preference */ - for (i = 0; i < MAX_PHY_LAYERS; i++) - if (cnfg->phy_layers[i].frm_layer != NULL && - cnfg->phy_layers[i].id == phyid) - return &cnfg->phy_layers[i]; - return NULL; -} + struct cfcnfg_phyinfo *phy; - -int cfcnfg_get_id_from_ifi(struct cfcnfg *cnfg, int ifi) -{ - int i; - for (i = 0; i < MAX_PHY_LAYERS; i++) - if (cnfg->phy_layers[i].frm_layer != NULL && - cnfg->phy_layers[i].ifindex == ifi) - return i; + list_for_each_entry_rcu(phy, &cnfg->phys, node) + if (phy->ifindex == ifi && phy->up) + return phy->id; return -ENODEV; } -int cfcnfg_disconn_adapt_layer(struct cfcnfg *cnfg, struct cflayer *adap_layer) +int caif_disconnect_client(struct net *net, struct cflayer *adap_layer) { - u8 channel_id = 0; - int ret = 0; - struct cflayer *servl = NULL; - struct cfcnfg_phyinfo *phyinfo = NULL; - u8 phyid = 0; + u8 channel_id; + struct cfcnfg *cfg = get_cfcnfg(net); caif_assert(adap_layer != NULL); + cfctrl_cancel_req(cfg->ctrl, adap_layer); channel_id = adap_layer->id; - if (adap_layer->dn == NULL || channel_id == 0) { - pr_err("adap_layer->dn == NULL or adap_layer->id is 0\n"); - ret = -ENOTCONN; - goto end; - } - servl = cfmuxl_remove_uplayer(cnfg->mux, channel_id); - if (servl == NULL) { - pr_err("PROTOCOL ERROR - Error removing service_layer Channel_Id(%d)", - channel_id); - ret = -EINVAL; - goto end; - } - layer_set_up(servl, NULL); - ret = cfctrl_linkdown_req(cnfg->ctrl, channel_id, adap_layer); - if (ret) - goto end; - caif_assert(channel_id == servl->id); - if (adap_layer->dn != NULL) { - phyid = cfsrvl_getphyid(adap_layer->dn); - - phyinfo = cfcnfg_get_phyinfo(cnfg, phyid); - if (phyinfo == NULL) { - pr_warn("No interface to send disconnect to\n"); - ret = -ENODEV; - goto end; - } - if (phyinfo->id != phyid || - phyinfo->phy_layer->id != phyid || - phyinfo->frm_layer->id != phyid) { - pr_err("Inconsistency in phy registration\n"); - ret = -EINVAL; - goto end; - } - } - if (phyinfo != NULL && --phyinfo->phy_ref_count == 0 && - phyinfo->phy_layer != NULL && - phyinfo->phy_layer->modemcmd != NULL) { - phyinfo->phy_layer->modemcmd(phyinfo->phy_layer, - _CAIF_MODEMCMD_PHYIF_USELESS); - } -end: - cfsrvl_put(servl); - cfctrl_cancel_req(cnfg->ctrl, adap_layer); + if (channel_id != 0) { + struct cflayer *servl; + servl = cfmuxl_remove_uplayer(cfg->mux, channel_id); + if (servl != NULL) + layer_set_up(servl, NULL); + } else + pr_debug("nothing to disconnect\n"); + cfctrl_linkdown_req(cfg->ctrl, 
channel_id, adap_layer); + + /* Do RCU sync before initiating cleanup */ + synchronize_rcu(); if (adap_layer->ctrlcmd != NULL) adap_layer->ctrlcmd(adap_layer, CAIF_CTRLCMD_DEINIT_RSP, 0); - return ret; - -} -EXPORT_SYMBOL(cfcnfg_disconn_adapt_layer); + return 0; -void cfcnfg_release_adap_layer(struct cflayer *adap_layer) -{ - if (adap_layer->dn) - cfsrvl_put(adap_layer->dn); } -EXPORT_SYMBOL(cfcnfg_release_adap_layer); +EXPORT_SYMBOL(caif_disconnect_client); static void cfcnfg_linkdestroy_rsp(struct cflayer *layer, u8 channel_id) { } -int protohead[CFCTRL_SRV_MASK] = { +static const int protohead[CFCTRL_SRV_MASK] = { [CFCTRL_SRV_VEI] = 4, [CFCTRL_SRV_DATAGRAM] = 7, [CFCTRL_SRV_UTIL] = 4, @@ -261,49 +218,157 @@ int protohead[CFCTRL_SRV_MASK] = { [CFCTRL_SRV_DBG] = 3, }; -int cfcnfg_add_adaptation_layer(struct cfcnfg *cnfg, - struct cfctrl_link_param *param, - struct cflayer *adap_layer, - int *ifindex, + +static int caif_connect_req_to_link_param(struct cfcnfg *cnfg, + struct caif_connect_request *s, + struct cfctrl_link_param *l) +{ + struct dev_info *dev_info; + enum cfcnfg_phy_preference pref; + int res; + + memset(l, 0, sizeof(*l)); + /* In caif protocol low value is high priority */ + l->priority = CAIF_PRIO_MAX - s->priority + 1; + + if (s->ifindex != 0) { + res = cfcnfg_get_id_from_ifi(cnfg, s->ifindex); + if (res < 0) + return res; + l->phyid = res; + } else { + switch (s->link_selector) { + case CAIF_LINK_HIGH_BANDW: + pref = CFPHYPREF_HIGH_BW; + break; + case CAIF_LINK_LOW_LATENCY: + pref = CFPHYPREF_LOW_LAT; + break; + default: + return -EINVAL; + } + dev_info = cfcnfg_get_phyid(cnfg, pref); + if (dev_info == NULL) + return -ENODEV; + l->phyid = dev_info->id; + } + switch (s->protocol) { + case CAIFPROTO_AT: + l->linktype = CFCTRL_SRV_VEI; + l->endpoint = (s->sockaddr.u.at.type >> 2) & 0x3; + l->chtype = s->sockaddr.u.at.type & 0x3; + break; + case CAIFPROTO_DATAGRAM: + l->linktype = CFCTRL_SRV_DATAGRAM; + l->chtype = 0x00; + l->u.datagram.connid = s->sockaddr.u.dgm.connection_id; + break; + case CAIFPROTO_DATAGRAM_LOOP: + l->linktype = CFCTRL_SRV_DATAGRAM; + l->chtype = 0x03; + l->endpoint = 0x00; + l->u.datagram.connid = s->sockaddr.u.dgm.connection_id; + break; + case CAIFPROTO_RFM: + l->linktype = CFCTRL_SRV_RFM; + l->u.datagram.connid = s->sockaddr.u.rfm.connection_id; + strncpy(l->u.rfm.volume, s->sockaddr.u.rfm.volume, + sizeof(l->u.rfm.volume)-1); + l->u.rfm.volume[sizeof(l->u.rfm.volume)-1] = 0; + break; + case CAIFPROTO_UTIL: + l->linktype = CFCTRL_SRV_UTIL; + l->endpoint = 0x00; + l->chtype = 0x00; + strncpy(l->u.utility.name, s->sockaddr.u.util.service, + sizeof(l->u.utility.name)-1); + l->u.utility.name[sizeof(l->u.utility.name)-1] = 0; + caif_assert(sizeof(l->u.utility.name) > 10); + l->u.utility.paramlen = s->param.size; + if (l->u.utility.paramlen > sizeof(l->u.utility.params)) + l->u.utility.paramlen = sizeof(l->u.utility.params); + + memcpy(l->u.utility.params, s->param.data, + l->u.utility.paramlen); + + break; + case CAIFPROTO_DEBUG: + l->linktype = CFCTRL_SRV_DBG; + l->endpoint = s->sockaddr.u.dbg.service; + l->chtype = s->sockaddr.u.dbg.type; + break; + default: + return -EINVAL; + } + return 0; +} + +int caif_connect_client(struct net *net, struct caif_connect_request *conn_req, + struct cflayer *adap_layer, int *ifindex, int *proto_head, int *proto_tail) { struct cflayer *frml; + struct cfcnfg_phyinfo *phy; + int err; + struct cfctrl_link_param param; + struct cfcnfg *cfg = get_cfcnfg(net); + caif_assert(cfg != NULL); + + rcu_read_lock(); + err = 
caif_connect_req_to_link_param(cfg, conn_req, ¶m); + if (err) + goto unlock; + + phy = cfcnfg_get_phyinfo_rcu(cfg, param.phyid); + if (!phy) { + err = -ENODEV; + goto unlock; + } + err = -EINVAL; + if (adap_layer == NULL) { pr_err("adap_layer is zero\n"); - return -EINVAL; + goto unlock; } if (adap_layer->receive == NULL) { pr_err("adap_layer->receive is NULL\n"); - return -EINVAL; + goto unlock; } if (adap_layer->ctrlcmd == NULL) { pr_err("adap_layer->ctrlcmd == NULL\n"); - return -EINVAL; + goto unlock; } - frml = cnfg->phy_layers[param->phyid].frm_layer; + + err = -ENODEV; + frml = phy->frm_layer; if (frml == NULL) { pr_err("Specified PHY type does not exist!\n"); - return -ENODEV; + goto unlock; } - caif_assert(param->phyid == cnfg->phy_layers[param->phyid].id); - caif_assert(cnfg->phy_layers[param->phyid].frm_layer->id == - param->phyid); - caif_assert(cnfg->phy_layers[param->phyid].phy_layer->id == - param->phyid); + caif_assert(param.phyid == phy->id); + caif_assert(phy->frm_layer->id == + param.phyid); + caif_assert(phy->phy_layer->id == + param.phyid); - *ifindex = cnfg->phy_layers[param->phyid].ifindex; + *ifindex = phy->ifindex; + *proto_tail = 2; *proto_head = - protohead[param->linktype]+ - (cnfg->phy_layers[param->phyid].use_stx ? 1 : 0); - *proto_tail = 2; + protohead[param.linktype] + (phy->use_stx ? 1 : 0); + + rcu_read_unlock(); /* FIXME: ENUMERATE INITIALLY WHEN ACTIVATING PHYSICAL INTERFACE */ - cfctrl_enum_req(cnfg->ctrl, param->phyid); - return cfctrl_linkup_request(cnfg->ctrl, param, adap_layer); + cfctrl_enum_req(cfg->ctrl, param.phyid); + return cfctrl_linkup_request(cfg->ctrl, ¶m, adap_layer); + +unlock: + rcu_read_unlock(); + return err; } -EXPORT_SYMBOL(cfcnfg_add_adaptation_layer); +EXPORT_SYMBOL(caif_connect_client); static void cfcnfg_reject_rsp(struct cflayer *layer, u8 channel_id, struct cflayer *adapt_layer) @@ -315,32 +380,45 @@ static void cfcnfg_reject_rsp(struct cflayer *layer, u8 channel_id, static void cfcnfg_linkup_rsp(struct cflayer *layer, u8 channel_id, enum cfctrl_srv serv, - u8 phyid, struct cflayer *adapt_layer) + u8 phyid, struct cflayer *adapt_layer) { struct cfcnfg *cnfg = container_obj(layer); struct cflayer *servicel = NULL; struct cfcnfg_phyinfo *phyinfo; struct net_device *netdev; + if (channel_id == 0) { + pr_warn("received channel_id zero\n"); + if (adapt_layer != NULL && adapt_layer->ctrlcmd != NULL) + adapt_layer->ctrlcmd(adapt_layer, + CAIF_CTRLCMD_INIT_FAIL_RSP, 0); + return; + } + + rcu_read_lock(); + if (adapt_layer == NULL) { - pr_debug("link setup response but no client exist, send linkdown back\n"); + pr_debug("link setup response but no client exist," + "send linkdown back\n"); cfctrl_linkdown_req(cnfg->ctrl, channel_id, NULL); - return; + goto unlock; } caif_assert(cnfg != NULL); caif_assert(phyid != 0); - phyinfo = &cnfg->phy_layers[phyid]; + + phyinfo = cfcnfg_get_phyinfo_rcu(cnfg, phyid); + if (phyinfo == NULL) { + pr_err("ERROR: Link Layer Device dissapeared" + "while connecting\n"); + goto unlock; + } + + caif_assert(phyinfo != NULL); caif_assert(phyinfo->id == phyid); caif_assert(phyinfo->phy_layer != NULL); caif_assert(phyinfo->phy_layer->id == phyid); - phyinfo->phy_ref_count++; - if (phyinfo->phy_ref_count == 1 && - phyinfo->phy_layer->modemcmd != NULL) { - phyinfo->phy_layer->modemcmd(phyinfo->phy_layer, - _CAIF_MODEMCMD_PHYIF_USEFULL); - } adapt_layer->id = channel_id; switch (serv) { @@ -348,7 +426,8 @@ cfcnfg_linkup_rsp(struct cflayer *layer, u8 channel_id, enum cfctrl_srv serv, servicel = 
cfvei_create(channel_id, &phyinfo->dev_info); break; case CFCTRL_SRV_DATAGRAM: - servicel = cfdgml_create(channel_id, &phyinfo->dev_info); + servicel = cfdgml_create(channel_id, + &phyinfo->dev_info); break; case CFCTRL_SRV_RFM: netdev = phyinfo->dev_info.dev; @@ -365,94 +444,92 @@ cfcnfg_linkup_rsp(struct cflayer *layer, u8 channel_id, enum cfctrl_srv serv, servicel = cfdbgl_create(channel_id, &phyinfo->dev_info); break; default: - pr_err("Protocol error. Link setup response - unknown channel type\n"); - return; + pr_err("Protocol error. Link setup response " + "- unknown channel type\n"); + goto unlock; } if (!servicel) { pr_warn("Out of memory\n"); - return; + goto unlock; } layer_set_dn(servicel, cnfg->mux); cfmuxl_set_uplayer(cnfg->mux, servicel, channel_id); layer_set_up(servicel, adapt_layer); layer_set_dn(adapt_layer, servicel); - cfsrvl_get(servicel); + + rcu_read_unlock(); + servicel->ctrlcmd(servicel, CAIF_CTRLCMD_INIT_RSP, 0); + return; +unlock: + rcu_read_unlock(); } void cfcnfg_add_phy_layer(struct cfcnfg *cnfg, enum cfcnfg_phy_type phy_type, struct net_device *dev, struct cflayer *phy_layer, - u16 *phyid, enum cfcnfg_phy_preference pref, + enum cfcnfg_phy_preference pref, bool fcs, bool stx) { struct cflayer *frml; struct cflayer *phy_driver = NULL; + struct cfcnfg_phyinfo *phyinfo; int i; + u8 phyid; + mutex_lock(&cnfg->lock); - if (cnfg->phy_layers[cnfg->last_phyid].frm_layer == NULL) { - *phyid = cnfg->last_phyid; - - /* range: * 1..(MAX_PHY_LAYERS-1) */ - cnfg->last_phyid = - (cnfg->last_phyid % (MAX_PHY_LAYERS - 1)) + 1; - } else { - *phyid = 0; - for (i = 1; i < MAX_PHY_LAYERS; i++) { - if (cnfg->phy_layers[i].frm_layer == NULL) { - *phyid = i; - break; - } - } - } - if (*phyid == 0) { - pr_err("No Available PHY ID\n"); - return; + /* CAIF protocol allow maximum 6 link-layers */ + for (i = 0; i < 7; i++) { + phyid = (dev->ifindex + i) & 0x7; + if (phyid == 0) + continue; + if (cfcnfg_get_phyinfo_rcu(cnfg, phyid) == NULL) + goto got_phyid; } + pr_warn("Too many CAIF Link Layers (max 6)\n"); + goto out; + +got_phyid: + phyinfo = kzalloc(sizeof(struct cfcnfg_phyinfo), GFP_ATOMIC); switch (phy_type) { case CFPHYTYPE_FRAG: phy_driver = - cfserl_create(CFPHYTYPE_FRAG, *phyid, stx); + cfserl_create(CFPHYTYPE_FRAG, phyid, stx); if (!phy_driver) { pr_warn("Out of memory\n"); - return; + goto out; } - break; case CFPHYTYPE_CAIF: phy_driver = NULL; break; default: - pr_err("%d\n", phy_type); - return; - break; + goto out; } + phy_layer->id = phyid; + phyinfo->pref = pref; + phyinfo->id = phyid; + phyinfo->dev_info.id = phyid; + phyinfo->dev_info.dev = dev; + phyinfo->phy_layer = phy_layer; + phyinfo->ifindex = dev->ifindex; + phyinfo->use_stx = stx; + phyinfo->use_fcs = fcs; + + frml = cffrml_create(phyid, fcs); - phy_layer->id = *phyid; - cnfg->phy_layers[*phyid].pref = pref; - cnfg->phy_layers[*phyid].id = *phyid; - cnfg->phy_layers[*phyid].dev_info.id = *phyid; - cnfg->phy_layers[*phyid].dev_info.dev = dev; - cnfg->phy_layers[*phyid].phy_layer = phy_layer; - cnfg->phy_layers[*phyid].phy_ref_count = 0; - cnfg->phy_layers[*phyid].ifindex = dev->ifindex; - cnfg->phy_layers[*phyid].use_stx = stx; - cnfg->phy_layers[*phyid].use_fcs = fcs; - - phy_layer->type = phy_type; - frml = cffrml_create(*phyid, fcs); if (!frml) { pr_warn("Out of memory\n"); - return; + kfree(phyinfo); + goto out; } - cnfg->phy_layers[*phyid].frm_layer = frml; - cfmuxl_set_dnlayer(cnfg->mux, frml, *phyid); + phyinfo->frm_layer = frml; layer_set_up(frml, cnfg->mux); if (phy_driver != NULL) { - phy_driver->id = 
*phyid; + phy_driver->id = phyid; layer_set_dn(frml, phy_driver); layer_set_up(phy_driver, frml); layer_set_dn(phy_driver, phy_layer); @@ -461,33 +538,95 @@ cfcnfg_add_phy_layer(struct cfcnfg *cnfg, enum cfcnfg_phy_type phy_type, layer_set_dn(frml, phy_layer); layer_set_up(phy_layer, frml); } + + list_add_rcu(&phyinfo->node, &cnfg->phys); +out: + mutex_unlock(&cnfg->lock); } EXPORT_SYMBOL(cfcnfg_add_phy_layer); +int cfcnfg_set_phy_state(struct cfcnfg *cnfg, struct cflayer *phy_layer, + bool up) +{ + struct cfcnfg_phyinfo *phyinfo; + + rcu_read_lock(); + phyinfo = cfcnfg_get_phyinfo_rcu(cnfg, phy_layer->id); + if (phyinfo == NULL) { + rcu_read_unlock(); + return -ENODEV; + } + + if (phyinfo->up == up) { + rcu_read_unlock(); + return 0; + } + phyinfo->up = up; + + if (up) { + cffrml_hold(phyinfo->frm_layer); + cfmuxl_set_dnlayer(cnfg->mux, phyinfo->frm_layer, + phy_layer->id); + } else { + cfmuxl_remove_dnlayer(cnfg->mux, phy_layer->id); + cffrml_put(phyinfo->frm_layer); + } + + rcu_read_unlock(); + return 0; +} +EXPORT_SYMBOL(cfcnfg_set_phy_state); + int cfcnfg_del_phy_layer(struct cfcnfg *cnfg, struct cflayer *phy_layer) { struct cflayer *frml, *frml_dn; u16 phyid; + struct cfcnfg_phyinfo *phyinfo; + + might_sleep(); + + mutex_lock(&cnfg->lock); + phyid = phy_layer->id; - caif_assert(phyid == cnfg->phy_layers[phyid].id); - caif_assert(phy_layer == cnfg->phy_layers[phyid].phy_layer); + phyinfo = cfcnfg_get_phyinfo_rcu(cnfg, phyid); + + if (phyinfo == NULL) { + mutex_unlock(&cnfg->lock); + return 0; + } + caif_assert(phyid == phyinfo->id); + caif_assert(phy_layer == phyinfo->phy_layer); caif_assert(phy_layer->id == phyid); - caif_assert(cnfg->phy_layers[phyid].frm_layer->id == phyid); + caif_assert(phyinfo->frm_layer->id == phyid); + + list_del_rcu(&phyinfo->node); + synchronize_rcu(); - memset(&cnfg->phy_layers[phy_layer->id], 0, - sizeof(struct cfcnfg_phyinfo)); - frml = cfmuxl_remove_dnlayer(cnfg->mux, phy_layer->id); + /* Fail if reference count is not zero */ + if (cffrml_refcnt_read(phyinfo->frm_layer) != 0) { + pr_info("Wait for device inuse\n"); + list_add_rcu(&phyinfo->node, &cnfg->phys); + mutex_unlock(&cnfg->lock); + return -EAGAIN; + } + + frml = phyinfo->frm_layer; frml_dn = frml->dn; cffrml_set_uplayer(frml, NULL); cffrml_set_dnlayer(frml, NULL); - kfree(frml); - if (phy_layer != frml_dn) { layer_set_up(frml_dn, NULL); layer_set_dn(frml_dn, NULL); - kfree(frml_dn); } layer_set_up(phy_layer, NULL); + + if (phyinfo->phy_layer != frml_dn) + kfree(frml_dn); + + cffrml_free(frml); + kfree(phyinfo); + mutex_unlock(&cnfg->lock); + return 0; } EXPORT_SYMBOL(cfcnfg_del_phy_layer); diff --git a/net/caif/cfctrl.c b/net/caif/cfctrl.c index 3cd8f978e309..e22671bed669 100644 --- a/net/caif/cfctrl.c +++ b/net/caif/cfctrl.c @@ -17,7 +17,6 @@ #define UTILITY_NAME_LENGTH 16 #define CFPKT_CTRL_PKT_LEN 20 - #ifdef CAIF_NO_LOOP static int handle_loop(struct cfctrl *ctrl, int cmd, struct cfpkt *pkt){ @@ -51,14 +50,31 @@ struct cflayer *cfctrl_create(void) this->serv.layer.receive = cfctrl_recv; sprintf(this->serv.layer.name, "ctrl"); this->serv.layer.ctrlcmd = cfctrl_ctrlcmd; +#ifndef CAIF_NO_LOOP spin_lock_init(&this->loop_linkid_lock); + this->loop_linkid = 1; +#endif spin_lock_init(&this->info_list_lock); INIT_LIST_HEAD(&this->list); - this->loop_linkid = 1; return &this->serv.layer; } -static bool param_eq(struct cfctrl_link_param *p1, struct cfctrl_link_param *p2) +void cfctrl_remove(struct cflayer *layer) +{ + struct cfctrl_request_info *p, *tmp; + struct cfctrl *ctrl = container_obj(layer); + 
+ spin_lock_bh(&ctrl->info_list_lock); + list_for_each_entry_safe(p, tmp, &ctrl->list, list) { + list_del(&p->list); + kfree(p); + } + spin_unlock_bh(&ctrl->info_list_lock); + kfree(layer); +} + +static bool param_eq(const struct cfctrl_link_param *p1, + const struct cfctrl_link_param *p2) { bool eq = p1->linktype == p2->linktype && @@ -100,8 +116,8 @@ static bool param_eq(struct cfctrl_link_param *p1, struct cfctrl_link_param *p2) return false; } -bool cfctrl_req_eq(struct cfctrl_request_info *r1, - struct cfctrl_request_info *r2) +static bool cfctrl_req_eq(const struct cfctrl_request_info *r1, + const struct cfctrl_request_info *r2) { if (r1->cmd != r2->cmd) return false; @@ -112,23 +128,22 @@ bool cfctrl_req_eq(struct cfctrl_request_info *r1, } /* Insert request at the end */ -void cfctrl_insert_req(struct cfctrl *ctrl, +static void cfctrl_insert_req(struct cfctrl *ctrl, struct cfctrl_request_info *req) { - spin_lock(&ctrl->info_list_lock); + spin_lock_bh(&ctrl->info_list_lock); atomic_inc(&ctrl->req_seq_no); req->sequence_no = atomic_read(&ctrl->req_seq_no); list_add_tail(&req->list, &ctrl->list); - spin_unlock(&ctrl->info_list_lock); + spin_unlock_bh(&ctrl->info_list_lock); } /* Compare and remove request */ -struct cfctrl_request_info *cfctrl_remove_req(struct cfctrl *ctrl, - struct cfctrl_request_info *req) +static struct cfctrl_request_info *cfctrl_remove_req(struct cfctrl *ctrl, + struct cfctrl_request_info *req) { struct cfctrl_request_info *p, *tmp, *first; - spin_lock(&ctrl->info_list_lock); first = list_first_entry(&ctrl->list, struct cfctrl_request_info, list); list_for_each_entry_safe(p, tmp, &ctrl->list, list) { @@ -144,7 +159,6 @@ struct cfctrl_request_info *cfctrl_remove_req(struct cfctrl *ctrl, } p = NULL; out: - spin_unlock(&ctrl->info_list_lock); return p; } @@ -154,16 +168,6 @@ struct cfctrl_rsp *cfctrl_get_respfuncs(struct cflayer *layer) return &this->res; } -void cfctrl_set_dnlayer(struct cflayer *this, struct cflayer *dn) -{ - this->dn = dn; -} - -void cfctrl_set_uplayer(struct cflayer *this, struct cflayer *up) -{ - this->up = up; -} - static void init_info(struct caif_payload_info *info, struct cfctrl *cfctrl) { info->hdr_len = 0; @@ -174,24 +178,23 @@ static void init_info(struct caif_payload_info *info, struct cfctrl *cfctrl) void cfctrl_enum_req(struct cflayer *layer, u8 physlinkid) { struct cfctrl *cfctrl = container_obj(layer); - int ret; struct cfpkt *pkt = cfpkt_create(CFPKT_CTRL_PKT_LEN); + struct cflayer *dn = cfctrl->serv.layer.dn; if (!pkt) { pr_warn("Out of memory\n"); return; } + if (!dn) { + pr_debug("not able to send enum request\n"); + return; + } caif_assert(offsetof(struct cfctrl, serv.layer) == 0); init_info(cfpkt_info(pkt), cfctrl); cfpkt_info(pkt)->dev_info->id = physlinkid; cfctrl->serv.dev_info.id = physlinkid; cfpkt_addbdy(pkt, CFCTRL_CMD_ENUM); cfpkt_addbdy(pkt, physlinkid); - ret = - cfctrl->serv.layer.dn->transmit(cfctrl->serv.layer.dn, pkt); - if (ret < 0) { - pr_err("Could not transmit enum message\n"); - cfpkt_destroy(pkt); - } + dn->transmit(dn, pkt); } int cfctrl_linkup_request(struct cflayer *layer, @@ -205,14 +208,29 @@ int cfctrl_linkup_request(struct cflayer *layer, struct cfctrl_request_info *req; int ret; char utility_name[16]; - struct cfpkt *pkt = cfpkt_create(CFPKT_CTRL_PKT_LEN); + struct cfpkt *pkt; + struct cflayer *dn = cfctrl->serv.layer.dn; + + if (!dn) { + pr_debug("not able to send linkup request\n"); + return -ENODEV; + } + + if (cfctrl_cancel_req(layer, user_layer) > 0) { + /* Slight Paranoia, check if already 
connecting */ + pr_err("Duplicate connect request for same client\n"); + WARN_ON(1); + return -EALREADY; + } + + pkt = cfpkt_create(CFPKT_CTRL_PKT_LEN); if (!pkt) { pr_warn("Out of memory\n"); return -ENOMEM; } cfpkt_addbdy(pkt, CFCTRL_CMD_LINK_SETUP); - cfpkt_addbdy(pkt, (param->chtype << 4) + param->linktype); - cfpkt_addbdy(pkt, (param->priority << 3) + param->phyid); + cfpkt_addbdy(pkt, (param->chtype << 4) | param->linktype); + cfpkt_addbdy(pkt, (param->priority << 3) | param->phyid); cfpkt_addbdy(pkt, param->endpoint & 0x03); switch (param->linktype) { @@ -273,11 +291,15 @@ int cfctrl_linkup_request(struct cflayer *layer, */ cfpkt_info(pkt)->dev_info->id = param->phyid; ret = - cfctrl->serv.layer.dn->transmit(cfctrl->serv.layer.dn, pkt); + dn->transmit(dn, pkt); if (ret < 0) { - pr_err("Could not transmit linksetup request\n"); - cfpkt_destroy(pkt); - return -ENODEV; + int count; + + count = cfctrl_cancel_req(&cfctrl->serv.layer, + user_layer); + if (count != 1) + pr_err("Could not remove request (%d)", count); + return -ENODEV; } return 0; } @@ -288,89 +310,46 @@ int cfctrl_linkdown_req(struct cflayer *layer, u8 channelid, int ret; struct cfctrl *cfctrl = container_obj(layer); struct cfpkt *pkt = cfpkt_create(CFPKT_CTRL_PKT_LEN); - if (!pkt) { - pr_warn("Out of memory\n"); - return -ENOMEM; - } - cfpkt_addbdy(pkt, CFCTRL_CMD_LINK_DESTROY); - cfpkt_addbdy(pkt, channelid); - init_info(cfpkt_info(pkt), cfctrl); - ret = - cfctrl->serv.layer.dn->transmit(cfctrl->serv.layer.dn, pkt); - if (ret < 0) { - pr_err("Could not transmit link-down request\n"); - cfpkt_destroy(pkt); - } - return ret; -} + struct cflayer *dn = cfctrl->serv.layer.dn; -void cfctrl_sleep_req(struct cflayer *layer) -{ - int ret; - struct cfctrl *cfctrl = container_obj(layer); - struct cfpkt *pkt = cfpkt_create(CFPKT_CTRL_PKT_LEN); if (!pkt) { pr_warn("Out of memory\n"); - return; + return -ENOMEM; } - cfpkt_addbdy(pkt, CFCTRL_CMD_SLEEP); - init_info(cfpkt_info(pkt), cfctrl); - ret = - cfctrl->serv.layer.dn->transmit(cfctrl->serv.layer.dn, pkt); - if (ret < 0) - cfpkt_destroy(pkt); -} -void cfctrl_wake_req(struct cflayer *layer) -{ - int ret; - struct cfctrl *cfctrl = container_obj(layer); - struct cfpkt *pkt = cfpkt_create(CFPKT_CTRL_PKT_LEN); - if (!pkt) { - pr_warn("Out of memory\n"); - return; + if (!dn) { + pr_debug("not able to send link-down request\n"); + return -ENODEV; } - cfpkt_addbdy(pkt, CFCTRL_CMD_WAKE); - init_info(cfpkt_info(pkt), cfctrl); - ret = - cfctrl->serv.layer.dn->transmit(cfctrl->serv.layer.dn, pkt); - if (ret < 0) - cfpkt_destroy(pkt); -} -void cfctrl_getstartreason_req(struct cflayer *layer) -{ - int ret; - struct cfctrl *cfctrl = container_obj(layer); - struct cfpkt *pkt = cfpkt_create(CFPKT_CTRL_PKT_LEN); - if (!pkt) { - pr_warn("Out of memory\n"); - return; - } - cfpkt_addbdy(pkt, CFCTRL_CMD_START_REASON); + cfpkt_addbdy(pkt, CFCTRL_CMD_LINK_DESTROY); + cfpkt_addbdy(pkt, channelid); init_info(cfpkt_info(pkt), cfctrl); ret = - cfctrl->serv.layer.dn->transmit(cfctrl->serv.layer.dn, pkt); - if (ret < 0) - cfpkt_destroy(pkt); + dn->transmit(dn, pkt); +#ifndef CAIF_NO_LOOP + cfctrl->loop_linkused[channelid] = 0; +#endif + return ret; } - -void cfctrl_cancel_req(struct cflayer *layr, struct cflayer *adap_layer) +int cfctrl_cancel_req(struct cflayer *layr, struct cflayer *adap_layer) { struct cfctrl_request_info *p, *tmp; struct cfctrl *ctrl = container_obj(layr); - spin_lock(&ctrl->info_list_lock); + int found = 0; + spin_lock_bh(&ctrl->info_list_lock); list_for_each_entry_safe(p, tmp, 
&ctrl->list, list) { if (p->client_layer == adap_layer) { - pr_debug("cancel req :%d\n", p->sequence_no); list_del(&p->list); kfree(p); + found++; } } - spin_unlock(&ctrl->info_list_lock); + spin_unlock_bh(&ctrl->info_list_lock); + return found; } static int cfctrl_recv(struct cflayer *layer, struct cfpkt *pkt) @@ -389,7 +368,8 @@ static int cfctrl_recv(struct cflayer *layer, struct cfpkt *pkt) cfpkt_extr_head(pkt, &cmdrsp, 1); cmd = cmdrsp & CFCTRL_CMD_MASK; if (cmd != CFCTRL_CMD_LINK_ERR - && CFCTRL_RSP_BIT != (CFCTRL_RSP_BIT & cmdrsp)) { + && CFCTRL_RSP_BIT != (CFCTRL_RSP_BIT & cmdrsp) + && CFCTRL_ERR_BIT != (CFCTRL_ERR_BIT & cmdrsp)) { if (handle_loop(cfctrl, cmd, pkt) != 0) cmdrsp |= CFCTRL_ERR_BIT; } @@ -515,18 +495,20 @@ static int cfctrl_recv(struct cflayer *layer, struct cfpkt *pkt) cfpkt_extr_head(pkt, ¶m, len); break; default: - pr_warn("Request setup - invalid link type (%d)\n", + pr_warn("Request setup, invalid type (%d)\n", serv); goto error; } rsp.cmd = cmd; rsp.param = linkparam; + spin_lock_bh(&cfctrl->info_list_lock); req = cfctrl_remove_req(cfctrl, &rsp); if (CFCTRL_ERR_BIT == (CFCTRL_ERR_BIT & cmdrsp) || cfpkt_erroneous(pkt)) { - pr_err("Invalid O/E bit or parse error on CAIF control channel\n"); + pr_err("Invalid O/E bit or parse error " + "on CAIF control channel\n"); cfctrl->res.reject_rsp(cfctrl->serv.layer.up, 0, req ? req->client_layer @@ -541,6 +523,8 @@ static int cfctrl_recv(struct cflayer *layer, struct cfpkt *pkt) if (req != NULL) kfree(req); + + spin_unlock_bh(&cfctrl->info_list_lock); } break; case CFCTRL_CMD_LINK_DESTROY: @@ -584,12 +568,28 @@ static void cfctrl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl, switch (ctrl) { case _CAIF_CTRLCMD_PHYIF_FLOW_OFF_IND: case CAIF_CTRLCMD_FLOW_OFF_IND: - spin_lock(&this->info_list_lock); - if (!list_empty(&this->list)) { + spin_lock_bh(&this->info_list_lock); + if (!list_empty(&this->list)) pr_debug("Received flow off in control layer\n"); + spin_unlock_bh(&this->info_list_lock); + break; + case _CAIF_CTRLCMD_PHYIF_DOWN_IND: { + struct cfctrl_request_info *p, *tmp; + + /* Find all connect request and report failure */ + spin_lock_bh(&this->info_list_lock); + list_for_each_entry_safe(p, tmp, &this->list, list) { + if (p->param.phyid == phyid) { + list_del(&p->list); + p->client_layer->ctrlcmd(p->client_layer, + CAIF_CTRLCMD_INIT_FAIL_RSP, + phyid); + kfree(p); + } } - spin_unlock(&this->info_list_lock); + spin_unlock_bh(&this->info_list_lock); break; + } default: break; } @@ -599,27 +599,33 @@ static void cfctrl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl, static int handle_loop(struct cfctrl *ctrl, int cmd, struct cfpkt *pkt) { static int last_linkid; + static int dec; u8 linkid, linktype, tmp; switch (cmd) { case CFCTRL_CMD_LINK_SETUP: - spin_lock(&ctrl->loop_linkid_lock); - for (linkid = last_linkid + 1; linkid < 255; linkid++) - if (!ctrl->loop_linkused[linkid]) - goto found; - for (linkid = last_linkid - 1; linkid > 0; linkid--) + spin_lock_bh(&ctrl->loop_linkid_lock); + if (!dec) { + for (linkid = last_linkid + 1; linkid < 254; linkid++) + if (!ctrl->loop_linkused[linkid]) + goto found; + } + dec = 1; + for (linkid = last_linkid - 1; linkid > 1; linkid--) if (!ctrl->loop_linkused[linkid]) goto found; - spin_unlock(&ctrl->loop_linkid_lock); - pr_err("Out of link-ids\n"); - return -EINVAL; + spin_unlock_bh(&ctrl->loop_linkid_lock); + return -1; found: + if (linkid < 10) + dec = 0; + if (!ctrl->loop_linkused[linkid]) ctrl->loop_linkused[linkid] = 1; last_linkid = linkid; cfpkt_add_trail(pkt, 
&linkid, 1); - spin_unlock(&ctrl->loop_linkid_lock); + spin_unlock_bh(&ctrl->loop_linkid_lock); cfpkt_peek_head(pkt, &linktype, 1); if (linktype == CFCTRL_SRV_UTIL) { tmp = 0x01; @@ -629,10 +635,10 @@ found: break; case CFCTRL_CMD_LINK_DESTROY: - spin_lock(&ctrl->loop_linkid_lock); + spin_lock_bh(&ctrl->loop_linkid_lock); cfpkt_peek_head(pkt, &linkid, 1); ctrl->loop_linkused[linkid] = 0; - spin_unlock(&ctrl->loop_linkid_lock); + spin_unlock_bh(&ctrl->loop_linkid_lock); break; default: break; diff --git a/net/caif/cfdgml.c b/net/caif/cfdgml.c index 27dab26ad3b8..0382dec84fdc 100644 --- a/net/caif/cfdgml.c +++ b/net/caif/cfdgml.c @@ -13,6 +13,7 @@ #include <net/caif/cfsrvl.h> #include <net/caif/cfpkt.h> + #define container_obj(layr) ((struct cfsrvl *) layr) #define DGM_CMD_BIT 0x80 @@ -83,6 +84,7 @@ static int cfdgml_receive(struct cflayer *layr, struct cfpkt *pkt) static int cfdgml_transmit(struct cflayer *layr, struct cfpkt *pkt) { + u8 packet_type; u32 zero = 0; struct caif_payload_info *info; struct cfsrvl *service = container_obj(layr); @@ -94,7 +96,9 @@ static int cfdgml_transmit(struct cflayer *layr, struct cfpkt *pkt) if (cfpkt_getlen(pkt) > DGM_MTU) return -EMSGSIZE; - cfpkt_add_head(pkt, &zero, 4); + cfpkt_add_head(pkt, &zero, 3); + packet_type = 0x08; /* B9 set - UNCLASSIFIED */ + cfpkt_add_head(pkt, &packet_type, 1); /* Add info for MUX-layer to route the packet out. */ info = cfpkt_info(pkt); @@ -104,10 +108,5 @@ static int cfdgml_transmit(struct cflayer *layr, struct cfpkt *pkt) */ info->hdr_len = 4; info->dev_info = &service->dev_info; - ret = layr->dn->transmit(layr->dn, pkt); - if (ret < 0) { - u32 tmp32; - cfpkt_extr_head(pkt, &tmp32, 4); - } - return ret; + return layr->dn->transmit(layr->dn, pkt); } diff --git a/net/caif/cffrml.c b/net/caif/cffrml.c index a445043931ae..04204b202718 100644 --- a/net/caif/cffrml.c +++ b/net/caif/cffrml.c @@ -12,6 +12,7 @@ #include <linux/spinlock.h> #include <linux/slab.h> #include <linux/crc-ccitt.h> +#include <linux/netdevice.h> #include <net/caif/caif_layer.h> #include <net/caif/cfpkt.h> #include <net/caif/cffrml.h> @@ -21,6 +22,7 @@ struct cffrml { struct cflayer layer; bool dofcs; /* !< FCS active */ + int __percpu *pcpu_refcnt; }; static int cffrml_receive(struct cflayer *layr, struct cfpkt *pkt); @@ -37,6 +39,12 @@ struct cflayer *cffrml_create(u16 phyid, bool use_fcs) pr_warn("Out of memory\n"); return NULL; } + this->pcpu_refcnt = alloc_percpu(int); + if (this->pcpu_refcnt == NULL) { + kfree(this); + return NULL; + } + caif_assert(offsetof(struct cffrml, layer) == 0); memset(this, 0, sizeof(struct cflayer)); @@ -49,6 +57,13 @@ struct cflayer *cffrml_create(u16 phyid, bool use_fcs) return (struct cflayer *) this; } +void cffrml_free(struct cflayer *layer) +{ + struct cffrml *this = container_obj(layer); + free_percpu(this->pcpu_refcnt); + kfree(layer); +} + void cffrml_set_uplayer(struct cflayer *this, struct cflayer *up) { this->up = up; @@ -112,6 +127,13 @@ static int cffrml_receive(struct cflayer *layr, struct cfpkt *pkt) cfpkt_destroy(pkt); return -EPROTO; } + + if (layr->up == NULL) { + pr_err("Layr up is missing!\n"); + cfpkt_destroy(pkt); + return -EINVAL; + } + return layr->up->receive(layr->up, pkt); } @@ -120,7 +142,6 @@ static int cffrml_transmit(struct cflayer *layr, struct cfpkt *pkt) int tmp; u16 chks; u16 len; - int ret; struct cffrml *this = container_obj(layr); if (this->dofcs) { chks = cfpkt_iterate(pkt, cffrml_checksum, 0xffff); @@ -135,19 +156,44 @@ static int cffrml_transmit(struct cflayer *layr, struct cfpkt 
*pkt) cfpkt_info(pkt)->hdr_len += 2; if (cfpkt_erroneous(pkt)) { pr_err("Packet is erroneous!\n"); + cfpkt_destroy(pkt); return -EPROTO; } - ret = layr->dn->transmit(layr->dn, pkt); - if (ret < 0) { - /* Remove header on faulty packet. */ - cfpkt_extr_head(pkt, &tmp, 2); + + if (layr->dn == NULL) { + cfpkt_destroy(pkt); + return -ENODEV; + } - return ret; + return layr->dn->transmit(layr->dn, pkt); } static void cffrml_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl, int phyid) { - if (layr->up->ctrlcmd) + if (layr->up && layr->up->ctrlcmd) layr->up->ctrlcmd(layr->up, ctrl, layr->id); } + +void cffrml_put(struct cflayer *layr) +{ + struct cffrml *this = container_obj(layr); + if (layr != NULL && this->pcpu_refcnt != NULL) + irqsafe_cpu_dec(*this->pcpu_refcnt); +} + +void cffrml_hold(struct cflayer *layr) +{ + struct cffrml *this = container_obj(layr); + if (layr != NULL && this->pcpu_refcnt != NULL) + irqsafe_cpu_inc(*this->pcpu_refcnt); +} + +int cffrml_refcnt_read(struct cflayer *layr) +{ + int i, refcnt = 0; + struct cffrml *this = container_obj(layr); + for_each_possible_cpu(i) + refcnt += *per_cpu_ptr(this->pcpu_refcnt, i); + return refcnt; +} diff --git a/net/caif/cfmuxl.c b/net/caif/cfmuxl.c index 46f34b2e0478..3a66b8c10e09 100644 --- a/net/caif/cfmuxl.c +++ b/net/caif/cfmuxl.c @@ -9,6 +9,7 @@ #include <linux/stddef.h> #include <linux/spinlock.h> #include <linux/slab.h> +#include <linux/rculist.h> #include <net/caif/cfpkt.h> #include <net/caif/cfmuxl.h> #include <net/caif/cfsrvl.h> @@ -61,111 +62,88 @@ struct cflayer *cfmuxl_create(void) return &this->layer; } -int cfmuxl_set_uplayer(struct cflayer *layr, struct cflayer *up, u8 linkid) +int cfmuxl_set_dnlayer(struct cflayer *layr, struct cflayer *dn, u8 phyid) { - struct cfmuxl *muxl = container_obj(layr); - spin_lock(&muxl->receive_lock); - cfsrvl_get(up); - list_add(&up->node, &muxl->srvl_list); - spin_unlock(&muxl->receive_lock); + struct cfmuxl *muxl = (struct cfmuxl *) layr; + + spin_lock_bh(&muxl->transmit_lock); + list_add_rcu(&dn->node, &muxl->frml_list); + spin_unlock_bh(&muxl->transmit_lock); return 0; } -bool cfmuxl_is_phy_inuse(struct cflayer *layr, u8 phyid) +static struct cflayer *get_from_id(struct list_head *list, u16 id) { - struct list_head *node; - struct cflayer *layer; - struct cfmuxl *muxl = container_obj(layr); - bool match = false; - spin_lock(&muxl->receive_lock); - - list_for_each(node, &muxl->srvl_list) { - layer = list_entry(node, struct cflayer, node); - if (cfsrvl_phyid_match(layer, phyid)) { - match = true; - break; - } - + struct cflayer *lyr; + list_for_each_entry_rcu(lyr, list, node) { + if (lyr->id == id) + return lyr; } - spin_unlock(&muxl->receive_lock); - return match; + + return NULL; } -u8 cfmuxl_get_phyid(struct cflayer *layr, u8 channel_id) +int cfmuxl_set_uplayer(struct cflayer *layr, struct cflayer *up, u8 linkid) { - struct cflayer *up; - int phyid; struct cfmuxl *muxl = container_obj(layr); - spin_lock(&muxl->receive_lock); - up = get_up(muxl, channel_id); - if (up != NULL) - phyid = cfsrvl_getphyid(up); - else - phyid = 0; - spin_unlock(&muxl->receive_lock); - return phyid; -} + struct cflayer *old; -int cfmuxl_set_dnlayer(struct cflayer *layr, struct cflayer *dn, u8 phyid) -{ - struct cfmuxl *muxl = (struct cfmuxl *) layr; - spin_lock(&muxl->transmit_lock); - list_add(&dn->node, &muxl->frml_list); - spin_unlock(&muxl->transmit_lock); - return 0; -} + spin_lock_bh(&muxl->receive_lock); -static struct cflayer *get_from_id(struct list_head *list, u16 id) -{ - struct list_head *node; 
- struct cflayer *layer; - list_for_each(node, list) { - layer = list_entry(node, struct cflayer, node); - if (layer->id == id) - return layer; - } - return NULL; + /* Two entries with same id is wrong, so remove old layer from mux */ + old = get_from_id(&muxl->srvl_list, linkid); + if (old != NULL) + list_del_rcu(&old->node); + + list_add_rcu(&up->node, &muxl->srvl_list); + spin_unlock_bh(&muxl->receive_lock); + + return 0; } struct cflayer *cfmuxl_remove_dnlayer(struct cflayer *layr, u8 phyid) { struct cfmuxl *muxl = container_obj(layr); struct cflayer *dn; - spin_lock(&muxl->transmit_lock); - memset(muxl->dn_cache, 0, sizeof(muxl->dn_cache)); + int idx = phyid % DN_CACHE_SIZE; + + spin_lock_bh(&muxl->transmit_lock); + rcu_assign_pointer(muxl->dn_cache[idx], NULL); dn = get_from_id(&muxl->frml_list, phyid); - if (dn == NULL) { - spin_unlock(&muxl->transmit_lock); - return NULL; - } - list_del(&dn->node); + if (dn == NULL) + goto out; + + list_del_rcu(&dn->node); caif_assert(dn != NULL); - spin_unlock(&muxl->transmit_lock); +out: + spin_unlock_bh(&muxl->transmit_lock); return dn; } -/* Invariant: lock is taken */ static struct cflayer *get_up(struct cfmuxl *muxl, u16 id) { struct cflayer *up; int idx = id % UP_CACHE_SIZE; - up = muxl->up_cache[idx]; + up = rcu_dereference(muxl->up_cache[idx]); if (up == NULL || up->id != id) { + spin_lock_bh(&muxl->receive_lock); up = get_from_id(&muxl->srvl_list, id); - muxl->up_cache[idx] = up; + rcu_assign_pointer(muxl->up_cache[idx], up); + spin_unlock_bh(&muxl->receive_lock); } return up; } -/* Invariant: lock is taken */ static struct cflayer *get_dn(struct cfmuxl *muxl, struct dev_info *dev_info) { struct cflayer *dn; int idx = dev_info->id % DN_CACHE_SIZE; - dn = muxl->dn_cache[idx]; + dn = rcu_dereference(muxl->dn_cache[idx]); if (dn == NULL || dn->id != dev_info->id) { + spin_lock_bh(&muxl->transmit_lock); dn = get_from_id(&muxl->frml_list, dev_info->id); - muxl->dn_cache[idx] = dn; + rcu_assign_pointer(muxl->dn_cache[idx], dn); + spin_unlock_bh(&muxl->transmit_lock); } return dn; } @@ -174,15 +152,22 @@ struct cflayer *cfmuxl_remove_uplayer(struct cflayer *layr, u8 id) { struct cflayer *up; struct cfmuxl *muxl = container_obj(layr); - spin_lock(&muxl->receive_lock); - up = get_up(muxl, id); + int idx = id % UP_CACHE_SIZE; + + if (id == 0) { + pr_warn("Trying to remove control layer\n"); + return NULL; + } + + spin_lock_bh(&muxl->receive_lock); + up = get_from_id(&muxl->srvl_list, id); if (up == NULL) goto out; - memset(muxl->up_cache, 0, sizeof(muxl->up_cache)); - list_del(&up->node); - cfsrvl_put(up); + + rcu_assign_pointer(muxl->up_cache[idx], NULL); + list_del_rcu(&up->node); out: - spin_unlock(&muxl->receive_lock); + spin_unlock_bh(&muxl->receive_lock); return up; } @@ -197,58 +182,92 @@ static int cfmuxl_receive(struct cflayer *layr, struct cfpkt *pkt) cfpkt_destroy(pkt); return -EPROTO; } - - spin_lock(&muxl->receive_lock); + rcu_read_lock(); up = get_up(muxl, id); - spin_unlock(&muxl->receive_lock); + if (up == NULL) { - pr_info("Received data on unknown link ID = %d (0x%x) up == NULL", - id, id); + pr_debug("Received data on unknown link ID = %d (0x%x)" + " up == NULL", id, id); cfpkt_destroy(pkt); /* * Don't return ERROR, since modem misbehaves and sends out * flow on before linksetup response. 
*/ + + rcu_read_unlock(); return /* CFGLU_EPROT; */ 0; } + + /* We can't hold rcu_lock during receive, so take a ref count instead */ cfsrvl_get(up); + rcu_read_unlock(); + ret = up->receive(up, pkt); + cfsrvl_put(up); return ret; } static int cfmuxl_transmit(struct cflayer *layr, struct cfpkt *pkt) { - int ret; struct cfmuxl *muxl = container_obj(layr); + int err; u8 linkid; struct cflayer *dn; struct caif_payload_info *info = cfpkt_info(pkt); - dn = get_dn(muxl, cfpkt_info(pkt)->dev_info); + BUG_ON(!info); + + rcu_read_lock(); + + dn = get_dn(muxl, info->dev_info); if (dn == NULL) { - pr_warn("Send data on unknown phy ID = %d (0x%x)\n", + pr_debug("Send data on unknown phy ID = %d (0x%x)\n", info->dev_info->id, info->dev_info->id); + rcu_read_unlock(); + cfpkt_destroy(pkt); return -ENOTCONN; } + info->hdr_len += 1; linkid = info->channel_id; cfpkt_add_head(pkt, &linkid, 1); - ret = dn->transmit(dn, pkt); - /* Remove MUX protocol header upon error. */ - if (ret < 0) - cfpkt_extr_head(pkt, &linkid, 1); - return ret; + + /* We can't hold rcu_lock during receive, so take a ref count instead */ + cffrml_hold(dn); + + rcu_read_unlock(); + + err = dn->transmit(dn, pkt); + + cffrml_put(dn); + return err; } static void cfmuxl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl, int phyid) { struct cfmuxl *muxl = container_obj(layr); - struct list_head *node; struct cflayer *layer; - list_for_each(node, &muxl->srvl_list) { - layer = list_entry(node, struct cflayer, node); - if (cfsrvl_phyid_match(layer, phyid)) + int idx; + + rcu_read_lock(); + list_for_each_entry_rcu(layer, &muxl->srvl_list, node) { + + if (cfsrvl_phyid_match(layer, phyid) && layer->ctrlcmd) { + + if ((ctrl == _CAIF_CTRLCMD_PHYIF_FLOW_OFF_IND || + ctrl == CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND) && + layer->id != 0) { + + idx = layer->id % UP_CACHE_SIZE; + spin_lock_bh(&muxl->receive_lock); + rcu_assign_pointer(muxl->up_cache[idx], NULL); + list_del_rcu(&layer->node); + spin_unlock_bh(&muxl->receive_lock); + } + /* NOTE: ctrlcmd is not allowed to block */ layer->ctrlcmd(layer, ctrl, phyid); + } } + rcu_read_unlock(); } diff --git a/net/caif/cfpkt_skbuff.c b/net/caif/cfpkt_skbuff.c index d7e865e2ff65..75d4bfae1a78 100644 --- a/net/caif/cfpkt_skbuff.c +++ b/net/caif/cfpkt_skbuff.c @@ -42,22 +42,22 @@ struct cfpkt_priv_data { bool erronous; }; -inline struct cfpkt_priv_data *cfpkt_priv(struct cfpkt *pkt) +static inline struct cfpkt_priv_data *cfpkt_priv(struct cfpkt *pkt) { return (struct cfpkt_priv_data *) pkt->skb.cb; } -inline bool is_erronous(struct cfpkt *pkt) +static inline bool is_erronous(struct cfpkt *pkt) { return cfpkt_priv(pkt)->erronous; } -inline struct sk_buff *pkt_to_skb(struct cfpkt *pkt) +static inline struct sk_buff *pkt_to_skb(struct cfpkt *pkt) { return &pkt->skb; } -inline struct cfpkt *skb_to_pkt(struct sk_buff *skb) +static inline struct cfpkt *skb_to_pkt(struct sk_buff *skb) { return (struct cfpkt *) skb; } @@ -97,21 +97,20 @@ inline struct cfpkt *cfpkt_create(u16 len) { return cfpkt_create_pfx(len + PKT_POSTFIX, PKT_PREFIX); } -EXPORT_SYMBOL(cfpkt_create); void cfpkt_destroy(struct cfpkt *pkt) { struct sk_buff *skb = pkt_to_skb(pkt); kfree_skb(skb); } -EXPORT_SYMBOL(cfpkt_destroy); + inline bool cfpkt_more(struct cfpkt *pkt) { struct sk_buff *skb = pkt_to_skb(pkt); return skb->len > 0; } -EXPORT_SYMBOL(cfpkt_more); + int cfpkt_peek_head(struct cfpkt *pkt, void *data, u16 len) { @@ -123,7 +122,6 @@ int cfpkt_peek_head(struct cfpkt *pkt, void *data, u16 len) return !cfpkt_extr_head(pkt, data, len) && 
!cfpkt_add_head(pkt, data, len); } -EXPORT_SYMBOL(cfpkt_peek_head); int cfpkt_extr_head(struct cfpkt *pkt, void *data, u16 len) { @@ -148,7 +146,6 @@ int cfpkt_extr_head(struct cfpkt *pkt, void *data, u16 len) memcpy(data, from, len); return 0; } -EXPORT_SYMBOL(cfpkt_extr_head); int cfpkt_extr_trail(struct cfpkt *pkt, void *dta, u16 len) { @@ -171,13 +168,13 @@ int cfpkt_extr_trail(struct cfpkt *pkt, void *dta, u16 len) memcpy(data, from, len); return 0; } -EXPORT_SYMBOL(cfpkt_extr_trail); + int cfpkt_pad_trail(struct cfpkt *pkt, u16 len) { return cfpkt_add_body(pkt, NULL, len); } -EXPORT_SYMBOL(cfpkt_pad_trail); + int cfpkt_add_body(struct cfpkt *pkt, const void *data, u16 len) { @@ -226,13 +223,11 @@ int cfpkt_add_body(struct cfpkt *pkt, const void *data, u16 len) memcpy(to, data, len); return 0; } -EXPORT_SYMBOL(cfpkt_add_body); inline int cfpkt_addbdy(struct cfpkt *pkt, u8 data) { return cfpkt_add_body(pkt, &data, 1); } -EXPORT_SYMBOL(cfpkt_addbdy); int cfpkt_add_head(struct cfpkt *pkt, const void *data2, u16 len) { @@ -259,20 +254,20 @@ int cfpkt_add_head(struct cfpkt *pkt, const void *data2, u16 len) memcpy(to, data, len); return 0; } -EXPORT_SYMBOL(cfpkt_add_head); + inline int cfpkt_add_trail(struct cfpkt *pkt, const void *data, u16 len) { return cfpkt_add_body(pkt, data, len); } -EXPORT_SYMBOL(cfpkt_add_trail); + inline u16 cfpkt_getlen(struct cfpkt *pkt) { struct sk_buff *skb = pkt_to_skb(pkt); return skb->len; } -EXPORT_SYMBOL(cfpkt_getlen); + inline u16 cfpkt_iterate(struct cfpkt *pkt, u16 (*iter_func)(u16, void *, u16), @@ -290,7 +285,7 @@ inline u16 cfpkt_iterate(struct cfpkt *pkt, } return iter_func(data, pkt->skb.data, cfpkt_getlen(pkt)); } -EXPORT_SYMBOL(cfpkt_iterate); + int cfpkt_setlen(struct cfpkt *pkt, u16 len) { @@ -315,18 +310,6 @@ int cfpkt_setlen(struct cfpkt *pkt, u16 len) return cfpkt_getlen(pkt); } -EXPORT_SYMBOL(cfpkt_setlen); - -struct cfpkt *cfpkt_create_uplink(const unsigned char *data, unsigned int len) -{ - struct cfpkt *pkt = cfpkt_create_pfx(len + PKT_POSTFIX, PKT_PREFIX); - if (!pkt) - return NULL; - if (unlikely(data != NULL)) - cfpkt_add_body(pkt, data, len); - return pkt; -} -EXPORT_SYMBOL(cfpkt_create_uplink); struct cfpkt *cfpkt_append(struct cfpkt *dstpkt, struct cfpkt *addpkt, @@ -368,7 +351,6 @@ struct cfpkt *cfpkt_append(struct cfpkt *dstpkt, dst->len += addlen; return skb_to_pkt(dst); } -EXPORT_SYMBOL(cfpkt_append); struct cfpkt *cfpkt_split(struct cfpkt *pkt, u16 pos) { @@ -406,174 +388,13 @@ struct cfpkt *cfpkt_split(struct cfpkt *pkt, u16 pos) skb2->len += len2nd; return skb_to_pkt(skb2); } -EXPORT_SYMBOL(cfpkt_split); - -char *cfpkt_log_pkt(struct cfpkt *pkt, char *buf, int buflen) -{ - struct sk_buff *skb = pkt_to_skb(pkt); - char *p = buf; - int i; - - /* - * Sanity check buffer length, it needs to be at least as large as - * the header info: ~=50+ bytes - */ - if (buflen < 50) - return NULL; - - snprintf(buf, buflen, "%s: pkt:%p len:%ld(%ld+%ld) {%ld,%ld} data: [", - is_erronous(pkt) ? "ERRONOUS-SKB" : - (skb->data_len != 0 ? 
"COMPLEX-SKB" : "SKB"), - skb, - (long) skb->len, - (long) (skb_tail_pointer(skb) - skb->data), - (long) skb->data_len, - (long) (skb->data - skb->head), - (long) (skb_tail_pointer(skb) - skb->head)); - p = buf + strlen(buf); - - for (i = 0; i < skb_tail_pointer(skb) - skb->data && i < 300; i++) { - if (p > buf + buflen - 10) { - sprintf(p, "..."); - p = buf + strlen(buf); - break; - } - sprintf(p, "%02x,", skb->data[i]); - p = buf + strlen(buf); - } - sprintf(p, "]\n"); - return buf; -} -EXPORT_SYMBOL(cfpkt_log_pkt); - -int cfpkt_raw_append(struct cfpkt *pkt, void **buf, unsigned int buflen) -{ - struct sk_buff *skb = pkt_to_skb(pkt); - struct sk_buff *lastskb; - - caif_assert(buf != NULL); - if (unlikely(is_erronous(pkt))) - return -EPROTO; - /* Make sure SKB is writable */ - if (unlikely(skb_cow_data(skb, 0, &lastskb) < 0)) { - PKT_ERROR(pkt, "skb_cow_data failed\n"); - return -EPROTO; - } - - if (unlikely(skb_linearize(skb) != 0)) { - PKT_ERROR(pkt, "linearize failed\n"); - return -EPROTO; - } - - if (unlikely(skb_tailroom(skb) < buflen)) { - PKT_ERROR(pkt, "buffer too short - failed\n"); - return -EPROTO; - } - - *buf = skb_put(skb, buflen); - return 1; -} -EXPORT_SYMBOL(cfpkt_raw_append); -int cfpkt_raw_extract(struct cfpkt *pkt, void **buf, unsigned int buflen) -{ - struct sk_buff *skb = pkt_to_skb(pkt); - - caif_assert(buf != NULL); - if (unlikely(is_erronous(pkt))) - return -EPROTO; - - if (unlikely(buflen > skb->len)) { - PKT_ERROR(pkt, "buflen too large - failed\n"); - return -EPROTO; - } - - if (unlikely(buflen > skb_headlen(skb))) { - if (unlikely(skb_linearize(skb) != 0)) { - PKT_ERROR(pkt, "linearize failed\n"); - return -EPROTO; - } - } - - *buf = skb->data; - skb_pull(skb, buflen); - - return 1; -} -EXPORT_SYMBOL(cfpkt_raw_extract); - -inline bool cfpkt_erroneous(struct cfpkt *pkt) +bool cfpkt_erroneous(struct cfpkt *pkt) { return cfpkt_priv(pkt)->erronous; } -EXPORT_SYMBOL(cfpkt_erroneous); - -struct cfpktq *cfpktq_create(void) -{ - struct cfpktq *q = kmalloc(sizeof(struct cfpktq), GFP_ATOMIC); - if (!q) - return NULL; - skb_queue_head_init(&q->head); - atomic_set(&q->count, 0); - spin_lock_init(&q->lock); - return q; -} -EXPORT_SYMBOL(cfpktq_create); - -void cfpkt_queue(struct cfpktq *pktq, struct cfpkt *pkt, unsigned short prio) -{ - atomic_inc(&pktq->count); - spin_lock(&pktq->lock); - skb_queue_tail(&pktq->head, pkt_to_skb(pkt)); - spin_unlock(&pktq->lock); - -} -EXPORT_SYMBOL(cfpkt_queue); - -struct cfpkt *cfpkt_qpeek(struct cfpktq *pktq) -{ - struct cfpkt *tmp; - spin_lock(&pktq->lock); - tmp = skb_to_pkt(skb_peek(&pktq->head)); - spin_unlock(&pktq->lock); - return tmp; -} -EXPORT_SYMBOL(cfpkt_qpeek); - -struct cfpkt *cfpkt_dequeue(struct cfpktq *pktq) -{ - struct cfpkt *pkt; - spin_lock(&pktq->lock); - pkt = skb_to_pkt(skb_dequeue(&pktq->head)); - if (pkt) { - atomic_dec(&pktq->count); - caif_assert(atomic_read(&pktq->count) >= 0); - } - spin_unlock(&pktq->lock); - return pkt; -} -EXPORT_SYMBOL(cfpkt_dequeue); - -int cfpkt_qcount(struct cfpktq *pktq) -{ - return atomic_read(&pktq->count); -} -EXPORT_SYMBOL(cfpkt_qcount); - -struct cfpkt *cfpkt_clone_release(struct cfpkt *pkt) -{ - struct cfpkt *clone; - clone = skb_to_pkt(skb_clone(pkt_to_skb(pkt), GFP_ATOMIC)); - /* Free original packet. 
*/ - cfpkt_destroy(pkt); - if (!clone) - return NULL; - return clone; -} -EXPORT_SYMBOL(cfpkt_clone_release); struct caif_payload_info *cfpkt_info(struct cfpkt *pkt) { return (struct caif_payload_info *)&pkt_to_skb(pkt)->cb; } -EXPORT_SYMBOL(cfpkt_info); diff --git a/net/caif/cfrfml.c b/net/caif/cfrfml.c index e2fb5fa75795..0deabb440051 100644 --- a/net/caif/cfrfml.c +++ b/net/caif/cfrfml.c @@ -31,9 +31,9 @@ struct cfrfml { spinlock_t sync; }; -static void cfrfml_release(struct kref *kref) +static void cfrfml_release(struct cflayer *layer) { - struct cfsrvl *srvl = container_of(kref, struct cfsrvl, ref); + struct cfsrvl *srvl = container_of(layer, struct cfsrvl, layer); struct cfrfml *rfml = container_obj(&srvl->layer); if (rfml->incomplete_frm) diff --git a/net/caif/cfserl.c b/net/caif/cfserl.c index 8303fe3ebf89..2715c84cfa87 100644 --- a/net/caif/cfserl.c +++ b/net/caif/cfserl.c @@ -179,15 +179,10 @@ static int cfserl_receive(struct cflayer *l, struct cfpkt *newpkt) static int cfserl_transmit(struct cflayer *layer, struct cfpkt *newpkt) { struct cfserl *layr = container_obj(layer); - int ret; u8 tmp8 = CFSERL_STX; if (layr->usestx) cfpkt_add_head(newpkt, &tmp8, 1); - ret = layer->dn->transmit(layer->dn, newpkt); - if (ret < 0) - cfpkt_extr_head(newpkt, &tmp8, 1); - - return ret; + return layer->dn->transmit(layer->dn, newpkt); } static void cfserl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl, diff --git a/net/caif/cfsrvl.c b/net/caif/cfsrvl.c index ab5e542526bf..535a1e72b366 100644 --- a/net/caif/cfsrvl.c +++ b/net/caif/cfsrvl.c @@ -10,6 +10,7 @@ #include <linux/types.h> #include <linux/errno.h> #include <linux/slab.h> +#include <linux/module.h> #include <net/caif/caif_layer.h> #include <net/caif/cfsrvl.h> #include <net/caif/cfpkt.h> @@ -27,8 +28,8 @@ static void cfservl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl, { struct cfsrvl *service = container_obj(layr); - caif_assert(layr->up != NULL); - caif_assert(layr->up->ctrlcmd != NULL); + if (layr->up == NULL || layr->up->ctrlcmd == NULL) + return; switch (ctrl) { case CAIF_CTRLCMD_INIT_RSP: @@ -151,14 +152,9 @@ static int cfservl_modemcmd(struct cflayer *layr, enum caif_modemcmd ctrl) return -EINVAL; } -void cfservl_destroy(struct cflayer *layer) +static void cfsrvl_release(struct cflayer *layer) { - kfree(layer); -} - -void cfsrvl_release(struct kref *kref) -{ - struct cfsrvl *service = container_of(kref, struct cfsrvl, ref); + struct cfsrvl *service = container_of(layer, struct cfsrvl, layer); kfree(service); } @@ -178,10 +174,8 @@ void cfsrvl_init(struct cfsrvl *service, service->dev_info = *dev_info; service->supports_flowctrl = supports_flowctrl; service->release = cfsrvl_release; - kref_init(&service->ref); } - bool cfsrvl_ready(struct cfsrvl *service, int *err) { if (service->open && service->modem_flow_on && service->phy_flow_on) @@ -194,6 +188,7 @@ bool cfsrvl_ready(struct cfsrvl *service, int *err) *err = -EAGAIN; return false; } + u8 cfsrvl_getphyid(struct cflayer *layer) { struct cfsrvl *servl = container_obj(layer); @@ -205,3 +200,26 @@ bool cfsrvl_phyid_match(struct cflayer *layer, int phyid) struct cfsrvl *servl = container_obj(layer); return servl->dev_info.id == phyid; } + +void caif_free_client(struct cflayer *adap_layer) +{ + struct cfsrvl *servl; + if (adap_layer == NULL || adap_layer->dn == NULL) + return; + servl = container_obj(adap_layer->dn); + servl->release(&servl->layer); +} +EXPORT_SYMBOL(caif_free_client); + +void caif_client_register_refcnt(struct cflayer *adapt_layer, + void 
(*hold)(struct cflayer *lyr), + void (*put)(struct cflayer *lyr)) +{ + struct cfsrvl *service; + service = container_of(adapt_layer->dn, struct cfsrvl, layer); + + WARN_ON(adapt_layer == NULL || adapt_layer->dn == NULL); + service->hold = hold; + service->put = put; +} +EXPORT_SYMBOL(caif_client_register_refcnt); diff --git a/net/caif/cfutill.c b/net/caif/cfutill.c index 315c0d601368..98e027db18ed 100644 --- a/net/caif/cfutill.c +++ b/net/caif/cfutill.c @@ -100,10 +100,5 @@ static int cfutill_transmit(struct cflayer *layr, struct cfpkt *pkt) */ info->hdr_len = 1; info->dev_info = &service->dev_info; - ret = layr->dn->transmit(layr->dn, pkt); - if (ret < 0) { - u32 tmp32; - cfpkt_extr_head(pkt, &tmp32, 4); - } - return ret; + return layr->dn->transmit(layr->dn, pkt); } diff --git a/net/caif/cfveil.c b/net/caif/cfveil.c index c3b1dec4acf6..3ec83fbc2887 100644 --- a/net/caif/cfveil.c +++ b/net/caif/cfveil.c @@ -82,13 +82,14 @@ static int cfvei_transmit(struct cflayer *layr, struct cfpkt *pkt) int ret; struct cfsrvl *service = container_obj(layr); if (!cfsrvl_ready(service, &ret)) - return ret; + goto err; caif_assert(layr->dn != NULL); caif_assert(layr->dn->transmit != NULL); if (cfpkt_add_head(pkt, &tmp, 1) < 0) { pr_err("Packet is erroneous!\n"); - return -EPROTO; + ret = -EPROTO; + goto err; } /* Add info-> for MUX-layer to route the packet out. */ @@ -96,8 +97,8 @@ static int cfvei_transmit(struct cflayer *layr, struct cfpkt *pkt) info->channel_id = service->layer.id; info->hdr_len = 1; info->dev_info = &service->dev_info; - ret = layr->dn->transmit(layr->dn, pkt); - if (ret < 0) - cfpkt_extr_head(pkt, &tmp, 1); + return layr->dn->transmit(layr->dn, pkt); +err: + cfpkt_destroy(pkt); return ret; } diff --git a/net/caif/cfvidl.c b/net/caif/cfvidl.c index bf6fef2a0eff..b2f5989ad455 100644 --- a/net/caif/cfvidl.c +++ b/net/caif/cfvidl.c @@ -60,8 +60,5 @@ static int cfvidl_transmit(struct cflayer *layr, struct cfpkt *pkt) info = cfpkt_info(pkt); info->channel_id = service->layer.id; info->dev_info = &service->dev_info; - ret = layr->dn->transmit(layr->dn, pkt); - if (ret < 0) - cfpkt_extr_head(pkt, &videoheader, 4); - return ret; + return layr->dn->transmit(layr->dn, pkt); } diff --git a/net/caif/chnl_net.c b/net/caif/chnl_net.c index 6008d6dc18a0..649ebacaf6bc 100644 --- a/net/caif/chnl_net.c +++ b/net/caif/chnl_net.c @@ -20,7 +20,6 @@ #include <linux/caif/if_caif.h> #include <net/rtnetlink.h> #include <net/caif/caif_layer.h> -#include <net/caif/cfcnfg.h> #include <net/caif/cfpkt.h> #include <net/caif/caif_dev.h> @@ -84,10 +83,11 @@ static int chnl_recv_cb(struct cflayer *layr, struct cfpkt *pkt) if (!priv) return -EINVAL; + skb = (struct sk_buff *) cfpkt_tonative(pkt); + /* Get length of CAIF packet. */ - pktlen = cfpkt_getlen(pkt); + pktlen = skb->len; - skb = (struct sk_buff *) cfpkt_tonative(pkt); /* Pass some minimum information and * send the packet to the net stack. 
*/ @@ -153,6 +153,18 @@ static void close_work(struct work_struct *work) } static DECLARE_WORK(close_worker, close_work); +static void chnl_hold(struct cflayer *lyr) +{ + struct chnl_net *priv = container_of(lyr, struct chnl_net, chnl); + dev_hold(priv->netdev); +} + +static void chnl_put(struct cflayer *lyr) +{ + struct chnl_net *priv = container_of(lyr, struct chnl_net, chnl); + dev_put(priv->netdev); +} + static void chnl_flowctrl_cb(struct cflayer *layr, enum caif_ctrlcmd flow, int phyid) { @@ -190,6 +202,7 @@ static void chnl_flowctrl_cb(struct cflayer *layr, enum caif_ctrlcmd flow, netif_wake_queue(priv->netdev); break; case CAIF_CTRLCMD_INIT_RSP: + caif_client_register_refcnt(&priv->chnl, chnl_hold, chnl_put); priv->state = CAIF_CONNECTED; priv->flowenabled = true; netif_wake_queue(priv->netdev); @@ -257,8 +270,9 @@ static int chnl_net_open(struct net_device *dev) if (priv->state != CAIF_CONNECTING) { priv->state = CAIF_CONNECTING; - result = caif_connect_client(&priv->conn_req, &priv->chnl, - &llifindex, &headroom, &tailroom); + result = caif_connect_client(dev_net(dev), &priv->conn_req, + &priv->chnl, &llifindex, + &headroom, &tailroom); if (result != 0) { pr_debug("err: " "Unable to register and open device," @@ -314,7 +328,7 @@ static int chnl_net_open(struct net_device *dev) if (result == 0) { pr_debug("connect timeout\n"); - caif_disconnect_client(&priv->chnl); + caif_disconnect_client(dev_net(dev), &priv->chnl); priv->state = CAIF_DISCONNECTED; pr_debug("state disconnected\n"); result = -ETIMEDOUT; @@ -330,7 +344,7 @@ static int chnl_net_open(struct net_device *dev) return 0; error: - caif_disconnect_client(&priv->chnl); + caif_disconnect_client(dev_net(dev), &priv->chnl); priv->state = CAIF_DISCONNECTED; pr_debug("state disconnected\n"); return result; @@ -344,7 +358,7 @@ static int chnl_net_stop(struct net_device *dev) ASSERT_RTNL(); priv = netdev_priv(dev); priv->state = CAIF_DISCONNECTED; - caif_disconnect_client(&priv->chnl); + caif_disconnect_client(dev_net(dev), &priv->chnl); return 0; } @@ -373,11 +387,18 @@ static const struct net_device_ops netdev_ops = { .ndo_start_xmit = chnl_net_start_xmit, }; +static void chnl_net_destructor(struct net_device *dev) +{ + struct chnl_net *priv = netdev_priv(dev); + caif_free_client(&priv->chnl); + free_netdev(dev); +} + static void ipcaif_net_setup(struct net_device *dev) { struct chnl_net *priv; dev->netdev_ops = &netdev_ops; - dev->destructor = free_netdev; + dev->destructor = chnl_net_destructor; dev->flags |= IFF_NOARP; dev->flags |= IFF_POINTOPOINT; dev->mtu = GPRS_PDP_MTU; @@ -391,7 +412,7 @@ static void ipcaif_net_setup(struct net_device *dev) priv->conn_req.link_selector = CAIF_LINK_HIGH_BANDW; priv->conn_req.priority = CAIF_PRIO_LOW; /* Insert illegal value */ - priv->conn_req.sockaddr.u.dgm.connection_id = -1; + priv->conn_req.sockaddr.u.dgm.connection_id = 0; priv->flowenabled = false; init_waitqueue_head(&priv->netmgmt_wq); @@ -453,6 +474,10 @@ static int ipcaif_newlink(struct net *src_net, struct net_device *dev, pr_warn("device rtml registration failed\n"); else list_add(&caifdev->list_field, &chnl_net_list); + + /* Take ifindex as connection-id if null */ + if (caifdev->conn_req.sockaddr.u.dgm.connection_id == 0) + caifdev->conn_req.sockaddr.u.dgm.connection_id = dev->ifindex; return ret; } |
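
The cfctrl hunks above switch the request list to BH-safe locking and make cfctrl_cancel_req() return the number of entries it removed, so cfctrl_linkup_request() can roll back the request it has just queued when the downstream transmit fails. A minimal sketch of that bookkeeping pattern, with hypothetical names (struct pending_req, pending_list, cancel_reqs()) standing in for the cfctrl structures:

#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct pending_req {
	struct list_head list;
	void *client;                /* adaptation layer that queued it */
};

static LIST_HEAD(pending_list);
static DEFINE_SPINLOCK(pending_lock);

/* Remove every request queued by @client; return how many were found,
 * so a caller that queued exactly one request can detect anomalies. */
static int cancel_reqs(void *client)
{
	struct pending_req *p, *tmp;
	int found = 0;

	spin_lock_bh(&pending_lock);
	list_for_each_entry_safe(p, tmp, &pending_list, list) {
		if (p->client == client) {
			list_del(&p->list);
			kfree(p);
			found++;
		}
	}
	spin_unlock_bh(&pending_lock);
	return found;
}

On a failed transmit, cfctrl_linkup_request() above calls the cancel routine for the layer it just registered and only logs an error if the returned count is not one.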
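
cffrml above gains a per-CPU reference count (pcpu_refcnt) with cffrml_hold()/cffrml_put() on the fast path and cffrml_refcnt_read() summing all CPUs only at teardown, plus a cffrml_free() that releases the per-CPU storage. A sketch of that counting scheme follows; it is illustrative only, uses this_cpu_inc()/this_cpu_dec() where the patch uses the older irqsafe_cpu_inc()/irqsafe_cpu_dec() helpers, and the framing_* names are made up for the example:

#include <linux/cpumask.h>
#include <linux/percpu.h>
#include <linux/slab.h>

struct framing_layer {
	int __percpu *pcpu_refcnt;
	/* ... layer state ... */
};

static struct framing_layer *framing_alloc(void)
{
	struct framing_layer *f = kzalloc(sizeof(*f), GFP_KERNEL);

	if (!f)
		return NULL;
	f->pcpu_refcnt = alloc_percpu(int);
	if (!f->pcpu_refcnt) {
		kfree(f);
		return NULL;
	}
	return f;
}

/* Cheap, contention-free get/put on the data path. */
static void framing_hold(struct framing_layer *f)
{
	this_cpu_inc(*f->pcpu_refcnt);
}

static void framing_put(struct framing_layer *f)
{
	this_cpu_dec(*f->pcpu_refcnt);
}

/* Slow path: sum the per-CPU counters. Only meaningful once new users
 * can no longer look the object up, e.g. during teardown. */
static int framing_refcnt_read(struct framing_layer *f)
{
	int cpu, refcnt = 0;

	for_each_possible_cpu(cpu)
		refcnt += *per_cpu_ptr(f->pcpu_refcnt, cpu);
	return refcnt;
}

static void framing_free(struct framing_layer *f)
{
	free_percpu(f->pcpu_refcnt);
	kfree(f);
}

The trade-off is the usual one for per-CPU counters: increments and decrements never bounce a cache line between CPUs, at the cost of the total only being readable by walking every possible CPU.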
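
The cfmuxl rewrite above drops the receive-path locking in favour of an RCU-protected service list plus a small id-indexed cache, taking spin_lock_bh() only for insert, remove and cache refill. A condensed sketch of that lookup scheme, assuming entries are freed only after an RCU grace period (the patch leaves that to the owning layer); struct mux, mux_lookup() and CACHE_SIZE are illustrative names, not CAIF API:

#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>
#include <linux/types.h>

#define CACHE_SIZE 8

struct entry {
	struct list_head node;
	u16 id;
};

struct mux {
	struct list_head list;                  /* all registered entries */
	struct entry __rcu *cache[CACHE_SIZE];  /* last hit per id slot   */
	spinlock_t lock;                        /* serializes writers     */
};

static struct entry *mux_find(struct mux *m, u16 id)
{
	struct entry *e;

	list_for_each_entry_rcu(e, &m->list, node)
		if (e->id == id)
			return e;
	return NULL;
}

/* Reader side, caller holds rcu_read_lock(): lock-free unless the
 * cache slot misses, in which case the slot is refilled under the lock. */
static struct entry *mux_lookup(struct mux *m, u16 id)
{
	int idx = id % CACHE_SIZE;
	struct entry *e = rcu_dereference(m->cache[idx]);

	if (e == NULL || e->id != id) {
		spin_lock_bh(&m->lock);
		e = mux_find(m, id);
		rcu_assign_pointer(m->cache[idx], e);
		spin_unlock_bh(&m->lock);
	}
	return e;
}

/* Writer side: clear the cache slot and unlink under the lock;
 * actually freeing the entry must wait for a grace period. */
static void mux_remove(struct mux *m, struct entry *e)
{
	spin_lock_bh(&m->lock);
	rcu_assign_pointer(m->cache[e->id % CACHE_SIZE], NULL);
	list_del_rcu(&e->node);
	spin_unlock_bh(&m->lock);
}

Because a layer's receive or transmit callback may block, cfmuxl_receive() and cfmuxl_transmit() above take a reference (cfsrvl_get()/cffrml_hold()) while still inside the RCU read-side section and drop rcu_read_lock() before calling into the layer.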
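
A change that recurs across cfdgml, cfserl, cfutill, cfveil, cfvidl and cffrml above is the packet-ownership convention on the transmit path: instead of transmitting and then stripping the just-added header when the lower layer reports an error, each layer now either hands the packet down unconditionally or destroys it itself before returning an error. A sketch of the resulting shape, using the CAIF packet helpers visible in the patch but a hypothetical ready() check and service header value:

#include <net/caif/caif_layer.h>
#include <net/caif/cfpkt.h>

/* Once this layer has decided to send, it owns the packet: on any
 * local failure it frees it, and on success the lower layer takes
 * over. No header roll-back on error. */
static int svc_transmit(struct cflayer *layr, struct cfpkt *pkt)
{
	int err;
	u8 hdr = 0x00;

	if (!ready(layr, &err)) {            /* hypothetical state check */
		cfpkt_destroy(pkt);          /* consume on local error   */
		return err;
	}
	if (cfpkt_add_head(pkt, &hdr, 1) < 0) {
		cfpkt_destroy(pkt);
		return -EPROTO;
	}
	return layr->dn->transmit(layr->dn, pkt);   /* dn now owns pkt */
}

This is why the hunks above delete the cfpkt_extr_head() calls that used to undo the header on a failed transmit: with a single owner at every point, there is no partially-sent packet to repair.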
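
Finally, chnl_net above registers hold/put callbacks through caif_client_register_refcnt(), backs them with dev_hold()/dev_put() on the network device, and frees the CAIF client from a netdev destructor. A compressed sketch of that wiring; the priv layout mirrors chnl_net, the chnl_* names are illustrative, and the header carrying the caif_client_register_refcnt() declaration is assumed to be the CAIF device header used elsewhere in the patch:

#include <linux/netdevice.h>
#include <net/caif/caif_layer.h>
#include <net/caif/caif_dev.h>

struct chnl_priv {
	struct cflayer chnl;         /* CAIF adaptation layer */
	struct net_device *netdev;   /* backing net device    */
};

/* The CAIF stack pins the net device for as long as it may still
 * call into the adaptation layer. */
static void chnl_hold_cb(struct cflayer *lyr)
{
	struct chnl_priv *priv = container_of(lyr, struct chnl_priv, chnl);

	dev_hold(priv->netdev);
}

static void chnl_put_cb(struct cflayer *lyr)
{
	struct chnl_priv *priv = container_of(lyr, struct chnl_priv, chnl);

	dev_put(priv->netdev);
}

/* Registered once the connect completes, mirroring the
 * CAIF_CTRLCMD_INIT_RSP case in chnl_flowctrl_cb() above. */
static void chnl_connected(struct chnl_priv *priv)
{
	caif_client_register_refcnt(&priv->chnl, chnl_hold_cb, chnl_put_cb);
}

Tying the client's lifetime to the net device refcount is what lets the patch replace dev->destructor = free_netdev with chnl_net_destructor(), which releases the CAIF client (caif_free_client()) before freeing the device.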