author		Linus Torvalds <torvalds@ppc970.osdl.org>	2005-04-16 15:20:36 -0700
committer	Linus Torvalds <torvalds@ppc970.osdl.org>	2005-04-16 15:20:36 -0700
commit		1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 (patch)
tree		0bba044c4ce775e45a88a51686b5d9f90697ea9d /drivers/s390/net
Linux-2.6.12-rc2 (tag: v2.6.12-rc2)
Initial git repository build. I'm not bothering with the full history,
even though we have it. We can create a separate "historical" git
archive of that later if we want to, and in the meantime it's about
3.2GB when imported into git - space that would just make the early
git days unnecessarily complicated, when we don't have a lot of good
infrastructure for it.
Let it rip!
Diffstat (limited to 'drivers/s390/net')
31 files changed, 32463 insertions, 0 deletions
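The claw.c header in this diff documents the driver's sysfs parameters (group, read_buffer, write_buffer, host_name, adapter_name, api_type). As an illustrative sketch only, not part of the commit: reusing the example values from that header and assuming the standard ccwgroup sysfs layout, a CLAW device pair would be configured and brought online roughly as follows.

	# hypothetical paths; assumes the usual ccwgroup layout for this kernel
	echo 0.0.0200,0.0.0201 > /sys/bus/ccwgroup/drivers/claw/group
	echo 25       > /sys/bus/ccwgroup/devices/0.0.0200/read_buffer
	echo 20       > /sys/bus/ccwgroup/devices/0.0.0200/write_buffer
	echo LINUX390 > /sys/bus/ccwgroup/devices/0.0.0200/host_name
	echo RS6K     > /sys/bus/ccwgroup/devices/0.0.0200/adapter_name
	echo TCPIP    > /sys/bus/ccwgroup/devices/0.0.0200/api_type
	echo 1        > /sys/bus/ccwgroup/devices/0.0.0200/online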
diff --git a/drivers/s390/net/Kconfig b/drivers/s390/net/Kconfig
new file mode 100644
index 000000000000..a7efc394515e
--- /dev/null
+++ b/drivers/s390/net/Kconfig
@@ -0,0 +1,108 @@
+menu "S/390 network device drivers"
+	depends on NETDEVICES && ARCH_S390
+
+config LCS
+	tristate "Lan Channel Station Interface"
+	depends on NETDEVICES && (NET_ETHERNET || TR || FDDI)
+	help
+	  Select this option if you want to use LCS networking on IBM S/390
+	  or zSeries. This device driver supports Token Ring (IEEE 802.5),
+	  FDDI (IEEE 802.7) and Ethernet.
+	  This option is also available as a module which will be
+	  called lcs.ko. If you do not know what it is, it's safe to say "Y".
+
+config CTC
+	tristate "CTC device support"
+	depends on NETDEVICES
+	help
+	  Select this option if you want to use channel-to-channel
+	  networking on IBM S/390 or zSeries. This device driver supports
+	  real CTC coupling using ESCON. It also supports virtual CTCs when
+	  running under VM. It will use the channel device configuration if
+	  this is available. This option is also available as a module
+	  which will be called ctc.ko. If you do not know what it is, it's
+	  safe to say "Y".
+
+config IUCV
+	tristate "IUCV support (VM only)"
+	help
+	  Select this option if you want to use inter-user communication
+	  under VM or VIF. If unsure, say "Y" to enable a fast communication
+	  link between VM guests. At boot time the user ID of the guest
+	  needs to be passed to the kernel. Note that both kernels need to
+	  be compiled with this option and both need to be booted with the
+	  user ID of the other VM guest.
+
+config NETIUCV
+	tristate "IUCV network device support (VM only)"
+	depends on IUCV && NETDEVICES
+	help
+	  Select this option if you want to use inter-user communication
+	  vehicle networking under VM or VIF. It enables a fast communication
+	  link between VM guests. Using ifconfig a point-to-point connection
+	  can be established to the Linux for zSeries and S/390 system
+	  running on the other VM guest. This option is also available
+	  as a module which will be called netiucv.ko. If unsure, say "Y".
+
+config SMSGIUCV
+	tristate "IUCV special message support (VM only)"
+	depends on IUCV
+	help
+	  Select this option if you want to be able to receive SMSG messages
+	  from other VM guest systems.
+
+config CLAW
+	tristate "CLAW device support"
+	depends on NETDEVICES
+	help
+	  This driver supports channel attached CLAW devices.
+	  CLAW is Common Link Access for Workstation. Common devices
+	  that use CLAW are RS/6000s, Cisco Routers (CIP) and 3172 devices.
+	  To compile as a module choose M here: the module will be called
+	  claw.ko. To compile into the kernel choose Y.
+
+config QETH
+	tristate "Gigabit Ethernet device support"
+	depends on NETDEVICES && IP_MULTICAST && QDIO
+	help
+	  This driver supports the IBM S/390 and zSeries OSA Express
+	  adapters in QDIO mode (all media types), HiperSockets interfaces
+	  and VM GuestLAN interfaces in QDIO and HIPER mode.
+
+	  For details please refer to the documentation provided by IBM at
+	  <http://www10.software.ibm.com/developerworks/opensource/linux390>
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called qeth.ko.
+
+
+comment "Gigabit Ethernet default settings"
+	depends on QETH
+
+config QETH_IPV6
+	bool "IPv6 support for gigabit ethernet"
+	depends on (QETH = IPV6) || (QETH && IPV6 = 'y')
+	help
+	  If CONFIG_QETH is switched on, this option will include IPv6
+	  support in the qeth device driver.
+
+config QETH_VLAN
+	bool "VLAN support for gigabit ethernet"
+	depends on (QETH = VLAN_8021Q) || (QETH && VLAN_8021Q = 'y')
+	help
+	  If CONFIG_QETH is switched on, this option will include IEEE
+	  802.1q VLAN support in the qeth device driver.
+
+config QETH_PERF_STATS
+	bool "Performance statistics in /proc"
+	depends on QETH
+	help
+	  When switched on, this option will add a file in the proc-fs
+	  (/proc/qeth_perf_stats) containing performance statistics. It
+	  may slightly impact performance, so this is only recommended for
+	  internal tuning of the device driver.
+
+config CCWGROUP
+	tristate
+	default (LCS || CTC || QETH)
+
+endmenu
diff --git a/drivers/s390/net/Makefile b/drivers/s390/net/Makefile
new file mode 100644
index 000000000000..7cabb80a2e41
--- /dev/null
+++ b/drivers/s390/net/Makefile
@@ -0,0 +1,14 @@
+#
+# S/390 network devices
+#
+
+ctc-objs := ctcmain.o ctctty.o ctcdbug.o
+
+obj-$(CONFIG_IUCV) += iucv.o
+obj-$(CONFIG_NETIUCV) += netiucv.o fsm.o
+obj-$(CONFIG_SMSGIUCV) += smsgiucv.o
+obj-$(CONFIG_CTC) += ctc.o fsm.o cu3088.o
+obj-$(CONFIG_LCS) += lcs.o cu3088.o
+qeth-y := qeth_main.o qeth_mpc.o qeth_sys.o qeth_eddp.o qeth_tso.o
+qeth-$(CONFIG_PROC_FS) += qeth_proc.o
+obj-$(CONFIG_QETH) += qeth.o
diff --git a/drivers/s390/net/claw.c b/drivers/s390/net/claw.c
new file mode 100644
index 000000000000..06804d39a9c6
--- /dev/null
+++ b/drivers/s390/net/claw.c
@@ -0,0 +1,4447 @@
+/*
+ *  drivers/s390/net/claw.c
+ *    ESCON CLAW network driver
+ *
+ *    $Revision: 1.35 $	 $Date: 2005/03/24 12:25:38 $
+ *
+ *  Linux for zSeries version
+ *    Copyright (C) 2002,2005 IBM Corporation
+ *  Author(s) Original code written by:
+ *		Kazuo Iimura (iimura@jp.ibm.com)
+ *   Rewritten by
+ *		Andy Richter (richtera@us.ibm.com)
+ *		Marc Price (mwprice@us.ibm.com)
+ *
+ *    sysfs parms:
+ *   group x.x.rrrr,x.x.wwww
+ *   read_buffer nnnnnnn
+ *   write_buffer nnnnnn
+ *   host_name aaaaaaaa
+ *   adapter_name aaaaaaaa
+ *   api_type aaaaaaaa
+ *
+ *  eg.
+ *   group 0.0.0200 0.0.0201
+ *   read_buffer 25
+ *   write_buffer 20
+ *   host_name LINUX390
+ *   adapter_name RS6K
+ *   api_type TCPIP
+ *
+ *  where
+ *
+ *   The device id is decided by the order entries
+ *   are added to the group: the first is claw0, the second claw1,
+ *   up to CLAW_MAX_DEV
+ *
+ *   rrrr	-   the first of 2 consecutive device addresses used for the
+ *		    CLAW protocol.
+ *		    The specified address is always used as the input (Read)
+ *		    channel and the next address is used as the output channel.
+ *
+ *   wwww	-   the second of 2 consecutive device addresses used for
+ *		    the CLAW protocol.
+ *		    The specified address is always used as the output
+ *		    channel and the previous address is used as the input channel.
+ *
+ *   read_buffer	-   specifies number of input buffers to allocate.
+ *   write_buffer	-   specifies number of output buffers to allocate.
+ *   host_name		-   host name
+ *   adapter_name	-   adapter name
+ *   api_type		-   API type, TCPIP or API, will be sent and expected
+ *			    as ws_name
+ *
+ *   Note the following requirements:
+ *   1) host_name must match the configured adapter_name on the remote side
+ *   2) adapter_name must match the configured host name on the remote side
+ *
+ *  Change History
+ *    1.00  Initial release shipped
+ *    1.10  Changes for Buffer allocation
+ *    1.15  Changed for 2.6 Kernel.  No longer compiles on 2.4 or lower
+ *    1.25  Added Packing support
+ */
+#include <asm/bitops.h>
+#include <asm/ccwdev.h>
+#include <asm/ccwgroup.h>
+#include <asm/debug.h>
+#include <asm/idals.h>
+#include <asm/io.h>
+
+#include <linux/ctype.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/if_arp.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/ip.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/proc_fs.h>
+#include <linux/sched.h>
+#include <linux/signal.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/tcp.h>
+#include <linux/timer.h>
+#include <linux/types.h>
+#include <linux/version.h>
+
+#include "cu3088.h"
+#include "claw.h"
+
+MODULE_AUTHOR("Andy Richter <richtera@us.ibm.com>");
+MODULE_DESCRIPTION("Linux for zSeries CLAW Driver\n" \
+	"Copyright 2000,2005 IBM Corporation\n");
+MODULE_LICENSE("GPL");
+
+/* Debugging is based on DEBUGMSG, IOTRACE, or FUNCTRACE options:
+   DEBUGMSG  - Enables output of various debug messages in the code
+   IOTRACE   - Enables output of CCW and other IO related traces
+   FUNCTRACE - Enables output of function entry/exit trace
+   Define any combination of above options to enable tracing
+
+   CLAW also uses the s390dbf file system, see claw_trace and claw_setup
+*/
+
+/* following enables tracing */
+//#define DEBUGMSG
+//#define IOTRACE
+//#define FUNCTRACE
+
+#ifdef DEBUGMSG
+#define DEBUG
+#endif
+
+#ifdef IOTRACE
+#define DEBUG
+#endif
+
+#ifdef FUNCTRACE
+#define DEBUG
+#endif
+
+char debug_buffer[255];
+/**
+ * Debug Facility Stuff
+ */
+static debug_info_t *claw_dbf_setup;
+static debug_info_t *claw_dbf_trace;
+
+/**
+ *  CLAW Debug Facility functions
+ */
+static void
+claw_unregister_debug_facility(void)
+{
+	if (claw_dbf_setup)
+		debug_unregister(claw_dbf_setup);
+	if (claw_dbf_trace)
+		debug_unregister(claw_dbf_trace);
+}
+
+static int
+claw_register_debug_facility(void)
+{
+	claw_dbf_setup = debug_register("claw_setup", 1, 1, 8);
+	claw_dbf_trace = debug_register("claw_trace", 1, 2, 8);
+	if (claw_dbf_setup == NULL || claw_dbf_trace == NULL) {
+		printk(KERN_WARNING "Not enough memory for debug facility.\n");
+		claw_unregister_debug_facility();
+		return -ENOMEM;
+	}
+	debug_register_view(claw_dbf_setup, &debug_hex_ascii_view);
+	debug_set_level(claw_dbf_setup, 2);
+	debug_register_view(claw_dbf_trace, &debug_hex_ascii_view);
+	debug_set_level(claw_dbf_trace, 2);
+	return 0;
+}
+
+static inline void
+claw_set_busy(struct net_device *dev)
+{
+	((struct claw_privbk *)dev->priv)->tbusy = 1;
+	eieio();
+}
+
+static inline void
+claw_clear_busy(struct net_device *dev)
+{
+	clear_bit(0, &(((struct claw_privbk *) dev->priv)->tbusy));
+	netif_wake_queue(dev);
+	eieio();
+}
+
+static inline int
+claw_check_busy(struct net_device *dev)
+{
+	eieio();
+	return ((struct claw_privbk *) dev->priv)->tbusy;
+}
+
+static inline void
+claw_setbit_busy(int nr, struct net_device *dev)
+{
+	netif_stop_queue(dev);
+	set_bit(nr, (void *)&(((struct claw_privbk *)dev->priv)->tbusy));
+}
+
+static inline void
+claw_clearbit_busy(int nr, struct net_device *dev)
+{
+	clear_bit(nr, (void *)&(((struct claw_privbk *)dev->priv)->tbusy));
+	netif_wake_queue(dev);
+}
+
+static inline int
+claw_test_and_setbit_busy(int nr, struct net_device *dev)
+{
+	netif_stop_queue(dev);
+	return test_and_set_bit(nr,
+		(void *)&(((struct claw_privbk *) dev->priv)->tbusy));
+}
+
+
+/* Functions for the DEV methods */
+
+static int claw_probe(struct ccwgroup_device *cgdev);
+static void claw_remove_device(struct ccwgroup_device *cgdev);
+static void claw_purge_skb_queue(struct sk_buff_head *q);
+static int claw_new_device(struct ccwgroup_device *cgdev);
+static int claw_shutdown_device(struct ccwgroup_device *cgdev);
+static int claw_tx(struct sk_buff *skb, struct net_device *dev);
+static int claw_change_mtu(struct net_device *dev, int new_mtu);
+static int claw_open(struct net_device *dev);
+static void claw_irq_handler(struct ccw_device *cdev,
+	unsigned long intparm, struct irb *irb);
+static void claw_irq_tasklet(unsigned long data);
+static int claw_release(struct net_device *dev);
+static void claw_write_retry(struct chbk *p_ch);
+static void claw_write_next(struct chbk *p_ch);
+static void claw_timer(struct chbk *p_ch);
+
+/* Functions */
+static int add_claw_reads(struct net_device *dev,
+	struct ccwbk *p_first, struct ccwbk *p_last);
+static void inline ccw_check_return_code(struct ccw_device *cdev,
+	int return_code);
+static void inline ccw_check_unit_check(struct chbk *p_ch,
+	unsigned char sense);
+static int find_link(struct net_device *dev, char *host_name, char *ws_name);
+static int claw_hw_tx(struct sk_buff *skb, struct net_device *dev, long linkid);
+static int init_ccw_bk(struct net_device *dev);
+static void probe_error(struct ccwgroup_device *cgdev);
+static struct net_device_stats *claw_stats(struct net_device *dev);
+static int inline pages_to_order_of_mag(int num_of_pages);
+static struct sk_buff *claw_pack_skb(struct claw_privbk *privptr);
+#ifdef DEBUG
+static void dumpit(char *buf, int len);
+#endif
+/* sysfs Functions */
+static ssize_t claw_hname_show(struct device *dev, char *buf);
+static ssize_t claw_hname_write(struct device *dev,
+	const char *buf, size_t count);
+static ssize_t claw_adname_show(struct device *dev, char *buf);
+static ssize_t claw_adname_write(struct device *dev,
+	const char *buf, size_t count);
+static ssize_t claw_apname_show(struct device *dev, char *buf);
+static ssize_t claw_apname_write(struct device *dev,
+	const char *buf, size_t count);
+static ssize_t claw_wbuff_show(struct device *dev, char *buf);
+static ssize_t claw_wbuff_write(struct device *dev,
+	const char *buf, size_t count);
+static ssize_t claw_rbuff_show(struct device *dev, char *buf);
+static ssize_t claw_rbuff_write(struct device *dev,
+	const char *buf, size_t count);
+static int claw_add_files(struct device *dev);
+static void claw_remove_files(struct device *dev);
+
+/* Functions for System Validate */
+static int claw_process_control(struct net_device *dev, struct ccwbk *p_ccw);
+static int claw_send_control(struct net_device *dev, __u8 type, __u8 link,
+	__u8 correlator, __u8 rc, char *local_name, char *remote_name);
+static int claw_snd_conn_req(struct net_device *dev, __u8 link);
+static int claw_snd_disc(struct net_device *dev, struct clawctl *p_ctl);
+static int claw_snd_sys_validate_rsp(struct net_device *dev,
+	struct clawctl *p_ctl, __u32 return_code);
+static int claw_strt_conn_req(struct
net_device *dev ); +static void claw_strt_read ( struct net_device *dev, int lock ); +static void claw_strt_out_IO( struct net_device *dev ); +static void claw_free_wrt_buf( struct net_device *dev ); + +/* Functions for unpack reads */ +static void unpack_read (struct net_device *dev ); + +/* ccwgroup table */ + +static struct ccwgroup_driver claw_group_driver = { + .owner = THIS_MODULE, + .name = "claw", + .max_slaves = 2, + .driver_id = 0xC3D3C1E6, + .probe = claw_probe, + .remove = claw_remove_device, + .set_online = claw_new_device, + .set_offline = claw_shutdown_device, +}; + +/* +* +* Key functions +*/ + +/*----------------------------------------------------------------* + * claw_probe * + * this function is called for each CLAW device. * + *----------------------------------------------------------------*/ +static int +claw_probe(struct ccwgroup_device *cgdev) +{ + int rc; + struct claw_privbk *privptr=NULL; + +#ifdef FUNCTRACE + printk(KERN_INFO "%s Enter\n",__FUNCTION__); +#endif + CLAW_DBF_TEXT(2,setup,"probe"); + if (!get_device(&cgdev->dev)) + return -ENODEV; +#ifdef DEBUGMSG + printk(KERN_INFO "claw: variable cgdev =\n"); + dumpit((char *)cgdev, sizeof(struct ccwgroup_device)); +#endif + privptr = kmalloc(sizeof(struct claw_privbk), GFP_KERNEL); + if (privptr == NULL) { + probe_error(cgdev); + put_device(&cgdev->dev); + printk(KERN_WARNING "Out of memory %s %s Exit Line %d \n", + cgdev->cdev[0]->dev.bus_id,__FUNCTION__,__LINE__); + CLAW_DBF_TEXT_(2,setup,"probex%d",-ENOMEM); + return -ENOMEM; + } + memset(privptr,0x00,sizeof(struct claw_privbk)); + privptr->p_mtc_envelope= kmalloc( MAX_ENVELOPE_SIZE, GFP_KERNEL); + privptr->p_env = kmalloc(sizeof(struct claw_env), GFP_KERNEL); + if ((privptr->p_mtc_envelope==NULL) || (privptr->p_env==NULL)) { + probe_error(cgdev); + put_device(&cgdev->dev); + printk(KERN_WARNING "Out of memory %s %s Exit Line %d \n", + cgdev->cdev[0]->dev.bus_id,__FUNCTION__,__LINE__); + CLAW_DBF_TEXT_(2,setup,"probex%d",-ENOMEM); + return -ENOMEM; + } + memset(privptr->p_mtc_envelope, 0x00, MAX_ENVELOPE_SIZE); + memset(privptr->p_env, 0x00, sizeof(struct claw_env)); + memcpy(privptr->p_env->adapter_name,WS_NAME_NOT_DEF,8); + memcpy(privptr->p_env->host_name,WS_NAME_NOT_DEF,8); + memcpy(privptr->p_env->api_type,WS_NAME_NOT_DEF,8); + privptr->p_env->packing = 0; + privptr->p_env->write_buffers = 5; + privptr->p_env->read_buffers = 5; + privptr->p_env->read_size = CLAW_FRAME_SIZE; + privptr->p_env->write_size = CLAW_FRAME_SIZE; + rc = claw_add_files(&cgdev->dev); + if (rc) { + probe_error(cgdev); + put_device(&cgdev->dev); + printk(KERN_WARNING "add_files failed %s %s Exit Line %d \n", + cgdev->cdev[0]->dev.bus_id,__FUNCTION__,__LINE__); + CLAW_DBF_TEXT_(2,setup,"probex%d",rc); + return rc; + } + printk(KERN_INFO "claw: sysfs files added for %s\n",cgdev->cdev[0]->dev.bus_id); + privptr->p_env->p_priv = privptr; + cgdev->cdev[0]->handler = claw_irq_handler; + cgdev->cdev[1]->handler = claw_irq_handler; + cgdev->dev.driver_data = privptr; +#ifdef FUNCTRACE + printk(KERN_INFO "claw:%s exit on line %d, " + "rc = 0\n",__FUNCTION__,__LINE__); +#endif + CLAW_DBF_TEXT(2,setup,"prbext 0"); + + return 0; +} /* end of claw_probe */ + +/*-------------------------------------------------------------------* + * claw_tx * + *-------------------------------------------------------------------*/ + +static int +claw_tx(struct sk_buff *skb, struct net_device *dev) +{ + int rc; + struct claw_privbk *privptr=dev->priv; + unsigned long saveflags; + struct chbk *p_ch; + +#ifdef 
FUNCTRACE + printk(KERN_INFO "%s:%s enter\n",dev->name,__FUNCTION__); +#endif + CLAW_DBF_TEXT(4,trace,"claw_tx"); + p_ch=&privptr->channel[WRITE]; + if (skb == NULL) { + printk(KERN_WARNING "%s: null pointer passed as sk_buffer\n", + dev->name); + privptr->stats.tx_dropped++; +#ifdef FUNCTRACE + printk(KERN_INFO "%s: %s() exit on line %d, rc = EIO\n", + dev->name,__FUNCTION__, __LINE__); +#endif + CLAW_DBF_TEXT_(2,trace,"clawtx%d",-EIO); + return -EIO; + } + +#ifdef IOTRACE + printk(KERN_INFO "%s: variable sk_buff=\n",dev->name); + dumpit((char *) skb, sizeof(struct sk_buff)); + printk(KERN_INFO "%s: variable dev=\n",dev->name); + dumpit((char *) dev, sizeof(struct net_device)); +#endif + spin_lock_irqsave(get_ccwdev_lock(p_ch->cdev), saveflags); + rc=claw_hw_tx( skb, dev, 1 ); + spin_unlock_irqrestore(get_ccwdev_lock(p_ch->cdev), saveflags); +#ifdef FUNCTRACE + printk(KERN_INFO "%s:%s exit on line %d, rc = %d\n", + dev->name, __FUNCTION__, __LINE__, rc); +#endif + CLAW_DBF_TEXT_(4,trace,"clawtx%d",rc); + return rc; +} /* end of claw_tx */ + +/*------------------------------------------------------------------* + * pack the collect queue into an skb and return it * + * If not packing just return the top skb from the queue * + *------------------------------------------------------------------*/ + +static struct sk_buff * +claw_pack_skb(struct claw_privbk *privptr) +{ + struct sk_buff *new_skb,*held_skb; + struct chbk *p_ch = &privptr->channel[WRITE]; + struct claw_env *p_env = privptr->p_env; + int pkt_cnt,pk_ind,so_far; + + new_skb = NULL; /* assume no dice */ + pkt_cnt = 0; + CLAW_DBF_TEXT(4,trace,"PackSKBe"); + if (skb_queue_len(&p_ch->collect_queue) > 0) { + /* some data */ + held_skb = skb_dequeue(&p_ch->collect_queue); + if (p_env->packing != DO_PACKED) + return held_skb; + if (held_skb) + atomic_dec(&held_skb->users); + else + return NULL; + /* get a new SKB we will pack at least one */ + new_skb = dev_alloc_skb(p_env->write_size); + if (new_skb == NULL) { + atomic_inc(&held_skb->users); + skb_queue_head(&p_ch->collect_queue,held_skb); + return NULL; + } + /* we have packed packet and a place to put it */ + pk_ind = 1; + so_far = 0; + new_skb->cb[1] = 'P'; /* every skb on queue has pack header */ + while ((pk_ind) && (held_skb != NULL)) { + if (held_skb->len+so_far <= p_env->write_size-8) { + memcpy(skb_put(new_skb,held_skb->len), + held_skb->data,held_skb->len); + privptr->stats.tx_packets++; + so_far += held_skb->len; + pkt_cnt++; + dev_kfree_skb_irq(held_skb); + held_skb = skb_dequeue(&p_ch->collect_queue); + if (held_skb) + atomic_dec(&held_skb->users); + } else { + pk_ind = 0; + atomic_inc(&held_skb->users); + skb_queue_head(&p_ch->collect_queue,held_skb); + } + } +#ifdef IOTRACE + printk(KERN_INFO "%s: %s() Packed %d len %d\n", + p_env->ndev->name, + __FUNCTION__,pkt_cnt,new_skb->len); +#endif + } + CLAW_DBF_TEXT(4,trace,"PackSKBx"); + return new_skb; +} + +/*-------------------------------------------------------------------* + * claw_change_mtu * + * * + *-------------------------------------------------------------------*/ + +static int +claw_change_mtu(struct net_device *dev, int new_mtu) +{ + struct claw_privbk *privptr=dev->priv; + int buff_size; +#ifdef FUNCTRACE + printk(KERN_INFO "%s:%s Enter \n",dev->name,__FUNCTION__); +#endif +#ifdef DEBUGMSG + printk(KERN_INFO "variable dev =\n"); + dumpit((char *) dev, sizeof(struct net_device)); + printk(KERN_INFO "variable new_mtu = %d\n", new_mtu); +#endif + CLAW_DBF_TEXT(4,trace,"setmtu"); + buff_size = 
privptr->p_env->write_size; + if ((new_mtu < 60) || (new_mtu > buff_size)) { +#ifdef FUNCTRACE + printk(KERN_INFO "%s:%s Exit on line %d, rc=EINVAL\n", + dev->name, + __FUNCTION__, __LINE__); +#endif + return -EINVAL; + } + dev->mtu = new_mtu; +#ifdef FUNCTRACE + printk(KERN_INFO "%s:%s Exit on line %d\n",dev->name, + __FUNCTION__, __LINE__); +#endif + return 0; +} /* end of claw_change_mtu */ + + +/*-------------------------------------------------------------------* + * claw_open * + * * + *-------------------------------------------------------------------*/ +static int +claw_open(struct net_device *dev) +{ + + int rc; + int i; + unsigned long saveflags=0; + unsigned long parm; + struct claw_privbk *privptr; + DECLARE_WAITQUEUE(wait, current); + struct timer_list timer; + struct ccwbk *p_buf; + +#ifdef FUNCTRACE + printk(KERN_INFO "%s:%s Enter \n",dev->name,__FUNCTION__); +#endif + CLAW_DBF_TEXT(4,trace,"open"); + if (!dev | (dev->name[0] == 0x00)) { + CLAW_DBF_TEXT(2,trace,"BadDev"); + printk(KERN_WARNING "claw: Bad device at open failing \n"); + return -ENODEV; + } + privptr = (struct claw_privbk *)dev->priv; + /* allocate and initialize CCW blocks */ + if (privptr->buffs_alloc == 0) { + rc=init_ccw_bk(dev); + if (rc) { + printk(KERN_INFO "%s:%s Exit on line %d, rc=ENOMEM\n", + dev->name, + __FUNCTION__, __LINE__); + CLAW_DBF_TEXT(2,trace,"openmem"); + return -ENOMEM; + } + } + privptr->system_validate_comp=0; + privptr->release_pend=0; + if(strncmp(privptr->p_env->api_type,WS_APPL_NAME_PACKED,6) == 0) { + privptr->p_env->read_size=DEF_PACK_BUFSIZE; + privptr->p_env->write_size=DEF_PACK_BUFSIZE; + privptr->p_env->packing=PACKING_ASK; + } else { + privptr->p_env->packing=0; + privptr->p_env->read_size=CLAW_FRAME_SIZE; + privptr->p_env->write_size=CLAW_FRAME_SIZE; + } + claw_set_busy(dev); + tasklet_init(&privptr->channel[READ].tasklet, claw_irq_tasklet, + (unsigned long) &privptr->channel[READ]); + for ( i = 0; i < 2; i++) { + CLAW_DBF_TEXT_(2,trace,"opn_ch%d",i); + init_waitqueue_head(&privptr->channel[i].wait); + /* skb_queue_head_init(&p_ch->io_queue); */ + if (i == WRITE) + skb_queue_head_init( + &privptr->channel[WRITE].collect_queue); + privptr->channel[i].flag_a = 0; + privptr->channel[i].IO_active = 0; + privptr->channel[i].flag &= ~CLAW_TIMER; + init_timer(&timer); + timer.function = (void *)claw_timer; + timer.data = (unsigned long)(&privptr->channel[i]); + timer.expires = jiffies + 15*HZ; + add_timer(&timer); + spin_lock_irqsave(get_ccwdev_lock( + privptr->channel[i].cdev), saveflags); + parm = (unsigned long) &privptr->channel[i]; + privptr->channel[i].claw_state = CLAW_START_HALT_IO; + rc = 0; + add_wait_queue(&privptr->channel[i].wait, &wait); + rc = ccw_device_halt( + (struct ccw_device *)privptr->channel[i].cdev,parm); + set_current_state(TASK_INTERRUPTIBLE); + spin_unlock_irqrestore( + get_ccwdev_lock(privptr->channel[i].cdev), saveflags); + schedule(); + set_current_state(TASK_RUNNING); + remove_wait_queue(&privptr->channel[i].wait, &wait); + if(rc != 0) + ccw_check_return_code(privptr->channel[i].cdev, rc); + if((privptr->channel[i].flag & CLAW_TIMER) == 0x00) + del_timer(&timer); + } + if ((((privptr->channel[READ].last_dstat | + privptr->channel[WRITE].last_dstat) & + ~(DEV_STAT_CHN_END | DEV_STAT_DEV_END)) != 0x00) || + (((privptr->channel[READ].flag | + privptr->channel[WRITE].flag) & CLAW_TIMER) != 0x00)) { +#ifdef DEBUGMSG + printk(KERN_INFO "%s: channel problems during open - read:" + " %02x - write: %02x\n", + dev->name, + privptr->channel[READ].last_dstat, + 
privptr->channel[WRITE].last_dstat); +#endif + printk(KERN_INFO "%s: remote side is not ready\n", dev->name); + CLAW_DBF_TEXT(2,trace,"notrdy"); + + for ( i = 0; i < 2; i++) { + spin_lock_irqsave( + get_ccwdev_lock(privptr->channel[i].cdev), + saveflags); + parm = (unsigned long) &privptr->channel[i]; + privptr->channel[i].claw_state = CLAW_STOP; + rc = ccw_device_halt( + (struct ccw_device *)&privptr->channel[i].cdev, + parm); + spin_unlock_irqrestore( + get_ccwdev_lock(privptr->channel[i].cdev), + saveflags); + if (rc != 0) { + ccw_check_return_code( + privptr->channel[i].cdev, rc); + } + } + free_pages((unsigned long)privptr->p_buff_ccw, + (int)pages_to_order_of_mag(privptr->p_buff_ccw_num)); + if (privptr->p_env->read_size < PAGE_SIZE) { + free_pages((unsigned long)privptr->p_buff_read, + (int)pages_to_order_of_mag( + privptr->p_buff_read_num)); + } + else { + p_buf=privptr->p_read_active_first; + while (p_buf!=NULL) { + free_pages((unsigned long)p_buf->p_buffer, + (int)pages_to_order_of_mag( + privptr->p_buff_pages_perread )); + p_buf=p_buf->next; + } + } + if (privptr->p_env->write_size < PAGE_SIZE ) { + free_pages((unsigned long)privptr->p_buff_write, + (int)pages_to_order_of_mag( + privptr->p_buff_write_num)); + } + else { + p_buf=privptr->p_write_active_first; + while (p_buf!=NULL) { + free_pages((unsigned long)p_buf->p_buffer, + (int)pages_to_order_of_mag( + privptr->p_buff_pages_perwrite )); + p_buf=p_buf->next; + } + } + privptr->buffs_alloc = 0; + privptr->channel[READ].flag= 0x00; + privptr->channel[WRITE].flag = 0x00; + privptr->p_buff_ccw=NULL; + privptr->p_buff_read=NULL; + privptr->p_buff_write=NULL; + claw_clear_busy(dev); +#ifdef FUNCTRACE + printk(KERN_INFO "%s:%s Exit on line %d, rc=EIO\n", + dev->name,__FUNCTION__,__LINE__); +#endif + CLAW_DBF_TEXT(2,trace,"open EIO"); + return -EIO; + } + + /* Send SystemValidate command */ + + claw_clear_busy(dev); + +#ifdef FUNCTRACE + printk(KERN_INFO "%s:%s Exit on line %d, rc=0\n", + dev->name,__FUNCTION__,__LINE__); +#endif + CLAW_DBF_TEXT(4,trace,"openok"); + return 0; +} /* end of claw_open */ + +/*-------------------------------------------------------------------* +* * +* claw_irq_handler * +* * +*--------------------------------------------------------------------*/ +static void +claw_irq_handler(struct ccw_device *cdev, + unsigned long intparm, struct irb *irb) +{ + struct chbk *p_ch = NULL; + struct claw_privbk *privptr = NULL; + struct net_device *dev = NULL; + struct claw_env *p_env; + struct chbk *p_ch_r=NULL; + + +#ifdef FUNCTRACE + printk(KERN_INFO "%s enter \n",__FUNCTION__); +#endif + CLAW_DBF_TEXT(4,trace,"clawirq"); + /* Bypass all 'unsolicited interrupts' */ + if (!cdev->dev.driver_data) { + printk(KERN_WARNING "claw: unsolicited interrupt for device:" + "%s received c-%02x d-%02x\n", + cdev->dev.bus_id,irb->scsw.cstat, irb->scsw.dstat); +#ifdef FUNCTRACE + printk(KERN_INFO "claw: %s() " + "exit on line %d\n",__FUNCTION__,__LINE__); +#endif + CLAW_DBF_TEXT(2,trace,"badirq"); + return; + } + privptr = (struct claw_privbk *)cdev->dev.driver_data; + + /* Try to extract channel from driver data. 
*/ + if (privptr->channel[READ].cdev == cdev) + p_ch = &privptr->channel[READ]; + else if (privptr->channel[WRITE].cdev == cdev) + p_ch = &privptr->channel[WRITE]; + else { + printk(KERN_WARNING "claw: Can't determine channel for " + "interrupt, device %s\n", cdev->dev.bus_id); + CLAW_DBF_TEXT(2,trace,"badchan"); + return; + } + CLAW_DBF_TEXT_(4,trace,"IRQCH=%d",p_ch->flag); + + dev = (struct net_device *) (p_ch->ndev); + p_env=privptr->p_env; + +#ifdef IOTRACE + printk(KERN_INFO "%s: interrupt for device: %04x " + "received c-%02x d-%02x state-%02x\n", + dev->name, p_ch->devno, irb->scsw.cstat, + irb->scsw.dstat, p_ch->claw_state); +#endif + + /* Copy interruption response block. */ + memcpy(p_ch->irb, irb, sizeof(struct irb)); + + /* Check for good subchannel return code, otherwise error message */ + if (irb->scsw.cstat && !(irb->scsw.cstat & SCHN_STAT_PCI)) { + printk(KERN_INFO "%s: subchannel check for device: %04x -" + " Sch Stat %02x Dev Stat %02x CPA - %04x\n", + dev->name, p_ch->devno, + irb->scsw.cstat, irb->scsw.dstat,irb->scsw.cpa); +#ifdef IOTRACE + dumpit((char *)irb,sizeof(struct irb)); + dumpit((char *)(unsigned long)irb->scsw.cpa, + sizeof(struct ccw1)); +#endif +#ifdef FUNCTRACE + printk(KERN_INFO "%s:%s Exit on line %d\n", + dev->name,__FUNCTION__,__LINE__); +#endif + CLAW_DBF_TEXT(2,trace,"chanchk"); + /* return; */ + } + + /* Check the reason-code of a unit check */ + if (irb->scsw.dstat & DEV_STAT_UNIT_CHECK) { + ccw_check_unit_check(p_ch, irb->ecw[0]); + } + + /* State machine to bring the connection up, down and to restart */ + p_ch->last_dstat = irb->scsw.dstat; + + switch (p_ch->claw_state) { + case CLAW_STOP:/* HALT_IO by claw_release (halt sequence) */ +#ifdef DEBUGMSG + printk(KERN_INFO "%s: CLAW_STOP enter\n", dev->name); +#endif + if (!((p_ch->irb->scsw.stctl & SCSW_STCTL_SEC_STATUS) || + (p_ch->irb->scsw.stctl == SCSW_STCTL_STATUS_PEND) || + (p_ch->irb->scsw.stctl == + (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND)))) { +#ifdef FUNCTRACE + printk(KERN_INFO "%s:%s Exit on line %d\n", + dev->name,__FUNCTION__,__LINE__); +#endif + return; + } + wake_up(&p_ch->wait); /* wake up claw_release */ + +#ifdef DEBUGMSG + printk(KERN_INFO "%s: CLAW_STOP exit\n", dev->name); +#endif +#ifdef FUNCTRACE + printk(KERN_INFO "%s:%s Exit on line %d\n", + dev->name,__FUNCTION__,__LINE__); +#endif + CLAW_DBF_TEXT(4,trace,"stop"); + return; + + case CLAW_START_HALT_IO: /* HALT_IO issued by claw_open */ +#ifdef DEBUGMSG + printk(KERN_INFO "%s: process CLAW_STAT_HALT_IO\n", + dev->name); +#endif + if (!((p_ch->irb->scsw.stctl & SCSW_STCTL_SEC_STATUS) || + (p_ch->irb->scsw.stctl == SCSW_STCTL_STATUS_PEND) || + (p_ch->irb->scsw.stctl == + (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND)))) { +#ifdef FUNCTRACE + printk(KERN_INFO "%s:%s Exit on line %d\n", + dev->name,__FUNCTION__,__LINE__); +#endif + CLAW_DBF_TEXT(4,trace,"haltio"); + return; + } + if (p_ch->flag == CLAW_READ) { + p_ch->claw_state = CLAW_START_READ; + wake_up(&p_ch->wait); /* wake claw_open (READ)*/ + } + else + if (p_ch->flag == CLAW_WRITE) { + p_ch->claw_state = CLAW_START_WRITE; + /* send SYSTEM_VALIDATE */ + claw_strt_read(dev, LOCK_NO); + claw_send_control(dev, + SYSTEM_VALIDATE_REQUEST, + 0, 0, 0, + p_env->host_name, + p_env->adapter_name ); + } else { + printk(KERN_WARNING "claw: unsolicited " + "interrupt for device:" + "%s received c-%02x d-%02x\n", + cdev->dev.bus_id, + irb->scsw.cstat, + irb->scsw.dstat); + return; + } +#ifdef DEBUGMSG + printk(KERN_INFO "%s: process CLAW_STAT_HALT_IO exit\n", + 
dev->name); +#endif +#ifdef FUNCTRACE + printk(KERN_INFO "%s:%s Exit on line %d\n", + dev->name,__FUNCTION__,__LINE__); +#endif + CLAW_DBF_TEXT(4,trace,"haltio"); + return; + case CLAW_START_READ: + CLAW_DBF_TEXT(4,trace,"ReadIRQ"); + if (p_ch->irb->scsw.dstat & DEV_STAT_UNIT_CHECK) { + clear_bit(0, (void *)&p_ch->IO_active); + if ((p_ch->irb->ecw[0] & 0x41) == 0x41 || + (p_ch->irb->ecw[0] & 0x40) == 0x40 || + (p_ch->irb->ecw[0]) == 0) + { + privptr->stats.rx_errors++; + printk(KERN_INFO "%s: Restart is " + "required after remote " + "side recovers \n", + dev->name); + } +#ifdef FUNCTRACE + printk(KERN_INFO "%s:%s Exit on line %d\n", + dev->name,__FUNCTION__,__LINE__); +#endif + CLAW_DBF_TEXT(4,trace,"notrdy"); + return; + } + if ((p_ch->irb->scsw.cstat & SCHN_STAT_PCI) && + (p_ch->irb->scsw.dstat==0)) { + if (test_and_set_bit(CLAW_BH_ACTIVE, + (void *)&p_ch->flag_a) == 0) { + tasklet_schedule(&p_ch->tasklet); + } + else { + CLAW_DBF_TEXT(4,trace,"PCINoBH"); + } +#ifdef FUNCTRACE + printk(KERN_INFO "%s:%s Exit on line %d\n", + dev->name,__FUNCTION__,__LINE__); +#endif + CLAW_DBF_TEXT(4,trace,"PCI_read"); + return; + } + if(!((p_ch->irb->scsw.stctl & SCSW_STCTL_SEC_STATUS) || + (p_ch->irb->scsw.stctl == SCSW_STCTL_STATUS_PEND) || + (p_ch->irb->scsw.stctl == + (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND)))) { +#ifdef FUNCTRACE + printk(KERN_INFO "%s:%s Exit on line %d\n", + dev->name,__FUNCTION__,__LINE__); +#endif + CLAW_DBF_TEXT(4,trace,"SPend_rd"); + return; + } + clear_bit(0, (void *)&p_ch->IO_active); + claw_clearbit_busy(TB_RETRY,dev); + if (test_and_set_bit(CLAW_BH_ACTIVE, + (void *)&p_ch->flag_a) == 0) { + tasklet_schedule(&p_ch->tasklet); + } + else { + CLAW_DBF_TEXT(4,trace,"RdBHAct"); + } + +#ifdef DEBUGMSG + printk(KERN_INFO "%s: process CLAW_START_READ exit\n", + dev->name); +#endif +#ifdef FUNCTRACE + printk(KERN_INFO "%s:%s Exit on line %d\n", + dev->name,__FUNCTION__,__LINE__); +#endif + CLAW_DBF_TEXT(4,trace,"RdIRQXit"); + return; + case CLAW_START_WRITE: + if (p_ch->irb->scsw.dstat & DEV_STAT_UNIT_CHECK) { + printk(KERN_INFO "%s: Unit Check Occured in " + "write channel\n",dev->name); + clear_bit(0, (void *)&p_ch->IO_active); + if (p_ch->irb->ecw[0] & 0x80 ) { + printk(KERN_INFO "%s: Resetting Event " + "occurred:\n",dev->name); + init_timer(&p_ch->timer); + p_ch->timer.function = + (void *)claw_write_retry; + p_ch->timer.data = (unsigned long)p_ch; + p_ch->timer.expires = jiffies + 10*HZ; + add_timer(&p_ch->timer); + printk(KERN_INFO "%s: write connection " + "restarting\n",dev->name); + } +#ifdef FUNCTRACE + printk(KERN_INFO "%s:%s Exit on line %d\n", + dev->name,__FUNCTION__,__LINE__); +#endif + CLAW_DBF_TEXT(4,trace,"rstrtwrt"); + return; + } + if (p_ch->irb->scsw.dstat & DEV_STAT_UNIT_EXCEP) { + clear_bit(0, (void *)&p_ch->IO_active); + printk(KERN_INFO "%s: Unit Exception " + "Occured in write channel\n", + dev->name); + } + if(!((p_ch->irb->scsw.stctl & SCSW_STCTL_SEC_STATUS) || + (p_ch->irb->scsw.stctl == SCSW_STCTL_STATUS_PEND) || + (p_ch->irb->scsw.stctl == + (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND)))) { +#ifdef FUNCTRACE + printk(KERN_INFO "%s:%s Exit on line %d\n", + dev->name,__FUNCTION__,__LINE__); +#endif + CLAW_DBF_TEXT(4,trace,"writeUE"); + return; + } + clear_bit(0, (void *)&p_ch->IO_active); + if (claw_test_and_setbit_busy(TB_TX,dev)==0) { + claw_write_next(p_ch); + claw_clearbit_busy(TB_TX,dev); + claw_clear_busy(dev); + } + p_ch_r=(struct chbk *)&privptr->channel[READ]; + if (test_and_set_bit(CLAW_BH_ACTIVE, + (void *)&p_ch_r->flag_a) == 
0) { + tasklet_schedule(&p_ch_r->tasklet); + } + +#ifdef DEBUGMSG + printk(KERN_INFO "%s: process CLAW_START_WRITE exit\n", + dev->name); +#endif +#ifdef FUNCTRACE + printk(KERN_INFO "%s:%s Exit on line %d\n", + dev->name,__FUNCTION__,__LINE__); +#endif + CLAW_DBF_TEXT(4,trace,"StWtExit"); + return; + default: + printk(KERN_WARNING "%s: wrong selection code - irq " + "state=%d\n",dev->name,p_ch->claw_state); +#ifdef FUNCTRACE + printk(KERN_INFO "%s:%s Exit on line %d\n", + dev->name,__FUNCTION__,__LINE__); +#endif + CLAW_DBF_TEXT(2,trace,"badIRQ"); + return; + } + +} /* end of claw_irq_handler */ + + +/*-------------------------------------------------------------------* +* claw_irq_tasklet * +* * +*--------------------------------------------------------------------*/ +static void +claw_irq_tasklet ( unsigned long data ) +{ + struct chbk * p_ch; + struct net_device *dev; + struct claw_privbk * privptr; + + p_ch = (struct chbk *) data; + dev = (struct net_device *)p_ch->ndev; +#ifdef FUNCTRACE + printk(KERN_INFO "%s:%s Enter \n",dev->name,__FUNCTION__); +#endif +#ifdef DEBUGMSG + printk(KERN_INFO "%s: variable p_ch =\n",dev->name); + dumpit((char *) p_ch, sizeof(struct chbk)); +#endif + CLAW_DBF_TEXT(4,trace,"IRQtask"); + + privptr = (struct claw_privbk *) dev->priv; + +#ifdef DEBUGMSG + printk(KERN_INFO "%s: bh routine - state-%02x\n" , + dev->name, p_ch->claw_state); +#endif + + unpack_read(dev); + clear_bit(CLAW_BH_ACTIVE, (void *)&p_ch->flag_a); + CLAW_DBF_TEXT(4,trace,"TskletXt"); +#ifdef FUNCTRACE + printk(KERN_INFO "%s:%s Exit on line %d\n", + dev->name,__FUNCTION__,__LINE__); +#endif + return; +} /* end of claw_irq_bh */ + +/*-------------------------------------------------------------------* +* claw_release * +* * +*--------------------------------------------------------------------*/ +static int +claw_release(struct net_device *dev) +{ + int rc; + int i; + unsigned long saveflags; + unsigned long parm; + struct claw_privbk *privptr; + DECLARE_WAITQUEUE(wait, current); + struct ccwbk* p_this_ccw; + struct ccwbk* p_buf; + + if (!dev) + return 0; + privptr = (struct claw_privbk *) dev->priv; + if (!privptr) + return 0; +#ifdef FUNCTRACE + printk(KERN_INFO "%s:%s Enter \n",dev->name,__FUNCTION__); +#endif + CLAW_DBF_TEXT(4,trace,"release"); +#ifdef DEBUGMSG + printk(KERN_INFO "%s: variable dev =\n",dev->name); + dumpit((char *) dev, sizeof(struct net_device)); + printk(KERN_INFO "Priv Buffalloc %d\n",privptr->buffs_alloc); + printk(KERN_INFO "Priv p_buff_ccw = %p\n",&privptr->p_buff_ccw); +#endif + privptr->release_pend=1; + claw_setbit_busy(TB_STOP,dev); + for ( i = 1; i >=0 ; i--) { + spin_lock_irqsave( + get_ccwdev_lock(privptr->channel[i].cdev), saveflags); + /* del_timer(&privptr->channel[READ].timer); */ + privptr->channel[i].claw_state = CLAW_STOP; + privptr->channel[i].IO_active = 0; + parm = (unsigned long) &privptr->channel[i]; + if (i == WRITE) + claw_purge_skb_queue( + &privptr->channel[WRITE].collect_queue); + rc = ccw_device_halt (privptr->channel[i].cdev, parm); + if (privptr->system_validate_comp==0x00) /* never opened? 
*/ + init_waitqueue_head(&privptr->channel[i].wait); + add_wait_queue(&privptr->channel[i].wait, &wait); + set_current_state(TASK_INTERRUPTIBLE); + spin_unlock_irqrestore( + get_ccwdev_lock(privptr->channel[i].cdev), saveflags); + schedule(); + set_current_state(TASK_RUNNING); + remove_wait_queue(&privptr->channel[i].wait, &wait); + if (rc != 0) { + ccw_check_return_code(privptr->channel[i].cdev, rc); + } + } + if (privptr->pk_skb != NULL) { + dev_kfree_skb(privptr->pk_skb); + privptr->pk_skb = NULL; + } + if(privptr->buffs_alloc != 1) { +#ifdef FUNCTRACE + printk(KERN_INFO "%s:%s Exit on line %d\n", + dev->name,__FUNCTION__,__LINE__); +#endif + CLAW_DBF_TEXT(4,trace,"none2fre"); + return 0; + } + CLAW_DBF_TEXT(4,trace,"freebufs"); + if (privptr->p_buff_ccw != NULL) { + free_pages((unsigned long)privptr->p_buff_ccw, + (int)pages_to_order_of_mag(privptr->p_buff_ccw_num)); + } + CLAW_DBF_TEXT(4,trace,"freeread"); + if (privptr->p_env->read_size < PAGE_SIZE) { + if (privptr->p_buff_read != NULL) { + free_pages((unsigned long)privptr->p_buff_read, + (int)pages_to_order_of_mag(privptr->p_buff_read_num)); + } + } + else { + p_buf=privptr->p_read_active_first; + while (p_buf!=NULL) { + free_pages((unsigned long)p_buf->p_buffer, + (int)pages_to_order_of_mag( + privptr->p_buff_pages_perread )); + p_buf=p_buf->next; + } + } + CLAW_DBF_TEXT(4,trace,"freewrit"); + if (privptr->p_env->write_size < PAGE_SIZE ) { + free_pages((unsigned long)privptr->p_buff_write, + (int)pages_to_order_of_mag(privptr->p_buff_write_num)); + } + else { + p_buf=privptr->p_write_active_first; + while (p_buf!=NULL) { + free_pages((unsigned long)p_buf->p_buffer, + (int)pages_to_order_of_mag( + privptr->p_buff_pages_perwrite )); + p_buf=p_buf->next; + } + } + CLAW_DBF_TEXT(4,trace,"clearptr"); + privptr->buffs_alloc = 0; + privptr->p_buff_ccw=NULL; + privptr->p_buff_read=NULL; + privptr->p_buff_write=NULL; + privptr->system_validate_comp=0; + privptr->release_pend=0; + /* Remove any writes that were pending and reset all reads */ + p_this_ccw=privptr->p_read_active_first; + while (p_this_ccw!=NULL) { + p_this_ccw->header.length=0xffff; + p_this_ccw->header.opcode=0xff; + p_this_ccw->header.flag=0x00; + p_this_ccw=p_this_ccw->next; + } + + while (privptr->p_write_active_first!=NULL) { + p_this_ccw=privptr->p_write_active_first; + p_this_ccw->header.flag=CLAW_PENDING; + privptr->p_write_active_first=p_this_ccw->next; + p_this_ccw->next=privptr->p_write_free_chain; + privptr->p_write_free_chain=p_this_ccw; + ++privptr->write_free_count; + } + privptr->p_write_active_last=NULL; + privptr->mtc_logical_link = -1; + privptr->mtc_skipping = 1; + privptr->mtc_offset=0; + + if (((privptr->channel[READ].last_dstat | + privptr->channel[WRITE].last_dstat) & + ~(DEV_STAT_CHN_END | DEV_STAT_DEV_END)) != 0x00) { + printk(KERN_WARNING "%s: channel problems during close - " + "read: %02x - write: %02x\n", + dev->name, + privptr->channel[READ].last_dstat, + privptr->channel[WRITE].last_dstat); + CLAW_DBF_TEXT(2,trace,"badclose"); + } +#ifdef FUNCTRACE + printk(KERN_INFO "%s:%s Exit on line %d\n", + dev->name,__FUNCTION__,__LINE__); +#endif + CLAW_DBF_TEXT(4,trace,"rlsexit"); + return 0; +} /* end of claw_release */ + + + +/*-------------------------------------------------------------------* +* claw_write_retry * +* * +*--------------------------------------------------------------------*/ + +static void +claw_write_retry ( struct chbk *p_ch ) +{ + + struct net_device *dev=p_ch->ndev; + + +#ifdef FUNCTRACE + printk(KERN_INFO "%s:%s 
Enter\n",dev->name,__FUNCTION__); + printk(KERN_INFO "claw: variable p_ch =\n"); + dumpit((char *) p_ch, sizeof(struct chbk)); +#endif + CLAW_DBF_TEXT(4,trace,"w_retry"); + if (p_ch->claw_state == CLAW_STOP) { +#ifdef FUNCTRACE + printk(KERN_INFO "%s:%s Exit on line %d\n", + dev->name,__FUNCTION__,__LINE__); +#endif + return; + } +#ifdef DEBUGMSG + printk( KERN_INFO "%s:%s state-%02x\n" , + dev->name, + __FUNCTION__, + p_ch->claw_state); +#endif + claw_strt_out_IO( dev ); +#ifdef FUNCTRACE + printk(KERN_INFO "%s:%s Exit on line %d\n", + dev->name,__FUNCTION__,__LINE__); +#endif + CLAW_DBF_TEXT(4,trace,"rtry_xit"); + return; +} /* end of claw_write_retry */ + + +/*-------------------------------------------------------------------* +* claw_write_next * +* * +*--------------------------------------------------------------------*/ + +static void +claw_write_next ( struct chbk * p_ch ) +{ + + struct net_device *dev; + struct claw_privbk *privptr=NULL; + struct sk_buff *pk_skb; + int rc; + +#ifdef FUNCTRACE + printk(KERN_INFO "%s:%s Enter \n",p_ch->ndev->name,__FUNCTION__); + printk(KERN_INFO "%s: variable p_ch =\n",p_ch->ndev->name); + dumpit((char *) p_ch, sizeof(struct chbk)); +#endif + CLAW_DBF_TEXT(4,trace,"claw_wrt"); + if (p_ch->claw_state == CLAW_STOP) + return; + dev = (struct net_device *) p_ch->ndev; + privptr = (struct claw_privbk *) dev->priv; + claw_free_wrt_buf( dev ); + if ((privptr->write_free_count > 0) && + (skb_queue_len(&p_ch->collect_queue) > 0)) { + pk_skb = claw_pack_skb(privptr); + while (pk_skb != NULL) { + rc = claw_hw_tx( pk_skb, dev,1); + if (privptr->write_free_count > 0) { + pk_skb = claw_pack_skb(privptr); + } else + pk_skb = NULL; + } + } + if (privptr->p_write_active_first!=NULL) { + claw_strt_out_IO(dev); + } + +#ifdef FUNCTRACE + printk(KERN_INFO "%s:%s Exit on line %d\n", + dev->name,__FUNCTION__,__LINE__); +#endif + return; +} /* end of claw_write_next */ + +/*-------------------------------------------------------------------* +* * +* claw_timer * +*--------------------------------------------------------------------*/ + +static void +claw_timer ( struct chbk * p_ch ) +{ +#ifdef FUNCTRACE + printk(KERN_INFO "%s:%s Entry\n",p_ch->ndev->name,__FUNCTION__); + printk(KERN_INFO "%s: variable p_ch =\n",p_ch->ndev->name); + dumpit((char *) p_ch, sizeof(struct chbk)); +#endif + CLAW_DBF_TEXT(4,trace,"timer"); + p_ch->flag |= CLAW_TIMER; + wake_up(&p_ch->wait); +#ifdef FUNCTRACE + printk(KERN_INFO "%s:%s Exit on line %d\n", + p_ch->ndev->name,__FUNCTION__,__LINE__); +#endif + return; +} /* end of claw_timer */ + + +/* +* +* functions +*/ + + +/*-------------------------------------------------------------------* +* * +* pages_to_order_of_mag * +* * +* takes a number of pages from 1 to 512 and returns the * +* log(num_pages)/log(2) get_free_pages() needs a base 2 order * +* of magnitude get_free_pages() has an upper order of 9 * +*--------------------------------------------------------------------*/ + +static int inline +pages_to_order_of_mag(int num_of_pages) +{ + int order_of_mag=1; /* assume 2 pages */ + int nump=2; +#ifdef FUNCTRACE + printk(KERN_INFO "%s Enter pages = %d \n",__FUNCTION__,num_of_pages); +#endif + CLAW_DBF_TEXT_(5,trace,"pages%d",num_of_pages); + if (num_of_pages == 1) {return 0; } /* magnitude of 0 = 1 page */ + /* 512 pages = 2Meg on 4k page systems */ + if (num_of_pages >= 512) {return 9; } + /* we have two or more pages order is at least 1 */ + for (nump=2 ;nump <= 512;nump*=2) { + if (num_of_pages <= nump) + break; + order_of_mag +=1; + } + 
if (order_of_mag > 9) { order_of_mag = 9; } /* I know it's paranoid */ +#ifdef FUNCTRACE + printk(KERN_INFO "%s Exit on line %d, order = %d\n", + __FUNCTION__,__LINE__, order_of_mag); +#endif + CLAW_DBF_TEXT_(5,trace,"mag%d",order_of_mag); + return order_of_mag; +} + +/*-------------------------------------------------------------------* +* * +* add_claw_reads * +* * +*--------------------------------------------------------------------*/ +static int +add_claw_reads(struct net_device *dev, struct ccwbk* p_first, + struct ccwbk* p_last) +{ + struct claw_privbk *privptr; + struct ccw1 temp_ccw; + struct endccw * p_end; +#ifdef IOTRACE + struct ccwbk* p_buf; +#endif +#ifdef FUNCTRACE + printk(KERN_INFO "%s:%s Enter \n",dev->name,__FUNCTION__); +#endif +#ifdef DEBUGMSG + printk(KERN_INFO "dev\n"); + dumpit((char *) dev, sizeof(struct net_device)); + printk(KERN_INFO "p_first\n"); + dumpit((char *) p_first, sizeof(struct ccwbk)); + printk(KERN_INFO "p_last\n"); + dumpit((char *) p_last, sizeof(struct ccwbk)); +#endif + CLAW_DBF_TEXT(4,trace,"addreads"); + privptr = dev->priv; + p_end = privptr->p_end_ccw; + + /* first CCW and last CCW contains a new set of read channel programs + * to apend the running channel programs + */ + if ( p_first==NULL) { +#ifdef FUNCTRACE + printk(KERN_INFO "%s:%s Exit on line %d\n", + dev->name,__FUNCTION__,__LINE__); +#endif + CLAW_DBF_TEXT(4,trace,"addexit"); + return 0; + } + + /* set up ending CCW sequence for this segment */ + if (p_end->read1) { + p_end->read1=0x00; /* second ending CCW is now active */ + /* reset ending CCWs and setup TIC CCWs */ + p_end->read2_nop2.cmd_code = CCW_CLAW_CMD_READFF; + p_end->read2_nop2.flags = CCW_FLAG_SLI | CCW_FLAG_SKIP; + p_last->r_TIC_1.cda =(__u32)__pa(&p_end->read2_nop1); + p_last->r_TIC_2.cda =(__u32)__pa(&p_end->read2_nop1); + p_end->read2_nop2.cda=0; + p_end->read2_nop2.count=1; + } + else { + p_end->read1=0x01; /* first ending CCW is now active */ + /* reset ending CCWs and setup TIC CCWs */ + p_end->read1_nop2.cmd_code = CCW_CLAW_CMD_READFF; + p_end->read1_nop2.flags = CCW_FLAG_SLI | CCW_FLAG_SKIP; + p_last->r_TIC_1.cda = (__u32)__pa(&p_end->read1_nop1); + p_last->r_TIC_2.cda = (__u32)__pa(&p_end->read1_nop1); + p_end->read1_nop2.cda=0; + p_end->read1_nop2.count=1; + } + + if ( privptr-> p_read_active_first ==NULL ) { +#ifdef DEBUGMSG + printk(KERN_INFO "%s:%s p_read_active_frist == NULL \n", + dev->name,__FUNCTION__); + printk(KERN_INFO "%s:%s Read active first/last changed \n", + dev->name,__FUNCTION__); +#endif + privptr-> p_read_active_first= p_first; /* set new first */ + privptr-> p_read_active_last = p_last; /* set new last */ + } + else { + +#ifdef DEBUGMSG + printk(KERN_INFO "%s:%s Read in progress \n", + dev->name,__FUNCTION__); +#endif + /* set up TIC ccw */ + temp_ccw.cda= (__u32)__pa(&p_first->read); + temp_ccw.count=0; + temp_ccw.flags=0; + temp_ccw.cmd_code = CCW_CLAW_CMD_TIC; + + + if (p_end->read1) { + + /* first set of CCW's is chained to the new read */ + /* chain, so the second set is chained to the active chain. 
*/ + /* Therefore modify the second set to point to the new */ + /* read chain set up TIC CCWs */ + /* make sure we update the CCW so channel doesn't fetch it */ + /* when it's only half done */ + memcpy( &p_end->read2_nop2, &temp_ccw , + sizeof(struct ccw1)); + privptr->p_read_active_last->r_TIC_1.cda= + (__u32)__pa(&p_first->read); + privptr->p_read_active_last->r_TIC_2.cda= + (__u32)__pa(&p_first->read); + } + else { + /* make sure we update the CCW so channel doesn't */ + /* fetch it when it is only half done */ + memcpy( &p_end->read1_nop2, &temp_ccw , + sizeof(struct ccw1)); + privptr->p_read_active_last->r_TIC_1.cda= + (__u32)__pa(&p_first->read); + privptr->p_read_active_last->r_TIC_2.cda= + (__u32)__pa(&p_first->read); + } + /* chain in new set of blocks */ + privptr->p_read_active_last->next = p_first; + privptr->p_read_active_last=p_last; + } /* end of if ( privptr-> p_read_active_first ==NULL) */ +#ifdef IOTRACE + printk(KERN_INFO "%s:%s dump p_last CCW BK \n",dev->name,__FUNCTION__); + dumpit((char *)p_last, sizeof(struct ccwbk)); + printk(KERN_INFO "%s:%s dump p_end CCW BK \n",dev->name,__FUNCTION__); + dumpit((char *)p_end, sizeof(struct endccw)); + + printk(KERN_INFO "%s:%s dump p_first CCW BK \n",dev->name,__FUNCTION__); + dumpit((char *)p_first, sizeof(struct ccwbk)); + printk(KERN_INFO "%s:%s Dump Active CCW chain \n", + dev->name,__FUNCTION__); + p_buf=privptr->p_read_active_first; + while (p_buf!=NULL) { + dumpit((char *)p_buf, sizeof(struct ccwbk)); + p_buf=p_buf->next; + } +#endif +#ifdef FUNCTRACE + printk(KERN_INFO "%s:%s Exit on line %d\n", + dev->name,__FUNCTION__,__LINE__); +#endif + CLAW_DBF_TEXT(4,trace,"addexit"); + return 0; +} /* end of add_claw_reads */ + +/*-------------------------------------------------------------------* + * ccw_check_return_code * + * * + *-------------------------------------------------------------------*/ + +static void inline +ccw_check_return_code(struct ccw_device *cdev, int return_code) +{ +#ifdef FUNCTRACE + printk(KERN_INFO "%s: %s() > enter \n", + cdev->dev.bus_id,__FUNCTION__); +#endif + CLAW_DBF_TEXT(4,trace,"ccwret"); +#ifdef DEBUGMSG + printk(KERN_INFO "variable cdev =\n"); + dumpit((char *) cdev, sizeof(struct ccw_device)); + printk(KERN_INFO "variable return_code = %d\n",return_code); +#endif + if (return_code != 0) { + switch (return_code) { + case -EBUSY: + printk(KERN_INFO "%s: Busy !\n", + cdev->dev.bus_id); + break; + case -ENODEV: + printk(KERN_EMERG "%s: Missing device called " + "for IO ENODEV\n", cdev->dev.bus_id); + break; + case -EIO: + printk(KERN_EMERG "%s: Status pending... 
EIO \n", + cdev->dev.bus_id); + break; + case -EINVAL: + printk(KERN_EMERG "%s: Invalid Dev State EINVAL \n", + cdev->dev.bus_id); + break; + default: + printk(KERN_EMERG "%s: Unknown error in " + "Do_IO %d\n",cdev->dev.bus_id, return_code); + } + } +#ifdef FUNCTRACE + printk(KERN_INFO "%s: %s() > exit on line %d\n", + cdev->dev.bus_id,__FUNCTION__,__LINE__); +#endif + CLAW_DBF_TEXT(4,trace,"ccwret"); +} /* end of ccw_check_return_code */ + +/*-------------------------------------------------------------------* +* ccw_check_unit_check * +*--------------------------------------------------------------------*/ + +static void inline +ccw_check_unit_check(struct chbk * p_ch, unsigned char sense ) +{ + struct net_device *dev = p_ch->ndev; + +#ifdef FUNCTRACE + printk(KERN_INFO "%s: %s() > enter\n",dev->name,__FUNCTION__); +#endif +#ifdef DEBUGMSG + printk(KERN_INFO "%s: variable dev =\n",dev->name); + dumpit((char *)dev, sizeof(struct net_device)); + printk(KERN_INFO "%s: variable sense =\n",dev->name); + dumpit((char *)&sense, 2); +#endif + CLAW_DBF_TEXT(4,trace,"unitchek"); + + printk(KERN_INFO "%s: Unit Check with sense byte:0x%04x\n", + dev->name, sense); + + if (sense & 0x40) { + if (sense & 0x01) { + printk(KERN_WARNING "%s: Interface disconnect or " + "Selective reset " + "occurred (remote side)\n", dev->name); + } + else { + printk(KERN_WARNING "%s: System reset occured" + " (remote side)\n", dev->name); + } + } + else if (sense & 0x20) { + if (sense & 0x04) { + printk(KERN_WARNING "%s: Data-streaming " + "timeout)\n", dev->name); + } + else { + printk(KERN_WARNING "%s: Data-transfer parity" + " error\n", dev->name); + } + } + else if (sense & 0x10) { + if (sense & 0x20) { + printk(KERN_WARNING "%s: Hardware malfunction " + "(remote side)\n", dev->name); + } + else { + printk(KERN_WARNING "%s: read-data parity error " + "(remote side)\n", dev->name); + } + } + +#ifdef FUNCTRACE + printk(KERN_INFO "%s: %s() exit on line %d\n", + dev->name,__FUNCTION__,__LINE__); +#endif +} /* end of ccw_check_unit_check */ + + + +/*-------------------------------------------------------------------* +* Dump buffer format * +* * +*--------------------------------------------------------------------*/ +#ifdef DEBUG +static void +dumpit(char* buf, int len) +{ + + __u32 ct, sw, rm, dup; + char *ptr, *rptr; + char tbuf[82], tdup[82]; +#if (CONFIG_ARCH_S390X) + char addr[22]; +#else + char addr[12]; +#endif + char boff[12]; + char bhex[82], duphex[82]; + char basc[40]; + + sw = 0; + rptr =ptr=buf; + rm = 16; + duphex[0] = 0x00; + dup = 0; + for ( ct=0; ct < len; ct++, ptr++, rptr++ ) { + if (sw == 0) { +#if (CONFIG_ARCH_S390X) + sprintf(addr, "%16.16lX",(unsigned long)rptr); +#else + sprintf(addr, "%8.8X",(__u32)rptr); +#endif + sprintf(boff, "%4.4X", (__u32)ct); + bhex[0] = '\0'; + basc[0] = '\0'; + } + if ((sw == 4) || (sw == 12)) { + strcat(bhex, " "); + } + if (sw == 8) { + strcat(bhex, " "); + } +#if (CONFIG_ARCH_S390X) + sprintf(tbuf,"%2.2lX", (unsigned long)*ptr); +#else + sprintf(tbuf,"%2.2X", (__u32)*ptr); +#endif + tbuf[2] = '\0'; + strcat(bhex, tbuf); + if ((0!=isprint(*ptr)) && (*ptr >= 0x20)) { + basc[sw] = *ptr; + } + else { + basc[sw] = '.'; + } + basc[sw+1] = '\0'; + sw++; + rm--; + if (sw==16) { + if ((strcmp(duphex, bhex)) !=0) { + if (dup !=0) { + sprintf(tdup,"Duplicate as above to" + " %s", addr); + printk( KERN_INFO " " + " --- %s ---\n",tdup); + } + printk( KERN_INFO " %s (+%s) : %s [%s]\n", + addr, boff, bhex, basc); + dup = 0; + strcpy(duphex, bhex); + } + else { + dup++; + } + sw = 0; 
+ rm = 16; + } + } /* endfor */ + + if (sw != 0) { + for ( ; rm > 0; rm--, sw++ ) { + if ((sw==4) || (sw==12)) strcat(bhex, " "); + if (sw==8) strcat(bhex, " "); + strcat(bhex, " "); + strcat(basc, " "); + } + if (dup !=0) { + sprintf(tdup,"Duplicate as above to %s", addr); + printk( KERN_INFO " --- %s ---\n", + tdup); + } + printk( KERN_INFO " %s (+%s) : %s [%s]\n", + addr, boff, bhex, basc); + } + else { + if (dup >=1) { + sprintf(tdup,"Duplicate as above to %s", addr); + printk( KERN_INFO " --- %s ---\n", + tdup); + } + if (dup !=0) { + printk( KERN_INFO " %s (+%s) : %s [%s]\n", + addr, boff, bhex, basc); + } + } + return; + +} /* end of dumpit */ +#endif + +/*-------------------------------------------------------------------* +* find_link * +*--------------------------------------------------------------------*/ +static int +find_link(struct net_device *dev, char *host_name, char *ws_name ) +{ + struct claw_privbk *privptr; + struct claw_env *p_env; + int rc=0; + +#ifdef FUNCTRACE + printk(KERN_INFO "%s:%s > enter \n",dev->name,__FUNCTION__); +#endif + CLAW_DBF_TEXT(2,setup,"findlink"); +#ifdef DEBUGMSG + printk(KERN_INFO "%s: variable dev = \n",dev->name); + dumpit((char *) dev, sizeof(struct net_device)); + printk(KERN_INFO "%s: variable host_name = %s\n",dev->name, host_name); + printk(KERN_INFO "%s: variable ws_name = %s\n",dev->name, ws_name); +#endif + privptr=dev->priv; + p_env=privptr->p_env; + switch (p_env->packing) + { + case PACKING_ASK: + if ((memcmp(WS_APPL_NAME_PACKED, host_name, 8)!=0) || + (memcmp(WS_APPL_NAME_PACKED, ws_name, 8)!=0 )) + rc = EINVAL; + break; + case DO_PACKED: + case PACK_SEND: + if ((memcmp(WS_APPL_NAME_IP_NAME, host_name, 8)!=0) || + (memcmp(WS_APPL_NAME_IP_NAME, ws_name, 8)!=0 )) + rc = EINVAL; + break; + default: + if ((memcmp(HOST_APPL_NAME, host_name, 8)!=0) || + (memcmp(p_env->api_type , ws_name, 8)!=0)) + rc = EINVAL; + break; + } + +#ifdef FUNCTRACE + printk(KERN_INFO "%s:%s Exit on line %d\n", + dev->name,__FUNCTION__,__LINE__); +#endif + return 0; +} /* end of find_link */ + +/*-------------------------------------------------------------------* + * claw_hw_tx * + * * + * * + *-------------------------------------------------------------------*/ + +static int +claw_hw_tx(struct sk_buff *skb, struct net_device *dev, long linkid) +{ + int rc=0; + struct claw_privbk *privptr; + struct ccwbk *p_this_ccw; + struct ccwbk *p_first_ccw; + struct ccwbk *p_last_ccw; + __u32 numBuffers; + signed long len_of_data; + unsigned long bytesInThisBuffer; + unsigned char *pDataAddress; + struct endccw *pEnd; + struct ccw1 tempCCW; + struct chbk *p_ch; + struct claw_env *p_env; + int lock; + struct clawph *pk_head; + struct chbk *ch; +#ifdef IOTRACE + struct ccwbk *p_buf; +#endif +#ifdef FUNCTRACE + printk(KERN_INFO "%s: %s() > enter\n",dev->name,__FUNCTION__); +#endif + CLAW_DBF_TEXT(4,trace,"hw_tx"); +#ifdef DEBUGMSG + printk(KERN_INFO "%s: variable dev skb =\n",dev->name); + dumpit((char *) skb, sizeof(struct sk_buff)); + printk(KERN_INFO "%s: variable dev =\n",dev->name); + dumpit((char *) dev, sizeof(struct net_device)); + printk(KERN_INFO "%s: variable linkid = %ld\n",dev->name,linkid); +#endif + privptr = (struct claw_privbk *) (dev->priv); + p_ch=(struct chbk *)&privptr->channel[WRITE]; + p_env =privptr->p_env; +#ifdef IOTRACE + printk(KERN_INFO "%s: %s() dump sk_buff \n",dev->name,__FUNCTION__); + dumpit((char *)skb ,sizeof(struct sk_buff)); +#endif + claw_free_wrt_buf(dev); /* Clean up free chain if posible */ + /* scan the write queue to free any 
completed write packets */ + p_first_ccw=NULL; + p_last_ccw=NULL; + if ((p_env->packing >= PACK_SEND) && + (skb->cb[1] != 'P')) { + skb_push(skb,sizeof(struct clawph)); + pk_head=(struct clawph *)skb->data; + pk_head->len=skb->len-sizeof(struct clawph); + if (pk_head->len%4) { + int pad = 4-(pk_head->len%4); + pk_head->len+= pad; + skb_pad(skb,pad); + skb_put(skb,pad); + } + if (p_env->packing == DO_PACKED) + pk_head->link_num = linkid; + else + pk_head->link_num = 0; + pk_head->flag = 0x00; + skb_pad(skb,4); + skb->cb[1] = 'P'; + } + if (linkid == 0) { + if (claw_check_busy(dev)) { + if (privptr->write_free_count!=0) { + claw_clear_busy(dev); + } + else { + claw_strt_out_IO(dev ); + claw_free_wrt_buf( dev ); + if (privptr->write_free_count==0) { +#ifdef IOTRACE + printk(KERN_INFO "%s: " + "(claw_check_busy) no free write " + "buffers\n", dev->name); +#endif + ch = &privptr->channel[WRITE]; + atomic_inc(&skb->users); + skb_queue_tail(&ch->collect_queue, skb); + goto Done; + } + else { + claw_clear_busy(dev); + } + } + } + /* tx lock */ + if (claw_test_and_setbit_busy(TB_TX,dev)) { /* set to busy */ +#ifdef DEBUGMSG + printk(KERN_INFO "%s: busy (claw_test_and_setbit_" + "busy)\n", dev->name); +#endif + ch = &privptr->channel[WRITE]; + atomic_inc(&skb->users); + skb_queue_tail(&ch->collect_queue, skb); + claw_strt_out_IO(dev ); + rc=-EBUSY; + goto Done2; + } + } + /* See how many write buffers are required to hold this data */ + numBuffers= ( skb->len + privptr->p_env->write_size - 1) / + ( privptr->p_env->write_size); + + /* If that number of buffers isn't available, give up for now */ + if (privptr->write_free_count < numBuffers || + privptr->p_write_free_chain == NULL ) { + + claw_setbit_busy(TB_NOBUFFER,dev); + +#ifdef DEBUGMSG + printk(KERN_INFO "%s: busy (claw_setbit_busy" + "(TB_NOBUFFER))\n", dev->name); + printk(KERN_INFO " free_count: %d, numBuffers : %d\n", + (int)privptr->write_free_count,(int) numBuffers ); +#endif + ch = &privptr->channel[WRITE]; + atomic_inc(&skb->users); + skb_queue_tail(&ch->collect_queue, skb); + CLAW_DBF_TEXT(2,trace,"clawbusy"); + goto Done2; + } + pDataAddress=skb->data; + len_of_data=skb->len; + + while (len_of_data > 0) { +#ifdef DEBUGMSG + printk(KERN_INFO "%s: %s() length-of-data is %ld \n", + dev->name ,__FUNCTION__,len_of_data); + dumpit((char *)pDataAddress ,64); +#endif + p_this_ccw=privptr->p_write_free_chain; /* get a block */ + if (p_this_ccw == NULL) { /* lost the race */ + ch = &privptr->channel[WRITE]; + atomic_inc(&skb->users); + skb_queue_tail(&ch->collect_queue, skb); + goto Done2; + } + privptr->p_write_free_chain=p_this_ccw->next; + p_this_ccw->next=NULL; + --privptr->write_free_count; /* -1 */ + bytesInThisBuffer=len_of_data; + if (bytesInThisBuffer > privptr->p_env->write_size) + bytesInThisBuffer = privptr->p_env->write_size; + memcpy( p_this_ccw->p_buffer,pDataAddress, bytesInThisBuffer); + len_of_data-=bytesInThisBuffer; + pDataAddress+=(unsigned long)bytesInThisBuffer; + /* setup write CCW */ + p_this_ccw->write.cmd_code = (linkid * 8) +1; + if (len_of_data>0) { + p_this_ccw->write.cmd_code+=MORE_to_COME_FLAG; + } + p_this_ccw->write.count=bytesInThisBuffer; + /* now add to end of this chain */ + if (p_first_ccw==NULL) { + p_first_ccw=p_this_ccw; + } + if (p_last_ccw!=NULL) { + p_last_ccw->next=p_this_ccw; + /* set up TIC ccws */ + p_last_ccw->w_TIC_1.cda= + (__u32)__pa(&p_this_ccw->write); + } + p_last_ccw=p_this_ccw; /* save new last block */ +#ifdef IOTRACE + printk(KERN_INFO "%s: %s() > CCW and Buffer %ld bytes long \n", + dev->name,__FUNCTION__,bytesInThisBuffer); + dumpit((char *)p_this_ccw, sizeof(struct
ccwbk)); + dumpit((char *)p_this_ccw->p_buffer, 64); +#endif + } + + /* FirstCCW and LastCCW now contain a new set of write channel + * programs to append to the running channel program + */ + + if (p_first_ccw!=NULL) { + /* setup ending ccw sequence for this segment */ + pEnd=privptr->p_end_ccw; + if (pEnd->write1) { + pEnd->write1=0x00; /* second end ccw is now active */ + /* set up Tic CCWs */ + p_last_ccw->w_TIC_1.cda= + (__u32)__pa(&pEnd->write2_nop1); + pEnd->write2_nop2.cmd_code = CCW_CLAW_CMD_READFF; + pEnd->write2_nop2.flags = + CCW_FLAG_SLI | CCW_FLAG_SKIP; + pEnd->write2_nop2.cda=0; + pEnd->write2_nop2.count=1; + } + else { + pEnd->write1=0x01; /* first end ccw is now active */ + /* set up Tic CCWs */ + p_last_ccw->w_TIC_1.cda= + (__u32)__pa(&pEnd->write1_nop1); + pEnd->write1_nop2.cmd_code = CCW_CLAW_CMD_READFF; + pEnd->write1_nop2.flags = + CCW_FLAG_SLI | CCW_FLAG_SKIP; + pEnd->write1_nop2.cda=0; + pEnd->write1_nop2.count=1; + } /* end if (pEnd->write1) */ + + + if (privptr->p_write_active_first==NULL ) { + privptr->p_write_active_first=p_first_ccw; + privptr->p_write_active_last=p_last_ccw; + } + else { + + /* set up Tic CCWs */ + + tempCCW.cda=(__u32)__pa(&p_first_ccw->write); + tempCCW.count=0; + tempCCW.flags=0; + tempCCW.cmd_code=CCW_CLAW_CMD_TIC; + + if (pEnd->write1) { + + /* + * The first set of ending CCWs is chained to the new write + * chain, so the second set is chained to the active chain. + * Therefore modify the second set to point to the new write + * chain. Make sure we update the CCW atomically (one 8-byte + * ccw1 copy) so the channel never fetches it half done. + */ + memcpy( &pEnd->write2_nop2, &tempCCW , + sizeof(struct ccw1)); + privptr->p_write_active_last->w_TIC_1.cda= + (__u32)__pa(&p_first_ccw->write); + } + else { + + /* + * make sure we update the CCW atomically + * so the channel does not fetch it when it's only half done + */ + memcpy(&pEnd->write1_nop2, &tempCCW , + sizeof(struct ccw1)); + privptr->p_write_active_last->w_TIC_1.cda= + (__u32)__pa(&p_first_ccw->write); + + } /* end if (pEnd->write1) */ + + privptr->p_write_active_last->next=p_first_ccw; + privptr->p_write_active_last=p_last_ccw; + } + + } /* endif (p_first_ccw!=NULL) */ + + +#ifdef IOTRACE + printk(KERN_INFO "%s: %s() > Dump Active CCW chain \n", + dev->name,__FUNCTION__); + p_buf=privptr->p_write_active_first; + while (p_buf!=NULL) { + dumpit((char *)p_buf, sizeof(struct ccwbk)); + p_buf=p_buf->next; + } + p_buf=(struct ccwbk*)privptr->p_end_ccw; + dumpit((char *)p_buf, sizeof(struct endccw)); +#endif + dev_kfree_skb(skb); + if (linkid==0) { + lock=LOCK_NO; + } + else { + lock=LOCK_YES; + } + claw_strt_out_IO(dev ); + /* if write free count is zero, set NOBUFFER */ +#ifdef DEBUGMSG + printk(KERN_INFO "%s: %s() > free_count is %d\n", + dev->name,__FUNCTION__, + (int) privptr->write_free_count ); +#endif + if (privptr->write_free_count==0) { + claw_setbit_busy(TB_NOBUFFER,dev); + } +Done2: + claw_clearbit_busy(TB_TX,dev); +Done: +#ifdef FUNCTRACE + printk(KERN_INFO "%s: %s() > exit on line %d, rc = %d \n", + dev->name,__FUNCTION__,__LINE__, rc); +#endif + return(rc); +} /* end of claw_hw_tx */ + +/*-------------------------------------------------------------------* +* * +* init_ccw_bk * +* * +*--------------------------------------------------------------------*/ + +static int +init_ccw_bk(struct net_device *dev) +{ + + __u32 ccw_blocks_required; + __u32 ccw_blocks_perpage; + __u32 ccw_pages_required; + __u32 claw_reads_perpage=1; + __u32 claw_read_pages; + __u32
claw_writes_perpage=1; + __u32 claw_write_pages; + void *p_buff=NULL; + struct ccwbk *p_free_chain; + struct ccwbk *p_buf; + struct ccwbk *p_last_CCWB; + struct ccwbk *p_first_CCWB; + struct endccw *p_endccw=NULL; + addr_t real_address; + struct claw_privbk *privptr=dev->priv; + struct clawh *pClawH=NULL; + addr_t real_TIC_address; + int i,j; +#ifdef FUNCTRACE + printk(KERN_INFO "%s: %s() enter \n",dev->name,__FUNCTION__); +#endif + CLAW_DBF_TEXT(4,trace,"init_ccw"); +#ifdef DEBUGMSG + printk(KERN_INFO "%s: variable dev =\n",dev->name); + dumpit((char *) dev, sizeof(struct net_device)); +#endif + + /* initialize statistics field */ + privptr->active_link_ID=0; + /* initialize ccwbk pointers */ + privptr->p_write_free_chain=NULL; /* pointer to free ccw chain*/ + privptr->p_write_active_first=NULL; /* pointer to the first write ccw*/ + privptr->p_write_active_last=NULL; /* pointer to the last write ccw*/ + privptr->p_read_active_first=NULL; /* pointer to the first read ccw*/ + privptr->p_read_active_last=NULL; /* pointer to the last read ccw */ + privptr->p_end_ccw=NULL; /* pointer to ending ccw */ + privptr->p_claw_signal_blk=NULL; /* pointer to signal block */ + privptr->buffs_alloc = 0; + memset(&privptr->end_ccw, 0x00, sizeof(struct endccw)); + memset(&privptr->ctl_bk, 0x00, sizeof(struct clawctl)); + /* initialize free write ccwbk counter */ + privptr->write_free_count=0; /* number of free bufs on write chain */ + p_last_CCWB = NULL; + p_first_CCWB= NULL; + /* + * We need 1 CCW block for each read buffer, 1 for each + * write buffer, plus 1 for ClawSignalBlock + */ + ccw_blocks_required = + privptr->p_env->read_buffers+privptr->p_env->write_buffers+1; +#ifdef DEBUGMSG + printk(KERN_INFO "%s: %s() " + "ccw_blocks_required=%d\n", + dev->name,__FUNCTION__, + ccw_blocks_required); + printk(KERN_INFO "%s: %s() " + "PAGE_SIZE=0x%x\n", + dev->name,__FUNCTION__, + (unsigned int)PAGE_SIZE); + printk(KERN_INFO "%s: %s() > " + "PAGE_MASK=0x%x\n", + dev->name,__FUNCTION__, + (unsigned int)PAGE_MASK); +#endif + /* + * compute number of CCW blocks that will fit in a page + */ + ccw_blocks_perpage= PAGE_SIZE / CCWBK_SIZE; + ccw_pages_required= + (ccw_blocks_required+ccw_blocks_perpage -1) / + ccw_blocks_perpage; + +#ifdef DEBUGMSG + printk(KERN_INFO "%s: %s() > ccw_blocks_perpage=%d\n", + dev->name,__FUNCTION__, + ccw_blocks_perpage); + printk(KERN_INFO "%s: %s() > ccw_pages_required=%d\n", + dev->name,__FUNCTION__, + ccw_pages_required); +#endif + /* + * read and write sizes are set by 2 constants in claw.h, + * 4k and 32k. Unpacked values other than 4k are not going to + * provide good performance. When packing is in use, 32k + * buffers are used.
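+ * For example (illustrative numbers, 4k pages): an unpacked 4k + * read buffer gives claw_reads_perpage = 1, so 64 read buffers + * need 64 pages, while a 32k packing buffer spans 8 pages + * (p_buff_pages_perwrite) and gets a __get_free_pages() + * allocation of its own below.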
+ */ + if (privptr->p_env->read_size < PAGE_SIZE) { + claw_reads_perpage= PAGE_SIZE / privptr->p_env->read_size; + claw_read_pages= (privptr->p_env->read_buffers + + claw_reads_perpage -1) / claw_reads_perpage; + } + else { /* > or equal */ + privptr->p_buff_pages_perread= + (privptr->p_env->read_size + PAGE_SIZE - 1) / PAGE_SIZE; + claw_read_pages= + privptr->p_env->read_buffers * privptr->p_buff_pages_perread; + } + if (privptr->p_env->write_size < PAGE_SIZE) { + claw_writes_perpage= + PAGE_SIZE / privptr->p_env->write_size; + claw_write_pages= + (privptr->p_env->write_buffers + claw_writes_perpage -1) / + claw_writes_perpage; + + } + else { /* > or equal */ + privptr->p_buff_pages_perwrite= + (privptr->p_env->write_size + PAGE_SIZE - 1) / PAGE_SIZE; + claw_write_pages= + privptr->p_env->write_buffers * privptr->p_buff_pages_perwrite; + } +#ifdef DEBUGMSG + if (privptr->p_env->read_size < PAGE_SIZE) { + printk(KERN_INFO "%s: %s() reads_perpage=%d\n", + dev->name,__FUNCTION__, + claw_reads_perpage); + } + else { + printk(KERN_INFO "%s: %s() pages_perread=%d\n", + dev->name,__FUNCTION__, + privptr->p_buff_pages_perread); + } + printk(KERN_INFO "%s: %s() read_pages=%d\n", + dev->name,__FUNCTION__, + claw_read_pages); + if (privptr->p_env->write_size < PAGE_SIZE) { + printk(KERN_INFO "%s: %s() writes_perpage=%d\n", + dev->name,__FUNCTION__, + claw_writes_perpage); + } + else { + printk(KERN_INFO "%s: %s() pages_perwrite=%d\n", + dev->name,__FUNCTION__, + privptr->p_buff_pages_perwrite); + } + printk(KERN_INFO "%s: %s() write_pages=%d\n", + dev->name,__FUNCTION__, + claw_write_pages); +#endif + + + /* + * allocate ccw_pages_required + */ + if (privptr->p_buff_ccw==NULL) { + privptr->p_buff_ccw= + (void *)__get_free_pages(__GFP_DMA, + (int)pages_to_order_of_mag(ccw_pages_required )); + if (privptr->p_buff_ccw==NULL) { + printk(KERN_INFO "%s: %s() " + "__get_free_pages for CCWs failed : " + "pages is %d\n", + dev->name,__FUNCTION__, + ccw_pages_required ); +#ifdef FUNCTRACE + printk(KERN_INFO "%s: %s() > " + "exit on line %d, rc = ENOMEM\n", + dev->name,__FUNCTION__, + __LINE__); +#endif + return -ENOMEM; + } + privptr->p_buff_ccw_num=ccw_pages_required; + } + memset(privptr->p_buff_ccw, 0x00, + privptr->p_buff_ccw_num * PAGE_SIZE); + + /* + * obtain ending ccw block address + * + */ + privptr->p_end_ccw = (struct endccw *)&privptr->end_ccw; + real_address = (__u32)__pa(privptr->p_end_ccw); + /* Initialize ending CCW block */ +#ifdef DEBUGMSG + printk(KERN_INFO "%s: %s() begin initialize ending CCW blocks\n", + dev->name,__FUNCTION__); +#endif + + p_endccw=privptr->p_end_ccw; + p_endccw->real=real_address; + p_endccw->write1=0x00; + p_endccw->read1=0x00; + + /* write1_nop1 */ + p_endccw->write1_nop1.cmd_code = CCW_CLAW_CMD_NOP; + p_endccw->write1_nop1.flags = CCW_FLAG_SLI | CCW_FLAG_CC; + p_endccw->write1_nop1.count = 1; + p_endccw->write1_nop1.cda = 0; + + /* write1_nop2 */ + p_endccw->write1_nop2.cmd_code = CCW_CLAW_CMD_READFF; + p_endccw->write1_nop2.flags = CCW_FLAG_SLI | CCW_FLAG_SKIP; + p_endccw->write1_nop2.count = 1; + p_endccw->write1_nop2.cda = 0; + + /* write2_nop1 */ + p_endccw->write2_nop1.cmd_code = CCW_CLAW_CMD_NOP; + p_endccw->write2_nop1.flags = CCW_FLAG_SLI | CCW_FLAG_CC; + p_endccw->write2_nop1.count = 1; + p_endccw->write2_nop1.cda = 0; + + /* write2_nop2 */ + p_endccw->write2_nop2.cmd_code = CCW_CLAW_CMD_READFF; + p_endccw->write2_nop2.flags = CCW_FLAG_SLI | CCW_FLAG_SKIP; + p_endccw->write2_nop2.count = 1; + p_endccw->write2_nop2.cda = 0; + + /* read1_nop1 */ + 
p_endccw->read1_nop1.cmd_code = CCW_CLAW_CMD_NOP; + p_endccw->read1_nop1.flags = CCW_FLAG_SLI | CCW_FLAG_CC; + p_endccw->read1_nop1.count = 1; + p_endccw->read1_nop1.cda = 0; + + /* read1_nop2 */ + p_endccw->read1_nop2.cmd_code = CCW_CLAW_CMD_READFF; + p_endccw->read1_nop2.flags = CCW_FLAG_SLI | CCW_FLAG_SKIP; + p_endccw->read1_nop2.count = 1; + p_endccw->read1_nop2.cda = 0; + + /* read2_nop1 */ + p_endccw->read2_nop1.cmd_code = CCW_CLAW_CMD_NOP; + p_endccw->read2_nop1.flags = CCW_FLAG_SLI | CCW_FLAG_CC; + p_endccw->read2_nop1.count = 1; + p_endccw->read2_nop1.cda = 0; + + /* read2_nop2 */ + p_endccw->read2_nop2.cmd_code = CCW_CLAW_CMD_READFF; + p_endccw->read2_nop2.flags = CCW_FLAG_SLI | CCW_FLAG_SKIP; + p_endccw->read2_nop2.count = 1; + p_endccw->read2_nop2.cda = 0; + +#ifdef IOTRACE + printk(KERN_INFO "%s: %s() dump claw ending CCW BK \n", + dev->name,__FUNCTION__); + dumpit((char *)p_endccw, sizeof(struct endccw)); +#endif + + /* + * Build a chain of CCWs + * + */ + +#ifdef DEBUGMSG + printk(KERN_INFO "%s: %s() Begin build a chain of CCW buffer \n", + dev->name,__FUNCTION__); +#endif + p_buff=privptr->p_buff_ccw; + + p_free_chain=NULL; + for (i=0 ; i < ccw_pages_required; i++ ) { + real_address = (__u32)__pa(p_buff); + p_buf=p_buff; + for (j=0 ; j < ccw_blocks_perpage ; j++) { + p_buf->next = p_free_chain; + p_free_chain = p_buf; + p_buf->real=(__u32)__pa(p_buf); + ++p_buf; + } + p_buff+=PAGE_SIZE; + } +#ifdef DEBUGMSG + printk(KERN_INFO "%s: %s() " + "End build a chain of CCW buffer \n", + dev->name,__FUNCTION__); + p_buf=p_free_chain; + while (p_buf!=NULL) { + dumpit((char *)p_buf, sizeof(struct ccwbk)); + p_buf=p_buf->next; + } +#endif + + /* + * Initialize ClawSignalBlock + * + */ +#ifdef DEBUGMSG + printk(KERN_INFO "%s: %s() " + "Begin initialize ClawSignalBlock \n", + dev->name,__FUNCTION__); +#endif + if (privptr->p_claw_signal_blk==NULL) { + privptr->p_claw_signal_blk=p_free_chain; + p_free_chain=p_free_chain->next; + pClawH=(struct clawh *)privptr->p_claw_signal_blk; + pClawH->length=0xffff; + pClawH->opcode=0xff; + pClawH->flag=CLAW_BUSY; + } +#ifdef DEBUGMSG + printk(KERN_INFO "%s: %s() > End initialize " + "ClawSignalBlock\n", + dev->name,__FUNCTION__); + dumpit((char *)privptr->p_claw_signal_blk, sizeof(struct ccwbk)); +#endif + + /* + * allocate write_pages_required and add to free chain + */ + if (privptr->p_buff_write==NULL) { + if (privptr->p_env->write_size < PAGE_SIZE) { + privptr->p_buff_write= + (void *)__get_free_pages(__GFP_DMA, + (int)pages_to_order_of_mag(claw_write_pages )); + if (privptr->p_buff_write==NULL) { + printk(KERN_INFO "%s: %s() __get_free_pages for write" + " bufs failed : get is for %d pages\n", + dev->name,__FUNCTION__,claw_write_pages ); + free_pages((unsigned long)privptr->p_buff_ccw, + (int)pages_to_order_of_mag(privptr->p_buff_ccw_num)); + privptr->p_buff_ccw=NULL; +#ifdef FUNCTRACE + printk(KERN_INFO "%s: %s() > exit on line %d," + "rc = ENOMEM\n", + dev->name,__FUNCTION__,__LINE__); +#endif + return -ENOMEM; + } + /* + * Build CLAW write free chain + * + */ + + memset(privptr->p_buff_write, 0x00, + claw_write_pages * PAGE_SIZE); +#ifdef DEBUGMSG + printk(KERN_INFO "%s: %s() Begin build claw write free " + "chain \n",dev->name,__FUNCTION__); +#endif + privptr->p_write_free_chain=NULL; + + p_buff=privptr->p_buff_write; + + for (i=0 ; i< privptr->p_env->write_buffers ; i++) { + p_buf = p_free_chain; /* get a CCW */ + p_free_chain = p_buf->next; + p_buf->next =privptr->p_write_free_chain; + privptr->p_write_free_chain = p_buf; + p_buf->
p_buffer = (struct clawbuf *)p_buff; + p_buf-> write.cda = (__u32)__pa(p_buff); + p_buf-> write.flags = CCW_FLAG_SLI | CCW_FLAG_CC; + p_buf-> w_read_FF.cmd_code = CCW_CLAW_CMD_READFF; + p_buf-> w_read_FF.flags = CCW_FLAG_SLI | CCW_FLAG_CC; + p_buf-> w_read_FF.count = 1; + p_buf-> w_read_FF.cda = + (__u32)__pa(&p_buf-> header.flag); + p_buf-> w_TIC_1.cmd_code = CCW_CLAW_CMD_TIC; + p_buf-> w_TIC_1.flags = 0; + p_buf-> w_TIC_1.count = 0; + + if (((unsigned long)p_buff+privptr->p_env->write_size) >= + ((unsigned long)(p_buff+2* + (privptr->p_env->write_size) -1) & PAGE_MASK)) { + p_buff= p_buff+privptr->p_env->write_size; + } + } + } + else /* Buffers are >= PAGE_SIZE. 1 buff per get_free_pages */ + { + privptr->p_write_free_chain=NULL; + for (i = 0; i< privptr->p_env->write_buffers ; i++) { + p_buff=(void *)__get_free_pages(__GFP_DMA, + (int)pages_to_order_of_mag( + privptr->p_buff_pages_perwrite) ); +#ifdef IOTRACE + printk(KERN_INFO "%s:%s __get_free_pages " + "for writes buf: get for %d pages\n", + dev->name,__FUNCTION__, + privptr->p_buff_pages_perwrite); +#endif + if (p_buff==NULL) { + printk(KERN_INFO "%s:%s __get_free_pages " + "for writes buf failed : get is for %d pages\n", + dev->name, + __FUNCTION__, + privptr->p_buff_pages_perwrite ); + free_pages((unsigned long)privptr->p_buff_ccw, + (int)pages_to_order_of_mag( + privptr->p_buff_ccw_num)); + privptr->p_buff_ccw=NULL; + p_buf=privptr->p_buff_write; + while (p_buf!=NULL) { + free_pages((unsigned long) + p_buf->p_buffer, + (int)pages_to_order_of_mag( + privptr->p_buff_pages_perwrite)); + p_buf=p_buf->next; + } +#ifdef FUNCTRACE + printk(KERN_INFO "%s: %s exit on line %d, rc = ENOMEM\n", + dev->name, + __FUNCTION__, + __LINE__); +#endif + return -ENOMEM; + } /* Error on get_pages */ + memset(p_buff, 0x00, privptr->p_env->write_size ); + p_buf = p_free_chain; + p_free_chain = p_buf->next; + p_buf->next = privptr->p_write_free_chain; + privptr->p_write_free_chain = p_buf; + privptr->p_buff_write = p_buf; + p_buf->p_buffer=(struct clawbuf *)p_buff; + p_buf-> write.cda = (__u32)__pa(p_buff); + p_buf-> write.flags = CCW_FLAG_SLI | CCW_FLAG_CC; + p_buf-> w_read_FF.cmd_code = CCW_CLAW_CMD_READFF; + p_buf-> w_read_FF.flags = CCW_FLAG_SLI | CCW_FLAG_CC; + p_buf-> w_read_FF.count = 1; + p_buf-> w_read_FF.cda = + (__u32)__pa(&p_buf-> header.flag); + p_buf-> w_TIC_1.cmd_code = CCW_CLAW_CMD_TIC; + p_buf-> w_TIC_1.flags = 0; + p_buf-> w_TIC_1.count = 0; + } /* for all write_buffers */ + + } /* else buffers are PAGE_SIZE or bigger */ + + } + privptr->p_buff_write_num=claw_write_pages; + privptr->write_free_count=privptr->p_env->write_buffers; + + +#ifdef DEBUGMSG + printk(KERN_INFO "%s:%s End build claw write free chain \n", + dev->name,__FUNCTION__); + p_buf=privptr->p_write_free_chain; + while (p_buf!=NULL) { + dumpit((char *)p_buf, sizeof(struct ccwbk)); + p_buf=p_buf->next; + } +#endif + /* + * allocate read_pages_required and chain to free chain + */ + if (privptr->p_buff_read==NULL) { + if (privptr->p_env->read_size < PAGE_SIZE) { + privptr->p_buff_read= + (void *)__get_free_pages(__GFP_DMA, + (int)pages_to_order_of_mag(claw_read_pages) ); + if (privptr->p_buff_read==NULL) { + printk(KERN_INFO "%s: %s() " + "__get_free_pages for read buf failed : " + "get is for %d pages\n", + dev->name,__FUNCTION__,claw_read_pages ); + free_pages((unsigned long)privptr->p_buff_ccw, + (int)pages_to_order_of_mag( + privptr->p_buff_ccw_num)); + /* free the write pages, size is < page size */ + free_pages((unsigned long)privptr->p_buff_write, + 
(int)pages_to_order_of_mag( + privptr->p_buff_write_num)); + privptr->p_buff_ccw=NULL; + privptr->p_buff_write=NULL; +#ifdef FUNCTRACE + printk(KERN_INFO "%s: %s() > exit on line %d, rc =" + " ENOMEM\n",dev->name,__FUNCTION__,__LINE__); +#endif + return -ENOMEM; + } + memset(privptr->p_buff_read, 0x00, claw_read_pages * PAGE_SIZE); + privptr->p_buff_read_num=claw_read_pages; + /* + * Build CLAW read free chain + * + */ +#ifdef DEBUGMSG + printk(KERN_INFO "%s: %s() Begin build claw read free chain \n", + dev->name,__FUNCTION__); +#endif + p_buff=privptr->p_buff_read; + for (i=0 ; i< privptr->p_env->read_buffers ; i++) { + p_buf = p_free_chain; + p_free_chain = p_buf->next; + + if (p_last_CCWB==NULL) { + p_buf->next=NULL; + real_TIC_address=0; + p_last_CCWB=p_buf; + } + else { + p_buf->next=p_first_CCWB; + real_TIC_address= + (__u32)__pa(&p_first_CCWB -> read ); + } + + p_first_CCWB=p_buf; + + p_buf->p_buffer=(struct clawbuf *)p_buff; + /* initialize read command */ + p_buf-> read.cmd_code = CCW_CLAW_CMD_READ; + p_buf-> read.cda = (__u32)__pa(p_buff); + p_buf-> read.flags = CCW_FLAG_SLI | CCW_FLAG_CC; + p_buf-> read.count = privptr->p_env->read_size; + + /* initialize read_h command */ + p_buf-> read_h.cmd_code = CCW_CLAW_CMD_READHEADER; + p_buf-> read_h.cda = + (__u32)__pa(&(p_buf->header)); + p_buf-> read_h.flags = CCW_FLAG_SLI | CCW_FLAG_CC; + p_buf-> read_h.count = sizeof(struct clawh); + + /* initialize Signal command */ + p_buf-> signal.cmd_code = CCW_CLAW_CMD_SIGNAL_SMOD; + p_buf-> signal.cda = + (__u32)__pa(&(pClawH->flag)); + p_buf-> signal.flags = CCW_FLAG_SLI | CCW_FLAG_CC; + p_buf-> signal.count = 1; + + /* initialize r_TIC_1 command */ + p_buf-> r_TIC_1.cmd_code = CCW_CLAW_CMD_TIC; + p_buf-> r_TIC_1.cda = (__u32)real_TIC_address; + p_buf-> r_TIC_1.flags = 0; + p_buf-> r_TIC_1.count = 0; + + /* initialize r_read_FF command */ + p_buf-> r_read_FF.cmd_code = CCW_CLAW_CMD_READFF; + p_buf-> r_read_FF.cda = + (__u32)__pa(&(pClawH->flag)); + p_buf-> r_read_FF.flags = + CCW_FLAG_SLI | CCW_FLAG_CC | CCW_FLAG_PCI; + p_buf-> r_read_FF.count = 1; + + /* initialize r_TIC_2 */ + memcpy(&p_buf->r_TIC_2, + &p_buf->r_TIC_1, sizeof(struct ccw1)); + + /* initialize Header */ + p_buf->header.length=0xffff; + p_buf->header.opcode=0xff; + p_buf->header.flag=CLAW_PENDING; + + if (((unsigned long)p_buff+privptr->p_env->read_size) >= + ((unsigned long)(p_buff+2*(privptr->p_env->read_size) -1) + & PAGE_MASK) ) { + p_buff= p_buff+privptr->p_env->read_size; + } + else { + p_buff= + (void *)((unsigned long) + (p_buff+2*(privptr->p_env->read_size) -1) + & PAGE_MASK) ; + } + } /* for read_buffers */ + } /* read_size < PAGE_SIZE */ + else { /* read Size >= PAGE_SIZE */ + +#ifdef DEBUGMSG + printk(KERN_INFO "%s: %s() Begin build claw read free chain \n", + dev->name,__FUNCTION__); +#endif + for (i=0 ; i< privptr->p_env->read_buffers ; i++) { + p_buff = (void *)__get_free_pages(__GFP_DMA, + (int)pages_to_order_of_mag(privptr->p_buff_pages_perread) ); + if (p_buff==NULL) { + printk(KERN_INFO "%s: %s() __get_free_pages for read " + "buf failed : get is for %d pages\n", + dev->name,__FUNCTION__, + privptr->p_buff_pages_perread ); + free_pages((unsigned long)privptr->p_buff_ccw, + (int)pages_to_order_of_mag(privptr->p_buff_ccw_num)); + /* free the write pages */ + p_buf=privptr->p_buff_write; + while (p_buf!=NULL) { + free_pages((unsigned long)p_buf->p_buffer, + (int)pages_to_order_of_mag( + privptr->p_buff_pages_perwrite )); + p_buf=p_buf->next; + } + /* free any read pages already alloc */ + 
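/* each ccwbk on this chain owns p_buff_pages_perread pages of buffer */ +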
p_buf=privptr->p_buff_read; + while (p_buf!=NULL) { + free_pages((unsigned long)p_buf->p_buffer, + (int)pages_to_order_of_mag( + privptr->p_buff_pages_perread )); + p_buf=p_buf->next; + } + privptr->p_buff_ccw=NULL; + privptr->p_buff_write=NULL; +#ifdef FUNCTRACE + printk(KERN_INFO "%s: %s() exit on line %d, rc = ENOMEM\n", + dev->name,__FUNCTION__, + __LINE__); +#endif + return -ENOMEM; + } + memset(p_buff, 0x00, privptr->p_env->read_size); + p_buf = p_free_chain; + privptr->p_buff_read = p_buf; + p_free_chain = p_buf->next; + + if (p_last_CCWB==NULL) { + p_buf->next=NULL; + real_TIC_address=0; + p_last_CCWB=p_buf; + } + else { + p_buf->next=p_first_CCWB; + real_TIC_address= + (addr_t)__pa( + &p_first_CCWB -> read ); + } + + p_first_CCWB=p_buf; + /* save buff address */ + p_buf->p_buffer=(struct clawbuf *)p_buff; + /* initialize read command */ + p_buf-> read.cmd_code = CCW_CLAW_CMD_READ; + p_buf-> read.cda = (__u32)__pa(p_buff); + p_buf-> read.flags = CCW_FLAG_SLI | CCW_FLAG_CC; + p_buf-> read.count = privptr->p_env->read_size; + + /* initialize read_h command */ + p_buf-> read_h.cmd_code = CCW_CLAW_CMD_READHEADER; + p_buf-> read_h.cda = + (__u32)__pa(&(p_buf->header)); + p_buf-> read_h.flags = CCW_FLAG_SLI | CCW_FLAG_CC; + p_buf-> read_h.count = sizeof(struct clawh); + + /* initialize Signal command */ + p_buf-> signal.cmd_code = CCW_CLAW_CMD_SIGNAL_SMOD; + p_buf-> signal.cda = + (__u32)__pa(&(pClawH->flag)); + p_buf-> signal.flags = CCW_FLAG_SLI | CCW_FLAG_CC; + p_buf-> signal.count = 1; + + /* initialize r_TIC_1 command */ + p_buf-> r_TIC_1.cmd_code = CCW_CLAW_CMD_TIC; + p_buf-> r_TIC_1.cda = (__u32)real_TIC_address; + p_buf-> r_TIC_1.flags = 0; + p_buf-> r_TIC_1.count = 0; + + /* initialize r_read_FF command */ + p_buf-> r_read_FF.cmd_code = CCW_CLAW_CMD_READFF; + p_buf-> r_read_FF.cda = + (__u32)__pa(&(pClawH->flag)); + p_buf-> r_read_FF.flags = + CCW_FLAG_SLI | CCW_FLAG_CC | CCW_FLAG_PCI; + p_buf-> r_read_FF.count = 1; + + /* initialize r_TIC_2 */ + memcpy(&p_buf->r_TIC_2, &p_buf->r_TIC_1, + sizeof(struct ccw1)); + + /* initialize Header */ + p_buf->header.length=0xffff; + p_buf->header.opcode=0xff; + p_buf->header.flag=CLAW_PENDING; + + } /* For read_buffers */ + } /* read_size >= PAGE_SIZE */ + } /* pBuffread = NULL */ +#ifdef DEBUGMSG + printk(KERN_INFO "%s: %s() > End build claw read free chain \n", + dev->name,__FUNCTION__); + p_buf=p_first_CCWB; + while (p_buf!=NULL) { + dumpit((char *)p_buf, sizeof(struct ccwbk)); + p_buf=p_buf->next; + } + +#endif + add_claw_reads( dev ,p_first_CCWB , p_last_CCWB); + privptr->buffs_alloc = 1; +#ifdef FUNCTRACE + printk(KERN_INFO "%s: %s() exit on line %d\n", + dev->name,__FUNCTION__,__LINE__); +#endif + return 0; +} /* end of init_ccw_bk */ + +/*-------------------------------------------------------------------* +* * +* probe_error * +* * +*--------------------------------------------------------------------*/ + +static void +probe_error( struct ccwgroup_device *cgdev) +{ + struct claw_privbk *privptr; +#ifdef FUNCTRACE + printk(KERN_INFO "%s enter \n",__FUNCTION__); +#endif + CLAW_DBF_TEXT(4,trace,"proberr"); +#ifdef DEBUGMSG + printk(KERN_INFO "%s variable cgdev =\n",__FUNCTION__); + dumpit((char *) cgdev, sizeof(struct ccwgroup_device)); +#endif + privptr=(struct claw_privbk *)cgdev->dev.driver_data; + if (privptr!=NULL) { + if (privptr->p_env != NULL) { + kfree(privptr->p_env); + privptr->p_env=NULL; + } + if (privptr->p_mtc_envelope!=NULL) { + kfree(privptr->p_mtc_envelope); + privptr->p_mtc_envelope=NULL; + } + kfree(privptr); + 
privptr=NULL; + } +#ifdef FUNCTRACE + printk(KERN_INFO "%s > exit on line %d\n", + __FUNCTION__,__LINE__); +#endif + + return; +} /* probe_error */ + + + +/*-------------------------------------------------------------------* +* claw_process_control * +* * +* * +*--------------------------------------------------------------------*/ + +static int +claw_process_control( struct net_device *dev, struct ccwbk * p_ccw) +{ + + struct clawbuf *p_buf; + struct clawctl ctlbk; + struct clawctl *p_ctlbk; + char temp_host_name[8]; + char temp_ws_name[8]; + struct claw_privbk *privptr; + struct claw_env *p_env; + struct sysval *p_sysval; + struct conncmd *p_connect=NULL; + int rc; + struct chbk *p_ch = NULL; +#ifdef FUNCTRACE + printk(KERN_INFO "%s: %s() > enter \n", + dev->name,__FUNCTION__); +#endif + CLAW_DBF_TEXT(2,setup,"clw_cntl"); +#ifdef DEBUGMSG + printk(KERN_INFO "%s: variable dev =\n",dev->name); + dumpit((char *) dev, sizeof(struct net_device)); + printk(KERN_INFO "%s: variable p_ccw =\n",dev->name); + dumpit((char *) p_ccw, sizeof(struct ccwbk)); +#endif + udelay(1000); /* Wait a ms for the control packets to + * catch up to each other */ + privptr=dev->priv; + p_env=privptr->p_env; + memcpy( &temp_host_name, p_env->host_name, 8); + memcpy( &temp_ws_name, p_env->adapter_name , 8); + printk(KERN_INFO "%s: CLAW device %.8s: " + "Received Control Packet\n", + dev->name, temp_ws_name); + if (privptr->release_pend==1) { +#ifdef FUNCTRACE + printk(KERN_INFO "%s: %s() > " + "exit on line %d, rc=0\n", + dev->name,__FUNCTION__,__LINE__); +#endif + return 0; + } + p_buf=p_ccw->p_buffer; + p_ctlbk=&ctlbk; + if (p_env->packing == DO_PACKED) { /* packing in progress? */ + memcpy(p_ctlbk, &p_buf->buffer[4], sizeof(struct clawctl)); + } else { + memcpy(p_ctlbk, p_buf, sizeof(struct clawctl)); + } +#ifdef IOTRACE + printk(KERN_INFO "%s: dump claw control data inbound\n",dev->name); + dumpit((char *)p_ctlbk, sizeof(struct clawctl)); +#endif + switch (p_ctlbk->command) + { + case SYSTEM_VALIDATE_REQUEST: + if (p_ctlbk->version!=CLAW_VERSION_ID) { + claw_snd_sys_validate_rsp(dev, p_ctlbk, + CLAW_RC_WRONG_VERSION ); + printk(KERN_WARNING "%s: %d is wrong version id. " + "Expected %d\n", + dev->name, p_ctlbk->version, + CLAW_VERSION_ID); + } + p_sysval=(struct sysval *)&(p_ctlbk->data); + printk(KERN_INFO "%s: Recv Sys Validate Request: " + "Vers=%d,link_id=%d,Corr=%d,WS name=%.
+ "8s,Host name=%.8s\n", + dev->name, p_ctlbk->version, + p_ctlbk->linkid, + p_ctlbk->correlator, + p_sysval->WS_name, + p_sysval->host_name); + if (0!=memcmp(temp_host_name,p_sysval->host_name,8)) { + claw_snd_sys_validate_rsp(dev, p_ctlbk, + CLAW_RC_NAME_MISMATCH ); + CLAW_DBF_TEXT(2,setup,"HSTBAD"); + CLAW_DBF_TEXT_(2,setup,"%s",p_sysval->host_name); + CLAW_DBF_TEXT_(2,setup,"%s",temp_host_name); + printk(KERN_INFO "%s: Host name mismatch\n", + dev->name); + printk(KERN_INFO "%s: Received :%s: " + "expected :%s: \n", + dev->name, + p_sysval->host_name, + temp_host_name); + } + if (0!=memcmp(temp_ws_name,p_sysval->WS_name,8)) { + claw_snd_sys_validate_rsp(dev, p_ctlbk, + CLAW_RC_NAME_MISMATCH ); + CLAW_DBF_TEXT(2,setup,"WSNBAD"); + CLAW_DBF_TEXT_(2,setup,"%s",p_sysval->WS_name); + CLAW_DBF_TEXT_(2,setup,"%s",temp_ws_name); + printk(KERN_INFO "%s: WS name mismatch\n", + dev->name); + printk(KERN_INFO "%s: Received :%s: " + "expected :%s: \n", + dev->name, + p_sysval->WS_name, + temp_ws_name); + } + if (( p_sysval->write_frame_size < p_env->write_size) && + ( p_env->packing == 0)) { + claw_snd_sys_validate_rsp(dev, p_ctlbk, + CLAW_RC_HOST_RCV_TOO_SMALL ); + printk(KERN_INFO "%s: host write size is too " + "small\n", dev->name); + CLAW_DBF_TEXT(2,setup,"wrtszbad"); + } + if (( p_sysval->read_frame_size < p_env->read_size) && + ( p_env->packing == 0)) { + claw_snd_sys_validate_rsp(dev, p_ctlbk, + CLAW_RC_HOST_RCV_TOO_SMALL ); + printk(KERN_INFO "%s: host read size is too " + "small\n", dev->name); + CLAW_DBF_TEXT(2,setup,"rdsizbad"); + } + claw_snd_sys_validate_rsp(dev, p_ctlbk, 0 ); + printk("%s: CLAW device %.8s: System validate" + " completed.\n",dev->name, temp_ws_name); + printk("%s: sys Validate Rsize:%d Wsize:%d\n",dev->name, + p_sysval->read_frame_size,p_sysval->write_frame_size); + privptr->system_validate_comp=1; + if(strncmp(p_env->api_type,WS_APPL_NAME_PACKED,6) == 0) { + p_env->packing = PACKING_ASK; + } + claw_strt_conn_req(dev); + break; + + case SYSTEM_VALIDATE_RESPONSE: + p_sysval=(struct sysval *)&(p_ctlbk->data); + printk("%s: Recv Sys Validate Resp: Vers=%d,Corr=%d,RC=%d," + "WS name=%.8s,Host name=%.8s\n", + dev->name, + p_ctlbk->version, + p_ctlbk->correlator, + p_ctlbk->rc, + p_sysval->WS_name, + p_sysval->host_name); + switch (p_ctlbk->rc) + { + case 0: + printk(KERN_INFO "%s: CLAW device " + "%.8s: System validate " + "completed.\n", + dev->name, temp_ws_name); + if (privptr->system_validate_comp == 0) + claw_strt_conn_req(dev); + privptr->system_validate_comp=1; + break; + case CLAW_RC_NAME_MISMATCH: + printk(KERN_INFO "%s: Sys Validate " + "Resp : Host, WS name is " + "mismatch\n", + dev->name); + break; + case CLAW_RC_WRONG_VERSION: + printk(KERN_INFO "%s: Sys Validate " + "Resp : Wrong version\n", + dev->name); + break; + case CLAW_RC_HOST_RCV_TOO_SMALL: + printk(KERN_INFO "%s: Sys Validate " + "Resp : bad frame size\n", + dev->name); + break; + default: + printk(KERN_INFO "%s: Sys Validate " + "error code=%d \n", + dev->name, p_ctlbk->rc ); + break; + } + break; + + case CONNECTION_REQUEST: + p_connect=(struct conncmd *)&(p_ctlbk->data); + printk(KERN_INFO "%s: Recv Conn Req: Vers=%d,link_id=%d," + "Corr=%d,HOST appl=%.8s,WS appl=%.8s\n", + dev->name, + p_ctlbk->version, + p_ctlbk->linkid, + p_ctlbk->correlator, + p_connect->host_name, + p_connect->WS_name); + if (privptr->active_link_ID!=0 ) { + claw_snd_disc(dev, p_ctlbk); + printk(KERN_INFO "%s: Conn Req error : " + "already logical link is active \n", + dev->name); + } + if (p_ctlbk->linkid!=1 ) { + 
claw_snd_disc(dev, p_ctlbk); + printk(KERN_INFO "%s: Conn Req error : " + "req logical link id is not 1\n", + dev->name); + } + rc=find_link(dev, + p_connect->host_name, p_connect->WS_name); + if (rc!=0) { + claw_snd_disc(dev, p_ctlbk); + printk(KERN_INFO "%s: Conn Req error : " + "req appl name does not match\n", + dev->name); + } + claw_send_control(dev, + CONNECTION_CONFIRM, p_ctlbk->linkid, + p_ctlbk->correlator, + 0, p_connect->host_name, + p_connect->WS_name); + if (p_env->packing == PACKING_ASK) { + printk(KERN_INFO "%s: Now Pack ask\n",dev->name); + p_env->packing = PACK_SEND; + claw_snd_conn_req(dev,0); + } + printk(KERN_INFO "%s: CLAW device %.8s: Connection " + "completed link_id=%d.\n", + dev->name, temp_ws_name, + p_ctlbk->linkid); + privptr->active_link_ID=p_ctlbk->linkid; + p_ch=&privptr->channel[WRITE]; + wake_up(&p_ch->wait); /* wake up claw_open ( WRITE) */ + break; + case CONNECTION_RESPONSE: + p_connect=(struct conncmd *)&(p_ctlbk->data); + printk(KERN_INFO "%s: Recv Conn Resp: Vers=%d,link_id=%d," + "Corr=%d,RC=%d,Host appl=%.8s, WS appl=%.8s\n", + dev->name, + p_ctlbk->version, + p_ctlbk->linkid, + p_ctlbk->correlator, + p_ctlbk->rc, + p_connect->host_name, + p_connect->WS_name); + + if (p_ctlbk->rc !=0 ) { + printk(KERN_INFO "%s: Conn Resp error: rc=%d \n", + dev->name, p_ctlbk->rc); + return 1; + } + rc=find_link(dev, + p_connect->host_name, p_connect->WS_name); + if (rc!=0) { + claw_snd_disc(dev, p_ctlbk); + printk(KERN_INFO "%s: Conn Resp error: " + "req appl name does not match\n", + dev->name); + } + /* negative until CONNECTION_CONFIRM arrives */ + privptr->active_link_ID = - (p_ctlbk->linkid); + break; + case CONNECTION_CONFIRM: + p_connect=(struct conncmd *)&(p_ctlbk->data); + printk(KERN_INFO "%s: Recv Conn Confirm:Vers=%d,link_id=%d," + "Corr=%d,Host appl=%.8s,WS appl=%.8s\n", + dev->name, + p_ctlbk->version, + p_ctlbk->linkid, + p_ctlbk->correlator, + p_connect->host_name, + p_connect->WS_name); + if (p_ctlbk->linkid== -(privptr->active_link_ID)) { + privptr->active_link_ID=p_ctlbk->linkid; + if (p_env->packing > PACKING_ASK) { + printk(KERN_INFO "%s: Confirmed. Now packing\n",dev->name); + p_env->packing = DO_PACKED; + } + p_ch=&privptr->channel[WRITE]; + wake_up(&p_ch->wait); + } + else { + printk(KERN_INFO "%s: Conn confirm: " + "unexpected linkid=%d \n", + dev->name, p_ctlbk->linkid); + claw_snd_disc(dev, p_ctlbk); + } + break; + case DISCONNECT: + printk(KERN_INFO "%s: Disconnect: " + "Vers=%d,link_id=%d,Corr=%d\n", + dev->name, p_ctlbk->version, + p_ctlbk->linkid, p_ctlbk->correlator); + if ((p_ctlbk->linkid == 2) && + (p_env->packing == PACK_SEND)) { + privptr->active_link_ID = 1; + p_env->packing = DO_PACKED; + } + else + privptr->active_link_ID=0; + break; + case CLAW_ERROR: + printk(KERN_INFO "%s: CLAW ERROR detected\n", + dev->name); + break; + default: + printk(KERN_INFO "%s: Unexpected command code=%d \n", + dev->name, p_ctlbk->command); + break; + } + +#ifdef FUNCTRACE + printk(KERN_INFO "%s: %s() exit on line %d, rc = 0\n", + dev->name,__FUNCTION__,__LINE__); +#endif + + return 0; +} /* end of claw_process_control */ + + +/*-------------------------------------------------------------------* +* claw_send_control * +* * +*--------------------------------------------------------------------*/ + +static int +claw_send_control(struct net_device *dev, __u8 type, __u8 link, + __u8 correlator, __u8 rc, char *local_name, char *remote_name) +{ + struct claw_privbk *privptr; + struct clawctl *p_ctl; + struct sysval *p_sysval; + struct conncmd *p_connect; + struct
sk_buff *skb; + +#ifdef FUNCTRACE + printk(KERN_INFO "%s:%s > enter \n",dev->name,__FUNCTION__); +#endif + CLAW_DBF_TEXT(2,setup,"sndcntl"); +#ifdef DEBUGMSG + printk(KERN_INFO "%s: Sending Control Packet \n",dev->name); + printk(KERN_INFO "%s: variable type = 0x%X, link = " + "%d, correlator = %d, rc = %d\n", + dev->name,type, link, correlator, rc); + printk(KERN_INFO "%s: variable local_name = %s, " + "remote_name = %s\n",dev->name, local_name, remote_name); +#endif + privptr=dev->priv; + p_ctl=(struct clawctl *)&privptr->ctl_bk; + + p_ctl->command=type; + p_ctl->version=CLAW_VERSION_ID; + p_ctl->linkid=link; + p_ctl->correlator=correlator; + p_ctl->rc=rc; + + p_sysval=(struct sysval *)&p_ctl->data; + p_connect=(struct conncmd *)&p_ctl->data; + + switch (p_ctl->command) { + case SYSTEM_VALIDATE_REQUEST: + case SYSTEM_VALIDATE_RESPONSE: + memcpy(&p_sysval->host_name, local_name, 8); + memcpy(&p_sysval->WS_name, remote_name, 8); + if (privptr->p_env->packing > 0) { + p_sysval->read_frame_size=DEF_PACK_BUFSIZE; + p_sysval->write_frame_size=DEF_PACK_BUFSIZE; + } else { + /* how big is the biggest group of packets */ + p_sysval->read_frame_size=privptr->p_env->read_size; + p_sysval->write_frame_size=privptr->p_env->write_size; + } + memset(&p_sysval->reserved, 0x00, 4); + break; + case CONNECTION_REQUEST: + case CONNECTION_RESPONSE: + case CONNECTION_CONFIRM: + case DISCONNECT: + memcpy(&p_sysval->host_name, local_name, 8); + memcpy(&p_sysval->WS_name, remote_name, 8); + if (privptr->p_env->packing > 0) { + /* How big is the biggest packet */ + p_connect->reserved1[0]=CLAW_FRAME_SIZE; + p_connect->reserved1[1]=CLAW_FRAME_SIZE; + } else { + memset(&p_connect->reserved1, 0x00, 4); + memset(&p_connect->reserved2, 0x00, 4); + } + break; + default: + break; + } + + /* write Control Record to the device */ + + + skb = dev_alloc_skb(sizeof(struct clawctl)); + if (!skb) { + printk(KERN_WARNING "%s:%s low on mem, returning...\n", + dev->name,__FUNCTION__); +#ifdef DEBUG + printk(KERN_INFO "%s:%s Exit, rc = ENOMEM\n", + dev->name,__FUNCTION__); +#endif + return -ENOMEM; + } + memcpy(skb_put(skb, sizeof(struct clawctl)), + p_ctl, sizeof(struct clawctl)); +#ifdef IOTRACE + printk(KERN_INFO "%s: outbnd claw cntl data \n",dev->name); + dumpit((char *)p_ctl,sizeof(struct clawctl)); +#endif + if (privptr->p_env->packing >= PACK_SEND) + claw_hw_tx(skb, dev, 1); + else + claw_hw_tx(skb, dev, 0); +#ifdef FUNCTRACE + printk(KERN_INFO "%s:%s Exit on line %d\n", + dev->name,__FUNCTION__,__LINE__); +#endif + + return 0; +} /* end of claw_send_control */ + +/*-------------------------------------------------------------------* +* claw_snd_conn_req * +* * +*--------------------------------------------------------------------*/ +static int +claw_snd_conn_req(struct net_device *dev, __u8 link) +{ + int rc; + struct claw_privbk *privptr=dev->priv; + struct clawctl *p_ctl; + +#ifdef FUNCTRACE + printk(KERN_INFO "%s:%s Enter \n",dev->name,__FUNCTION__); +#endif + CLAW_DBF_TEXT(2,setup,"snd_conn"); +#ifdef DEBUGMSG + printk(KERN_INFO "%s: variable link = %X, dev =\n",dev->name, link); + dumpit((char *) dev, sizeof(struct net_device)); +#endif + rc = 1; + p_ctl=(struct clawctl *)&privptr->ctl_bk; + p_ctl->linkid = link; + if ( privptr->system_validate_comp==0x00 ) { +#ifdef FUNCTRACE + printk(KERN_INFO "%s:%s Exit on line %d, rc = 1\n", + dev->name,__FUNCTION__,__LINE__); +#endif + return rc; + } + if (privptr->p_env->packing == PACKING_ASK ) + rc=claw_send_control(dev, CONNECTION_REQUEST,0,0,0, + WS_APPL_NAME_PACKED,
WS_APPL_NAME_PACKED); + if (privptr->p_env->packing == PACK_SEND) { + rc=claw_send_control(dev, CONNECTION_REQUEST,0,0,0, + WS_APPL_NAME_IP_NAME, WS_APPL_NAME_IP_NAME); + } + if (privptr->p_env->packing == 0) + rc=claw_send_control(dev, CONNECTION_REQUEST,0,0,0, + HOST_APPL_NAME, privptr->p_env->api_type); +#ifdef FUNCTRACE + printk(KERN_INFO "%s:%s Exit on line %d, rc = %d\n", + dev->name,__FUNCTION__,__LINE__, rc); +#endif + return rc; + +} /* end of claw_snd_conn_req */ + + +/*-------------------------------------------------------------------* +* claw_snd_disc * +* * +*--------------------------------------------------------------------*/ + +static int +claw_snd_disc(struct net_device *dev, struct clawctl * p_ctl) +{ + int rc; + struct conncmd * p_connect; + +#ifdef FUNCTRACE + printk(KERN_INFO "%s:%s Enter\n",dev->name,__FUNCTION__); +#endif + CLAW_DBF_TEXT(2,setup,"snd_dsc"); +#ifdef DEBUGMSG + printk(KERN_INFO "%s: variable dev =\n",dev->name); + dumpit((char *) dev, sizeof(struct net_device)); + printk(KERN_INFO "%s: variable p_ctl",dev->name); + dumpit((char *) p_ctl, sizeof(struct clawctl)); +#endif + p_connect=(struct conncmd *)&p_ctl->data; + + rc=claw_send_control(dev, DISCONNECT, p_ctl->linkid, + p_ctl->correlator, 0, + p_connect->host_name, p_connect->WS_name); +#ifdef FUNCTRACE + printk(KERN_INFO "%s:%s Exit on line %d, rc = %d\n", + dev->name,__FUNCTION__, __LINE__, rc); +#endif + return rc; +} /* end of claw_snd_disc */ + + +/*-------------------------------------------------------------------* +* claw_snd_sys_validate_rsp * +* * +*--------------------------------------------------------------------*/ + +static int +claw_snd_sys_validate_rsp(struct net_device *dev, + struct clawctl *p_ctl, __u32 return_code) +{ + struct claw_env * p_env; + struct claw_privbk *privptr; + int rc; + +#ifdef FUNCTRACE + printk(KERN_INFO "%s:%s Enter\n", + dev->name,__FUNCTION__); +#endif + CLAW_DBF_TEXT(2,setup,"chkresp"); +#ifdef DEBUGMSG + printk(KERN_INFO "%s: variable return_code = %d, dev =\n", + dev->name, return_code); + dumpit((char *) dev, sizeof(struct net_device)); + printk(KERN_INFO "%s: variable p_ctl =\n",dev->name); + dumpit((char *) p_ctl, sizeof(struct clawctl)); +#endif + privptr = dev->priv; + p_env=privptr->p_env; + rc=claw_send_control(dev, SYSTEM_VALIDATE_RESPONSE, + p_ctl->linkid, + p_ctl->correlator, + return_code, + p_env->host_name, + p_env->adapter_name ); +#ifdef FUNCTRACE + printk(KERN_INFO "%s:%s Exit on line %d, rc = %d\n", + dev->name,__FUNCTION__,__LINE__, rc); +#endif + return rc; +} /* end of claw_snd_sys_validate_rsp */ + +/*-------------------------------------------------------------------* +* claw_strt_conn_req * +* * +*--------------------------------------------------------------------*/ + +static int +claw_strt_conn_req(struct net_device *dev ) +{ + int rc; + +#ifdef FUNCTRACE + printk(KERN_INFO "%s:%s Enter\n",dev->name,__FUNCTION__); +#endif + CLAW_DBF_TEXT(2,setup,"conn_req"); +#ifdef DEBUGMSG + printk(KERN_INFO "%s: variable dev =\n",dev->name); + dumpit((char *) dev, sizeof(struct net_device)); +#endif + rc=claw_snd_conn_req(dev, 1); +#ifdef FUNCTRACE + printk(KERN_INFO "%s:%s Exit on line %d, rc = %d\n", + dev->name,__FUNCTION__,__LINE__, rc); +#endif + return rc; +} /* end of claw_strt_conn_req */ + + + +/*-------------------------------------------------------------------* + * claw_stats * + *-------------------------------------------------------------------*/ + +static struct +net_device_stats *claw_stats(struct net_device *dev) +{ + struct 
claw_privbk *privptr; +#ifdef FUNCTRACE + printk(KERN_INFO "%s:%s Enter\n",dev->name,__FUNCTION__); +#endif + CLAW_DBF_TEXT(4,trace,"stats"); + privptr = dev->priv; +#ifdef FUNCTRACE + printk(KERN_INFO "%s:%s Exit on line %d\n", + dev->name,__FUNCTION__,__LINE__); +#endif + return &privptr->stats; +} /* end of claw_stats */ + + +/*-------------------------------------------------------------------* +* unpack_read * +* * +*--------------------------------------------------------------------*/ +static void +unpack_read(struct net_device *dev ) +{ + struct sk_buff *skb; + struct claw_privbk *privptr; + struct claw_env *p_env; + struct ccwbk *p_this_ccw; + struct ccwbk *p_first_ccw; + struct ccwbk *p_last_ccw; + struct clawph *p_packh; + void *p_packd; + struct clawctl *p_ctlrec=NULL; + + __u32 len_of_data; + __u32 pack_off; + __u8 link_num; + __u8 mtc_this_frm=0; + __u32 bytes_to_mov; + struct chbk *p_ch = NULL; + int i=0; + int p=0; + +#ifdef FUNCTRACE + printk(KERN_INFO "%s:%s enter \n",dev->name,__FUNCTION__); +#endif + CLAW_DBF_TEXT(4,trace,"unpkread"); + p_first_ccw=NULL; + p_last_ccw=NULL; + p_packh=NULL; + p_packd=NULL; + privptr=dev->priv; + p_env = privptr->p_env; + p_this_ccw=privptr->p_read_active_first; + i=0; + while (p_this_ccw!=NULL && p_this_ccw->header.flag!=CLAW_PENDING) { +#ifdef IOTRACE + printk(KERN_INFO "%s p_this_ccw \n",dev->name); + dumpit((char*)p_this_ccw, sizeof(struct ccwbk)); + printk(KERN_INFO "%s Inbound p_this_ccw->p_buffer(64)" + " pk=%d \n",dev->name,p_env->packing); + dumpit((char *)p_this_ccw->p_buffer, 64 ); +#endif + pack_off = 0; + p = 0; + p_this_ccw->header.flag=CLAW_PENDING; + privptr->p_read_active_first=p_this_ccw->next; + p_this_ccw->next=NULL; + p_packh = (struct clawph *)p_this_ccw->p_buffer; + if ((p_env->packing == PACK_SEND) && + (p_packh->len == 32) && + (p_packh->link_num == 0)) { /* is it a packed ctl rec? */ + p_packh++; /* peek past pack header */ + p_ctlrec = (struct clawctl *)p_packh; + p_packh--; /* un peek */ + if ((p_ctlrec->command == CONNECTION_RESPONSE) || + (p_ctlrec->command == CONNECTION_CONFIRM)) + p_env->packing = DO_PACKED; + } + if (p_env->packing == DO_PACKED) + link_num=p_packh->link_num; + else + link_num=p_this_ccw->header.opcode / 8; + if ((p_this_ccw->header.opcode & MORE_to_COME_FLAG)!=0) { +#ifdef DEBUGMSG + printk(KERN_INFO "%s: %s > More_to_come is ON\n", + dev->name,__FUNCTION__); +#endif + mtc_this_frm=1; + if (p_this_ccw->header.length!= + privptr->p_env->read_size ) { + printk(KERN_INFO " %s: Invalid frame detected " + "length is %02x\n" , + dev->name, p_this_ccw->header.length); + } + } + + if (privptr->mtc_skipping) { + /* + * We're in the mode of skipping past a + * multi-frame message + * that we can't process for some reason or other. + * The first frame without the More-To-Come flag is + * the last frame of the skipped message. 
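+ * For example, a message spanning three read buffers arrives as two + * frames with More-To-Come set followed by one without; while skipping + * is on, all of them are dropped, and the third frame (flag clear) + * switches skipping back off.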
+ */ + /* in case of More-To-Come not set in this frame */ + if (mtc_this_frm==0) { + privptr->mtc_skipping=0; /* Ok, the end */ + privptr->mtc_logical_link=-1; + } +#ifdef DEBUGMSG + printk(KERN_INFO "%s:%s goto next " + "frame from MoretoComeSkip \n", + dev->name,__FUNCTION__); +#endif + goto NextFrame; + } + + if (link_num==0) { + claw_process_control(dev, p_this_ccw); +#ifdef DEBUGMSG + printk(KERN_INFO "%s:%s goto next " + "frame from claw_process_control \n", + dev->name,__FUNCTION__); +#endif + CLAW_DBF_TEXT(4,trace,"UnpkCntl"); + goto NextFrame; + } +unpack_next: + if (p_env->packing == DO_PACKED) { + if (pack_off > p_env->read_size) + goto NextFrame; + p_packd = p_this_ccw->p_buffer+pack_off; + p_packh = (struct clawph *) p_packd; + if ((p_packh->len == 0) || /* all done with this frame? */ + (p_packh->flag != 0)) + goto NextFrame; + bytes_to_mov = p_packh->len; + pack_off += bytes_to_mov+sizeof(struct clawph); + p++; + } else { + bytes_to_mov=p_this_ccw->header.length; + } + if (privptr->mtc_logical_link<0) { +#ifdef DEBUGMSG + printk(KERN_INFO "%s: %s mtc_logical_link < 0 \n", + dev->name,__FUNCTION__); +#endif + + /* + * if More-To-Come is set in this frame then we don't know + * length of entire message, and hence have to allocate + * large buffer */ + + /* We are starting a new envelope */ + privptr->mtc_offset=0; + privptr->mtc_logical_link=link_num; + } + + if (bytes_to_mov > (MAX_ENVELOPE_SIZE- privptr->mtc_offset) ) { + /* error */ +#ifdef DEBUGMSG + printk(KERN_INFO "%s: %s > goto next " + "frame from MoretoComeSkip \n", + dev->name, + __FUNCTION__); + printk(KERN_INFO " bytes_to_mov %d > (MAX_ENVELOPE_" + "SIZE-privptr->mtc_offset %d)\n", + bytes_to_mov,(MAX_ENVELOPE_SIZE- privptr->mtc_offset)); +#endif + privptr->stats.rx_frame_errors++; + goto NextFrame; + } + if (p_env->packing == DO_PACKED) { + memcpy( privptr->p_mtc_envelope+ privptr->mtc_offset, + p_packd+sizeof(struct clawph), bytes_to_mov); + + } else { + memcpy( privptr->p_mtc_envelope+ privptr->mtc_offset, + p_this_ccw->p_buffer, bytes_to_mov); + } +#ifdef DEBUGMSG + printk(KERN_INFO "%s: %s() received data \n", + dev->name,__FUNCTION__); + if (p_env->packing == DO_PACKED) + dumpit((char *)p_packd+sizeof(struct clawph),32); + else + dumpit((char *)p_this_ccw->p_buffer, 32); + printk(KERN_INFO "%s: %s() bytelength %d \n", + dev->name,__FUNCTION__,bytes_to_mov); +#endif + if (mtc_this_frm==0) { + len_of_data=privptr->mtc_offset+bytes_to_mov; + skb=dev_alloc_skb(len_of_data); + if (skb) { + memcpy(skb_put(skb,len_of_data), + privptr->p_mtc_envelope, + len_of_data); + skb->mac.raw=skb->data; + skb->dev=dev; + skb->protocol=htons(ETH_P_IP); + skb->ip_summed=CHECKSUM_UNNECESSARY; + privptr->stats.rx_packets++; + privptr->stats.rx_bytes+=len_of_data; + netif_rx(skb); +#ifdef DEBUGMSG + printk(KERN_INFO "%s: %s() netif_" + "rx(skb) completed \n", + dev->name,__FUNCTION__); +#endif + } + else { + privptr->stats.rx_dropped++; + printk(KERN_WARNING "%s: %s() low on memory\n", + dev->name,__FUNCTION__); + } + privptr->mtc_offset=0; + privptr->mtc_logical_link=-1; + } + else { + privptr->mtc_offset+=bytes_to_mov; + } + if (p_env->packing == DO_PACKED) + goto unpack_next; +NextFrame: + /* + * Remove ThisCCWblock from active read queue, and add it + * to queue of free blocks to be reused. 
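+ * (the blocks collected in p_first_ccw/p_last_ccw are re-chained onto + * the read channel program by add_claw_reads() once the loop ends)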
+ */ + i++; + p_this_ccw->header.length=0xffff; + p_this_ccw->header.opcode=0xff; + /* + * add this one to the free queue for later reuse + */ + if (p_first_ccw==NULL) { + p_first_ccw = p_this_ccw; + } + else { + p_last_ccw->next = p_this_ccw; + } + p_last_ccw = p_this_ccw; + /* + * chain to next block on active read queue + */ + p_this_ccw = privptr->p_read_active_first; + CLAW_DBF_TEXT_(4,trace,"rxpkt %d",p); + } /* end of while */ + + /* check validity */ + +#ifdef IOTRACE + printk(KERN_INFO "%s:%s processed frame is %d \n", + dev->name,__FUNCTION__,i); + printk(KERN_INFO "%s:%s F:%lx L:%lx\n", + dev->name, + __FUNCTION__, + (unsigned long)p_first_ccw, + (unsigned long)p_last_ccw); +#endif + CLAW_DBF_TEXT_(4,trace,"rxfrm %d",i); + add_claw_reads(dev, p_first_ccw, p_last_ccw); + p_ch=&privptr->channel[READ]; + claw_strt_read(dev, LOCK_YES); +#ifdef FUNCTRACE + printk(KERN_INFO "%s: %s exit on line %d\n", + dev->name, __FUNCTION__, __LINE__); +#endif + return; +} /* end of unpack_read */ + +/*-------------------------------------------------------------------* +* claw_strt_read * +* * +*--------------------------------------------------------------------*/ +static void +claw_strt_read (struct net_device *dev, int lock ) +{ + int rc = 0; + __u32 parm; + unsigned long saveflags = 0; + struct claw_privbk *privptr=dev->priv; + struct ccwbk*p_ccwbk; + struct chbk *p_ch; + struct clawh *p_clawh; + p_ch=&privptr->channel[READ]; + +#ifdef FUNCTRACE + printk(KERN_INFO "%s:%s Enter \n",dev->name,__FUNCTION__); + printk(KERN_INFO "%s: variable lock = %d, dev =\n",dev->name, lock); + dumpit((char *) dev, sizeof(struct net_device)); +#endif + CLAW_DBF_TEXT(4,trace,"StRdNter"); + p_clawh=(struct clawh *)privptr->p_claw_signal_blk; + p_clawh->flag=CLAW_IDLE; /* 0x00 */ + + if ((privptr->p_write_active_first!=NULL && + privptr->p_write_active_first->header.flag!=CLAW_PENDING) || + (privptr->p_read_active_first!=NULL && + privptr->p_read_active_first->header.flag!=CLAW_PENDING )) { + p_clawh->flag=CLAW_BUSY; /* 0xff */ + } +#ifdef DEBUGMSG + printk(KERN_INFO "%s:%s state-%02x\n" , + dev->name,__FUNCTION__, p_ch->claw_state); +#endif + if (lock==LOCK_YES) { + spin_lock_irqsave(get_ccwdev_lock(p_ch->cdev), saveflags); + } + if (test_and_set_bit(0, (void *)&p_ch->IO_active) == 0) { +#ifdef DEBUGMSG + printk(KERN_INFO "%s: HOT READ started in %s\n" , + dev->name,__FUNCTION__); + p_clawh=(struct clawh *)privptr->p_claw_signal_blk; + dumpit((char *)&p_clawh->flag , 1); +#endif + CLAW_DBF_TEXT(4,trace,"HotRead"); + p_ccwbk=privptr->p_read_active_first; + parm = (unsigned long) p_ch; + rc = ccw_device_start (p_ch->cdev, &p_ccwbk->read, parm, + 0xff, 0); + if (rc != 0) { + ccw_check_return_code(p_ch->cdev, rc); + } + } + else { +#ifdef DEBUGMSG + printk(KERN_INFO "%s: No READ started by %s() In progress\n" , + dev->name,__FUNCTION__); +#endif + CLAW_DBF_TEXT(2,trace,"ReadAct"); + } + + if (lock==LOCK_YES) { + spin_unlock_irqrestore(get_ccwdev_lock(p_ch->cdev), saveflags); + } +#ifdef FUNCTRACE + printk(KERN_INFO "%s:%s Exit on line %d\n", + dev->name,__FUNCTION__,__LINE__); +#endif + CLAW_DBF_TEXT(4,trace,"StRdExit"); + return; +} /* end of claw_strt_read */ + +/*-------------------------------------------------------------------* +* claw_strt_out_IO * +* * +*--------------------------------------------------------------------*/ + +static void +claw_strt_out_IO( struct net_device *dev ) +{ + int rc = 0; + unsigned long parm; + struct claw_privbk *privptr; + struct chbk *p_ch; + struct ccwbk *p_first_ccw; + +#ifdef 
FUNCTRACE + printk(KERN_INFO "%s:%s Enter\n",dev->name,__FUNCTION__); +#endif + if (!dev) { + return; + } + privptr=(struct claw_privbk *)dev->priv; + p_ch=&privptr->channel[WRITE]; + +#ifdef DEBUGMSG + printk(KERN_INFO "%s:%s state-%02x\n" , + dev->name,__FUNCTION__,p_ch->claw_state); +#endif + CLAW_DBF_TEXT(4,trace,"strt_io"); + p_first_ccw=privptr->p_write_active_first; + + if (p_ch->claw_state == CLAW_STOP) + return; + if (p_first_ccw == NULL) { +#ifdef FUNCTRACE + printk(KERN_INFO "%s:%s Exit on line %d\n", + dev->name,__FUNCTION__,__LINE__); +#endif + return; + } + if (test_and_set_bit(0, (void *)&p_ch->IO_active) == 0) { + parm = (unsigned long) p_ch; +#ifdef DEBUGMSG + printk(KERN_INFO "%s:%s do_io \n" ,dev->name,__FUNCTION__); + dumpit((char *)p_first_ccw, sizeof(struct ccwbk)); +#endif + CLAW_DBF_TEXT(2,trace,"StWrtIO"); + rc = ccw_device_start (p_ch->cdev,&p_first_ccw->write, parm, + 0xff, 0); + if (rc != 0) { + ccw_check_return_code(p_ch->cdev, rc); + } + } + dev->trans_start = jiffies; +#ifdef FUNCTRACE + printk(KERN_INFO "%s:%s Exit on line %d\n", + dev->name,__FUNCTION__,__LINE__); +#endif + + return; +} /* end of claw_strt_out_IO */ + +/*-------------------------------------------------------------------* +* Free write buffers * +* * +*--------------------------------------------------------------------*/ + +static void +claw_free_wrt_buf( struct net_device *dev ) +{ + + struct claw_privbk *privptr=(struct claw_privbk *)dev->priv; + struct ccwbk*p_first_ccw; + struct ccwbk*p_last_ccw; + struct ccwbk*p_this_ccw; + struct ccwbk*p_next_ccw; +#ifdef IOTRACE + struct ccwbk*p_buf; +#endif +#ifdef FUNCTRACE + printk(KERN_INFO "%s:%s Enter\n",dev->name,__FUNCTION__); + printk(KERN_INFO "%s: free count = %d variable dev =\n", + dev->name,privptr->write_free_count); +#endif + CLAW_DBF_TEXT(4,trace,"freewrtb"); + /* scan the write queue to free any completed write packets */ + p_first_ccw=NULL; + p_last_ccw=NULL; +#ifdef IOTRACE + printk(KERN_INFO "%s: Dump current CCW chain \n",dev->name ); + p_buf=privptr->p_write_active_first; + while (p_buf!=NULL) { + dumpit((char *)p_buf, sizeof(struct ccwbk)); + p_buf=p_buf->next; + } + if (p_buf==NULL) { + printk(KERN_INFO "%s: privptr->p_write_" + "active_first==NULL\n",dev->name ); + } + p_buf=(struct ccwbk*)privptr->p_end_ccw; + dumpit((char *)p_buf, sizeof(struct endccw)); +#endif + p_this_ccw=privptr->p_write_active_first; + while ( (p_this_ccw!=NULL) && (p_this_ccw->header.flag!=CLAW_PENDING)) + { + p_next_ccw = p_this_ccw->next; + if (((p_next_ccw!=NULL) && + (p_next_ccw->header.flag!=CLAW_PENDING)) || + ((p_this_ccw == privptr->p_write_active_last) && + (p_this_ccw->header.flag!=CLAW_PENDING))) { + /* The next CCW is OK or this is */ + /* the last CCW...free it @A1A */ + privptr->p_write_active_first=p_this_ccw->next; + p_this_ccw->header.flag=CLAW_PENDING; + p_this_ccw->next=privptr->p_write_free_chain; + privptr->p_write_free_chain=p_this_ccw; + ++privptr->write_free_count; + privptr->stats.tx_bytes+= p_this_ccw->write.count; + p_this_ccw=privptr->p_write_active_first; + privptr->stats.tx_packets++; + } + else { + break; + } + } + if (privptr->write_free_count!=0) { + claw_clearbit_busy(TB_NOBUFFER,dev); + } + /* whole chain removed? 
+	*/
+	if (privptr->p_write_active_first==NULL) {
+		privptr->p_write_active_last=NULL;
+#ifdef DEBUGMSG
+		printk(KERN_INFO "%s:%s p_write_"
+			"active_first==NULL\n",dev->name,__FUNCTION__);
+#endif
+	}
+#ifdef IOTRACE
+	printk(KERN_INFO "%s: Dump arranged CCW chain \n",dev->name );
+	p_buf=privptr->p_write_active_first;
+	while (p_buf!=NULL) {
+		dumpit((char *)p_buf, sizeof(struct ccwbk));
+		p_buf=p_buf->next;
+	}
+	if (p_buf==NULL) {
+		printk(KERN_INFO "%s: privptr->p_write_active_"
+			"first==NULL\n",dev->name );
+	}
+	p_buf=(struct ccwbk*)privptr->p_end_ccw;
+	dumpit((char *)p_buf, sizeof(struct endccw));
+#endif
+
+	CLAW_DBF_TEXT_(4,trace,"FWC=%d",privptr->write_free_count);
+#ifdef FUNCTRACE
+	printk(KERN_INFO "%s:%s Exit on line %d free_count =%d\n",
+		dev->name,__FUNCTION__, __LINE__,privptr->write_free_count);
+#endif
+	return;
+}
+
+/*-------------------------------------------------------------------*
+*       claw free netdevice                                          *
+*                                                                    *
+*--------------------------------------------------------------------*/
+static void
+claw_free_netdevice(struct net_device * dev, int free_dev)
+{
+	struct claw_privbk *privptr;
+
+	if (!dev)	/* check before dev->name is dereferenced below */
+		return;
+#ifdef FUNCTRACE
+	printk(KERN_INFO "%s:%s Enter\n",dev->name,__FUNCTION__);
+#endif
+	CLAW_DBF_TEXT(2,setup,"free_dev");
+	CLAW_DBF_TEXT_(2,setup,"%s",dev->name);
+	privptr = dev->priv;
+	if (dev->flags & IFF_RUNNING)
+		claw_release(dev);
+	if (privptr) {
+		privptr->channel[READ].ndev = NULL;  /* say it's free */
+	}
+	dev->priv=NULL;
+#ifdef FUNCTRACE
+	printk(KERN_INFO "%s:%s Exit\n",dev->name,__FUNCTION__);
+#endif
+#ifdef MODULE
+	if (free_dev) {
+		free_netdev(dev);	/* dev must not be used past here */
+	}
+#endif
+	CLAW_DBF_TEXT(2,setup,"free_ok");
+}
+
+/**
+ * Claw init netdevice
+ * Initialize everything of the net device except the name and the
+ * channel structs.
+ */
+static void
+claw_init_netdevice(struct net_device * dev)
+{
+	if (!dev) {	/* check before dev->name is dereferenced below */
+		printk(KERN_WARNING "claw:%s BAD Device exit line %d\n",
+			__FUNCTION__,__LINE__);
+		CLAW_DBF_TEXT(2,setup,"baddev");
+		return;
+	}
+#ifdef FUNCTRACE
+	printk(KERN_INFO "%s:%s Enter\n",dev->name,__FUNCTION__);
+#endif
+	CLAW_DBF_TEXT(2,setup,"init_dev");
+	CLAW_DBF_TEXT_(2,setup,"%s",dev->name);
+	dev->mtu = CLAW_DEFAULT_MTU_SIZE;
+	dev->hard_start_xmit = claw_tx;
+	dev->open = claw_open;
+	dev->stop = claw_release;
+	dev->get_stats = claw_stats;
+	dev->change_mtu = claw_change_mtu;
+	dev->hard_header_len = 0;
+	dev->addr_len = 0;
+	dev->type = ARPHRD_SLIP;
+	dev->tx_queue_len = 1300;
+	dev->flags = IFF_POINTOPOINT | IFF_NOARP;
+	SET_MODULE_OWNER(dev);
+#ifdef FUNCTRACE
+	printk(KERN_INFO "%s:%s Exit\n",dev->name,__FUNCTION__);
+#endif
+	CLAW_DBF_TEXT(2,setup,"initok");
+	return;
+}
+
+/**
+ * Init a new channel in the privptr->channel[i].
+ *
+ * @param cdev  The ccw_device to be added.
+ *
+ * @return 0 on success, !0 on error.
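+ *
+ * Called from claw_new_device(), once for each of the two
+ * subchannels of the ccwgroup device:
+ *
+ *	ret = add_channel(cgdev->cdev[0], 0, privptr);
+ *	if (ret == 0)
+ *		ret = add_channel(cgdev->cdev[1], 1, privptr);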
+ */ +static int +add_channel(struct ccw_device *cdev,int i,struct claw_privbk *privptr) +{ + struct chbk *p_ch; + +#ifdef FUNCTRACE + printk(KERN_INFO "%s:%s Enter\n",cdev->dev.bus_id,__FUNCTION__); +#endif + CLAW_DBF_TEXT_(2,setup,"%s",cdev->dev.bus_id); + privptr->channel[i].flag = i+1; /* Read is 1 Write is 2 */ + p_ch = &privptr->channel[i]; + p_ch->cdev = cdev; + snprintf(p_ch->id, CLAW_ID_SIZE, "cl-%s", cdev->dev.bus_id); + sscanf(cdev->dev.bus_id+4,"%x",&p_ch->devno); + if ((p_ch->irb = kmalloc(sizeof (struct irb),GFP_KERNEL)) == NULL) { + printk(KERN_WARNING "%s Out of memory in %s for irb\n", + p_ch->id,__FUNCTION__); +#ifdef FUNCTRACE + printk(KERN_INFO "%s:%s Exit on line %d\n", + p_ch->id,__FUNCTION__,__LINE__); +#endif + return -ENOMEM; + } + memset(p_ch->irb, 0, sizeof (struct irb)); +#ifdef FUNCTRACE + printk(KERN_INFO "%s:%s Exit on line %d\n", + cdev->dev.bus_id,__FUNCTION__,__LINE__); +#endif + return 0; +} + + +/** + * + * Setup an interface. + * + * @param cgdev Device to be setup. + * + * @returns 0 on success, !0 on failure. + */ +static int +claw_new_device(struct ccwgroup_device *cgdev) +{ + struct claw_privbk *privptr; + struct claw_env *p_env; + struct net_device *dev; + int ret; + + pr_debug("%s() called\n", __FUNCTION__); + printk(KERN_INFO "claw: add for %s\n",cgdev->cdev[READ]->dev.bus_id); + CLAW_DBF_TEXT(2,setup,"new_dev"); + privptr = cgdev->dev.driver_data; + cgdev->cdev[READ]->dev.driver_data = privptr; + cgdev->cdev[WRITE]->dev.driver_data = privptr; + if (!privptr) + return -ENODEV; + p_env = privptr->p_env; + sscanf(cgdev->cdev[READ]->dev.bus_id+4,"%x", + &p_env->devno[READ]); + sscanf(cgdev->cdev[WRITE]->dev.bus_id+4,"%x", + &p_env->devno[WRITE]); + ret = add_channel(cgdev->cdev[0],0,privptr); + if (ret == 0) + ret = add_channel(cgdev->cdev[1],1,privptr); + if (ret != 0) { + printk(KERN_WARNING + "add channel failed " + "with ret = %d\n", ret); + goto out; + } + ret = ccw_device_set_online(cgdev->cdev[READ]); + if (ret != 0) { + printk(KERN_WARNING + "claw: ccw_device_set_online %s READ failed " + "with ret = %d\n",cgdev->cdev[READ]->dev.bus_id,ret); + goto out; + } + ret = ccw_device_set_online(cgdev->cdev[WRITE]); + if (ret != 0) { + printk(KERN_WARNING + "claw: ccw_device_set_online %s WRITE failed " + "with ret = %d\n",cgdev->cdev[WRITE]->dev.bus_id, ret); + goto out; + } + dev = alloc_netdev(0,"claw%d",claw_init_netdevice); + if (!dev) { + printk(KERN_WARNING "%s:alloc_netdev failed\n",__FUNCTION__); + goto out; + } + dev->priv = privptr; + cgdev->dev.driver_data = privptr; + cgdev->cdev[READ]->dev.driver_data = privptr; + cgdev->cdev[WRITE]->dev.driver_data = privptr; + /* sysfs magic */ + SET_NETDEV_DEV(dev, &cgdev->dev); + if (register_netdev(dev) != 0) { + claw_free_netdevice(dev, 1); + CLAW_DBF_TEXT(2,trace,"regfail"); + goto out; + } + dev->flags &=~IFF_RUNNING; + if (privptr->buffs_alloc == 0) { + ret=init_ccw_bk(dev); + if (ret !=0) { + printk(KERN_WARNING + "claw: init_ccw_bk failed with ret=%d\n", ret); + unregister_netdev(dev); + claw_free_netdevice(dev,1); + CLAW_DBF_TEXT(2,trace,"ccwmem"); + goto out; + } + } + privptr->channel[READ].ndev = dev; + privptr->channel[WRITE].ndev = dev; + privptr->p_env->ndev = dev; + + printk(KERN_INFO "%s:readsize=%d writesize=%d " + "readbuffer=%d writebuffer=%d read=0x%04x write=0x%04x\n", + dev->name, p_env->read_size, + p_env->write_size, p_env->read_buffers, + p_env->write_buffers, p_env->devno[READ], + p_env->devno[WRITE]); + printk(KERN_INFO "%s:host_name:%.8s, adapter_name " + ":%.8s api_type: 
%.8s\n", + dev->name, p_env->host_name, + p_env->adapter_name , p_env->api_type); + return 0; +out: + ccw_device_set_offline(cgdev->cdev[1]); + ccw_device_set_offline(cgdev->cdev[0]); + + return -ENODEV; +} + +static void +claw_purge_skb_queue(struct sk_buff_head *q) +{ + struct sk_buff *skb; + + CLAW_DBF_TEXT(4,trace,"purgque"); + + while ((skb = skb_dequeue(q))) { + atomic_dec(&skb->users); + dev_kfree_skb_irq(skb); + } +} + +/** + * Shutdown an interface. + * + * @param cgdev Device to be shut down. + * + * @returns 0 on success, !0 on failure. + */ +static int +claw_shutdown_device(struct ccwgroup_device *cgdev) +{ + struct claw_privbk *priv; + struct net_device *ndev; + int ret; + + pr_debug("%s() called\n", __FUNCTION__); + CLAW_DBF_TEXT_(2,setup,"%s",cgdev->dev.bus_id); + priv = cgdev->dev.driver_data; + if (!priv) + return -ENODEV; + ndev = priv->channel[READ].ndev; + if (ndev) { + /* Close the device */ + printk(KERN_INFO + "%s: shuting down \n",ndev->name); + if (ndev->flags & IFF_RUNNING) + ret = claw_release(ndev); + ndev->flags &=~IFF_RUNNING; + unregister_netdev(ndev); + ndev->priv = NULL; /* cgdev data, not ndev's to free */ + claw_free_netdevice(ndev, 1); + priv->channel[READ].ndev = NULL; + priv->channel[WRITE].ndev = NULL; + priv->p_env->ndev = NULL; + } + ccw_device_set_offline(cgdev->cdev[1]); + ccw_device_set_offline(cgdev->cdev[0]); + return 0; +} + +static void +claw_remove_device(struct ccwgroup_device *cgdev) +{ + struct claw_privbk *priv; + + pr_debug("%s() called\n", __FUNCTION__); + CLAW_DBF_TEXT_(2,setup,"%s",cgdev->dev.bus_id); + priv = cgdev->dev.driver_data; + if (!priv) { + printk(KERN_WARNING "claw: %s() no Priv exiting\n",__FUNCTION__); + return; + } + printk(KERN_INFO "claw: %s() called %s will be removed.\n", + __FUNCTION__,cgdev->cdev[0]->dev.bus_id); + if (cgdev->state == CCWGROUP_ONLINE) + claw_shutdown_device(cgdev); + claw_remove_files(&cgdev->dev); + if (priv->p_mtc_envelope!=NULL) { + kfree(priv->p_mtc_envelope); + priv->p_mtc_envelope=NULL; + } + if (priv->p_env != NULL) { + kfree(priv->p_env); + priv->p_env=NULL; + } + if (priv->channel[0].irb != NULL) { + kfree(priv->channel[0].irb); + priv->channel[0].irb=NULL; + } + if (priv->channel[1].irb != NULL) { + kfree(priv->channel[1].irb); + priv->channel[1].irb=NULL; + } + kfree(priv); + cgdev->dev.driver_data=NULL; + cgdev->cdev[READ]->dev.driver_data = NULL; + cgdev->cdev[WRITE]->dev.driver_data = NULL; + put_device(&cgdev->dev); +} + + +/* + * sysfs attributes + */ +static ssize_t +claw_hname_show(struct device *dev, char *buf) +{ + struct claw_privbk *priv; + struct claw_env * p_env; + + priv = dev->driver_data; + if (!priv) + return -ENODEV; + p_env = priv->p_env; + return sprintf(buf, "%s\n",p_env->host_name); +} + +static ssize_t +claw_hname_write(struct device *dev, const char *buf, size_t count) +{ + struct claw_privbk *priv; + struct claw_env * p_env; + + priv = dev->driver_data; + if (!priv) + return -ENODEV; + p_env = priv->p_env; + if (count > MAX_NAME_LEN+1) + return -EINVAL; + memset(p_env->host_name, 0x20, MAX_NAME_LEN); + strncpy(p_env->host_name,buf, count); + p_env->host_name[count-1] = 0x20; /* clear extra 0x0a */ + p_env->host_name[MAX_NAME_LEN] = 0x00; + CLAW_DBF_TEXT(2,setup,"HstnSet"); + CLAW_DBF_TEXT_(2,setup,"%s",p_env->host_name); + + return count; +} + +static DEVICE_ATTR(host_name, 0644, claw_hname_show, claw_hname_write); + +static ssize_t +claw_adname_show(struct device *dev, char *buf) +{ + struct claw_privbk *priv; + struct claw_env * p_env; + + priv = 
dev->driver_data; + if (!priv) + return -ENODEV; + p_env = priv->p_env; + return sprintf(buf, "%s\n",p_env->adapter_name); +} + +static ssize_t +claw_adname_write(struct device *dev, const char *buf, size_t count) +{ + struct claw_privbk *priv; + struct claw_env * p_env; + + priv = dev->driver_data; + if (!priv) + return -ENODEV; + p_env = priv->p_env; + if (count > MAX_NAME_LEN+1) + return -EINVAL; + memset(p_env->adapter_name, 0x20, MAX_NAME_LEN); + strncpy(p_env->adapter_name,buf, count); + p_env->adapter_name[count-1] = 0x20; /* clear extra 0x0a */ + p_env->adapter_name[MAX_NAME_LEN] = 0x00; + CLAW_DBF_TEXT(2,setup,"AdnSet"); + CLAW_DBF_TEXT_(2,setup,"%s",p_env->adapter_name); + + return count; +} + +static DEVICE_ATTR(adapter_name, 0644, claw_adname_show, claw_adname_write); + +static ssize_t +claw_apname_show(struct device *dev, char *buf) +{ + struct claw_privbk *priv; + struct claw_env * p_env; + + priv = dev->driver_data; + if (!priv) + return -ENODEV; + p_env = priv->p_env; + return sprintf(buf, "%s\n", + p_env->api_type); +} + +static ssize_t +claw_apname_write(struct device *dev, const char *buf, size_t count) +{ + struct claw_privbk *priv; + struct claw_env * p_env; + + priv = dev->driver_data; + if (!priv) + return -ENODEV; + p_env = priv->p_env; + if (count > MAX_NAME_LEN+1) + return -EINVAL; + memset(p_env->api_type, 0x20, MAX_NAME_LEN); + strncpy(p_env->api_type,buf, count); + p_env->api_type[count-1] = 0x20; /* we get a loose 0x0a */ + p_env->api_type[MAX_NAME_LEN] = 0x00; + if(strncmp(p_env->api_type,WS_APPL_NAME_PACKED,6) == 0) { + p_env->read_size=DEF_PACK_BUFSIZE; + p_env->write_size=DEF_PACK_BUFSIZE; + p_env->packing=PACKING_ASK; + CLAW_DBF_TEXT(2,setup,"PACKING"); + } + else { + p_env->packing=0; + p_env->read_size=CLAW_FRAME_SIZE; + p_env->write_size=CLAW_FRAME_SIZE; + CLAW_DBF_TEXT(2,setup,"ApiSet"); + } + CLAW_DBF_TEXT_(2,setup,"%s",p_env->api_type); + return count; +} + +static DEVICE_ATTR(api_type, 0644, claw_apname_show, claw_apname_write); + +static ssize_t +claw_wbuff_show(struct device *dev, char *buf) +{ + struct claw_privbk *priv; + struct claw_env * p_env; + + priv = dev->driver_data; + if (!priv) + return -ENODEV; + p_env = priv->p_env; + return sprintf(buf, "%d\n", p_env->write_buffers); +} + +static ssize_t +claw_wbuff_write(struct device *dev, const char *buf, size_t count) +{ + struct claw_privbk *priv; + struct claw_env * p_env; + int nnn,max; + + priv = dev->driver_data; + if (!priv) + return -ENODEV; + p_env = priv->p_env; + sscanf(buf, "%i", &nnn); + if (p_env->packing) { + max = 64; + } + else { + max = 512; + } + if ((nnn > max ) || (nnn < 2)) + return -EINVAL; + p_env->write_buffers = nnn; + CLAW_DBF_TEXT(2,setup,"Wbufset"); + CLAW_DBF_TEXT_(2,setup,"WB=%d",p_env->write_buffers); + return count; +} + +static DEVICE_ATTR(write_buffer, 0644, claw_wbuff_show, claw_wbuff_write); + +static ssize_t +claw_rbuff_show(struct device *dev, char *buf) +{ + struct claw_privbk *priv; + struct claw_env * p_env; + + priv = dev->driver_data; + if (!priv) + return -ENODEV; + p_env = priv->p_env; + return sprintf(buf, "%d\n", p_env->read_buffers); +} + +static ssize_t +claw_rbuff_write(struct device *dev, const char *buf, size_t count) +{ + struct claw_privbk *priv; + struct claw_env *p_env; + int nnn,max; + + priv = dev->driver_data; + if (!priv) + return -ENODEV; + p_env = priv->p_env; + sscanf(buf, "%i", &nnn); + if (p_env->packing) { + max = 64; + } + else { + max = 512; + } + if ((nnn > max ) || (nnn < 2)) + return -EINVAL; + p_env->read_buffers = nnn; + 
CLAW_DBF_TEXT(2,setup,"Rbufset"); + CLAW_DBF_TEXT_(2,setup,"RB=%d",p_env->read_buffers); + return count; +} + +static DEVICE_ATTR(read_buffer, 0644, claw_rbuff_show, claw_rbuff_write); + +static struct attribute *claw_attr[] = { + &dev_attr_read_buffer.attr, + &dev_attr_write_buffer.attr, + &dev_attr_adapter_name.attr, + &dev_attr_api_type.attr, + &dev_attr_host_name.attr, + NULL, +}; + +static struct attribute_group claw_attr_group = { + .attrs = claw_attr, +}; + +static int +claw_add_files(struct device *dev) +{ + pr_debug("%s() called\n", __FUNCTION__); + CLAW_DBF_TEXT(2,setup,"add_file"); + return sysfs_create_group(&dev->kobj, &claw_attr_group); +} + +static void +claw_remove_files(struct device *dev) +{ + pr_debug("%s() called\n", __FUNCTION__); + CLAW_DBF_TEXT(2,setup,"rem_file"); + sysfs_remove_group(&dev->kobj, &claw_attr_group); +} + +/*--------------------------------------------------------------------* +* claw_init and cleanup * +*---------------------------------------------------------------------*/ + +static void __exit +claw_cleanup(void) +{ + unregister_cu3088_discipline(&claw_group_driver); + claw_unregister_debug_facility(); + printk(KERN_INFO "claw: Driver unloaded\n"); + +} + +/** + * Initialize module. + * This is called just after the module is loaded. + * + * @return 0 on success, !0 on error. + */ +static int __init +claw_init(void) +{ + int ret = 0; + printk(KERN_INFO "claw: starting driver " +#ifdef MODULE + "module " +#else + "compiled into kernel " +#endif + " $Revision: 1.35 $ $Date: 2005/03/24 12:25:38 $ \n"); + + +#ifdef FUNCTRACE + printk(KERN_INFO "claw: %s() enter \n",__FUNCTION__); +#endif + ret = claw_register_debug_facility(); + if (ret) { + printk(KERN_WARNING "claw: %s() debug_register failed %d\n", + __FUNCTION__,ret); + return ret; + } + CLAW_DBF_TEXT(2,setup,"init_mod"); + ret = register_cu3088_discipline(&claw_group_driver); + if (ret) { + claw_unregister_debug_facility(); + printk(KERN_WARNING "claw; %s() cu3088 register failed %d\n", + __FUNCTION__,ret); + } +#ifdef FUNCTRACE + printk(KERN_INFO "claw: %s() exit \n",__FUNCTION__); +#endif + return ret; +} + +module_init(claw_init); +module_exit(claw_cleanup); + + + +/*--------------------------------------------------------------------* +* End of File * +*---------------------------------------------------------------------*/ + + diff --git a/drivers/s390/net/claw.h b/drivers/s390/net/claw.h new file mode 100644 index 000000000000..3df71970f601 --- /dev/null +++ b/drivers/s390/net/claw.h @@ -0,0 +1,335 @@ +/******************************************************* +* Define constants * +* * +********************************************************/ +#define VERSION_CLAW_H "$Revision: 1.6 $" +/*-----------------------------------------------------* +* CCW command codes for CLAW protocol * +*------------------------------------------------------*/ + +#define CCW_CLAW_CMD_WRITE 0x01 /* write - not including link */ +#define CCW_CLAW_CMD_READ 0x02 /* read */ +#define CCW_CLAW_CMD_NOP 0x03 /* NOP */ +#define CCW_CLAW_CMD_SENSE 0x04 /* Sense */ +#define CCW_CLAW_CMD_SIGNAL_SMOD 0x05 /* Signal Status Modifier */ +#define CCW_CLAW_CMD_TIC 0x08 /* TIC */ +#define CCW_CLAW_CMD_READHEADER 0x12 /* read header data */ +#define CCW_CLAW_CMD_READFF 0x22 /* read an FF */ +#define CCW_CLAW_CMD_SENSEID 0xe4 /* Sense ID */ + + +/*-----------------------------------------------------* +* CLAW Unique constants * +*------------------------------------------------------*/ + +#define MORE_to_COME_FLAG 0x04 /* OR with 
write CCW in case of m-t-c */
+#define CLAW_IDLE               0x00   /* flag to indicate CLAW is idle   */
+#define CLAW_BUSY               0xff   /* flag to indicate CLAW is busy   */
+#define CLAW_PENDING            0x00   /* flag to indicate i/o is pending */
+#define CLAW_COMPLETE           0xff   /* flag to indicate i/o completed  */
+
+/*-----------------------------------------------------*
+*    CLAW control command code                          *
+*------------------------------------------------------*/
+
+#define SYSTEM_VALIDATE_REQUEST   0x01  /* System Validate request  */
+#define SYSTEM_VALIDATE_RESPONSE  0x02  /* System Validate response */
+#define CONNECTION_REQUEST        0x21  /* Connection request       */
+#define CONNECTION_RESPONSE       0x22  /* Connection response      */
+#define CONNECTION_CONFIRM        0x23  /* Connection confirm       */
+#define DISCONNECT                0x24  /* Disconnect               */
+#define CLAW_ERROR                0x41  /* CLAW error message       */
+#define CLAW_VERSION_ID           2     /* CLAW version ID          */
+
+/*-----------------------------------------------------*
+*    CLAW adapter sense bytes                           *
+*------------------------------------------------------*/
+
+#define CLAW_ADAPTER_SENSE_BYTE 0x41   /* Stop command issued to adapter */
+
+/*-----------------------------------------------------*
+*      CLAW control command return codes               *
+*------------------------------------------------------*/
+
+#define CLAW_RC_NAME_MISMATCH       166  /* names do not match         */
+#define CLAW_RC_WRONG_VERSION       167  /* wrong CLAW version number  */
+#define CLAW_RC_HOST_RCV_TOO_SMALL  180  /* Host maximum receive is    */
+                                         /* less than Linux on zSeries */
+                                         /* transmit size              */
+
+/*-----------------------------------------------------*
+*      CLAW Constants application name                 *
+*------------------------------------------------------*/
+
+#define HOST_APPL_NAME          "TCPIP   "
+#define WS_APPL_NAME_IP_LINK    "TCPIP   "
+#define WS_APPL_NAME_IP_NAME    "IP      "
+#define WS_APPL_NAME_API_LINK   "API     "
+#define WS_APPL_NAME_PACKED     "PACKED  "
+#define WS_NAME_NOT_DEF         "NOT_DEF "
+#define PACKING_ASK             1
+#define PACK_SEND               2
+#define DO_PACKED               3
+
+#define MAX_ENVELOPE_SIZE       65536
+#define CLAW_DEFAULT_MTU_SIZE   4096
+#define DEF_PACK_BUFSIZE        32768
+#define READ                    0
+#define WRITE                   1
+
+#define TB_TX                   0   /* sk buffer handling in process  */
+#define TB_STOP                 1   /* network device stop in process */
+#define TB_RETRY                2   /* retry in process               */
+#define TB_NOBUFFER             3   /* no buffer on free queue        */
+#define CLAW_MAX_LINK_ID        1
+#define CLAW_MAX_DEV            256 /* max claw devices               */
+#define MAX_NAME_LEN            8   /* host name, adapter name length */
+#define CLAW_FRAME_SIZE         4096
+#define CLAW_ID_SIZE            (BUS_ID_SIZE+3)
+
+/* state machine codes used in claw_irq_handler */
+
+#define CLAW_STOP                0
+#define CLAW_START_HALT_IO       1
+#define CLAW_START_SENSEID       2
+#define CLAW_START_READ          3
+#define CLAW_START_WRITE         4
+
+/*-----------------------------------------------------*
+*    Lock flag                                          *
+*------------------------------------------------------*/
+#define LOCK_YES             0
+#define LOCK_NO              1
+
+/*-----------------------------------------------------*
+*    DBF Debug macros                                   *
+*------------------------------------------------------*/
+#define CLAW_DBF_TEXT(level, name, text) \
+	do { \
+		debug_text_event(claw_dbf_##name, level, text); \
+	} while (0)
+
+#define CLAW_DBF_HEX(level,name,addr,len) \
+do { \
+	debug_event(claw_dbf_##name,level,(void*)(addr),len); \
+} while (0)
+
+#define CLAW_DBF_TEXT_(level,name,text...) 
\ +do { \ + sprintf(debug_buffer, text); \ + debug_text_event(claw_dbf_##name,level, debug_buffer);\ +} while (0) + +/******************************************************* +* Define Control Blocks * +* * +********************************************************/ + +/*------------------------------------------------------*/ +/* CLAW header */ +/*------------------------------------------------------*/ + +struct clawh { + __u16 length; /* length of data read by preceding read CCW */ + __u8 opcode; /* equivalent read CCW */ + __u8 flag; /* flag of FF to indicate read was completed */ +}; + +/*------------------------------------------------------*/ +/* CLAW Packing header 4 bytes */ +/*------------------------------------------------------*/ +struct clawph { + __u16 len; /* Length of Packed Data Area */ + __u8 flag; /* Reserved not used */ + __u8 link_num; /* Link ID */ +}; + +/*------------------------------------------------------*/ +/* CLAW Ending struct ccwbk */ +/*------------------------------------------------------*/ +struct endccw { + __u32 real; /* real address of this block */ + __u8 write1; /* write 1 is active */ + __u8 read1; /* read 1 is active */ + __u16 reserved; /* reserved for future use */ + struct ccw1 write1_nop1; + struct ccw1 write1_nop2; + struct ccw1 write2_nop1; + struct ccw1 write2_nop2; + struct ccw1 read1_nop1; + struct ccw1 read1_nop2; + struct ccw1 read2_nop1; + struct ccw1 read2_nop2; +}; + +/*------------------------------------------------------*/ +/* CLAW struct ccwbk */ +/*------------------------------------------------------*/ +struct ccwbk { + void *next; /* pointer to next ccw block */ + __u32 real; /* real address of this ccw */ + void *p_buffer; /* virtual address of data */ + struct clawh header; /* claw header */ + struct ccw1 write; /* write CCW */ + struct ccw1 w_read_FF; /* read FF */ + struct ccw1 w_TIC_1; /* TIC */ + struct ccw1 read; /* read CCW */ + struct ccw1 read_h; /* read header */ + struct ccw1 signal; /* signal SMOD */ + struct ccw1 r_TIC_1; /* TIC1 */ + struct ccw1 r_read_FF; /* read FF */ + struct ccw1 r_TIC_2; /* TIC2 */ +}; + +/*------------------------------------------------------*/ +/* CLAW control block */ +/*------------------------------------------------------*/ +struct clawctl { + __u8 command; /* control command */ + __u8 version; /* CLAW protocol version */ + __u8 linkid; /* link ID */ + __u8 correlator; /* correlator */ + __u8 rc; /* return code */ + __u8 reserved1; /* reserved */ + __u8 reserved2; /* reserved */ + __u8 reserved3; /* reserved */ + __u8 data[24]; /* command specific fields */ +}; + +/*------------------------------------------------------*/ +/* Data for SYSTEMVALIDATE command */ +/*------------------------------------------------------*/ +struct sysval { + char WS_name[8]; /* Workstation System name */ + char host_name[8]; /* Host system name */ + __u16 read_frame_size; /* read frame size */ + __u16 write_frame_size; /* write frame size */ + __u8 reserved[4]; /* reserved */ +}; + +/*------------------------------------------------------*/ +/* Data for Connect command */ +/*------------------------------------------------------*/ +struct conncmd { + char WS_name[8]; /* Workstation application name */ + char host_name[8]; /* Host application name */ + __u16 reserved1[2]; /* read frame size */ + __u8 reserved2[4]; /* reserved */ +}; + +/*------------------------------------------------------*/ +/* Data for CLAW error */ +/*------------------------------------------------------*/ +struct clawwerror { + 
char reserved1[8]; /* reserved */ + char reserved2[8]; /* reserved */ + char reserved3[8]; /* reserved */ +}; + +/*------------------------------------------------------*/ +/* Data buffer for CLAW */ +/*------------------------------------------------------*/ +struct clawbuf { + char buffer[MAX_ENVELOPE_SIZE]; /* data buffer */ +}; + +/*------------------------------------------------------*/ +/* Channel control block for read and write channel */ +/*------------------------------------------------------*/ + +struct chbk { + unsigned int devno; + int irq; + char id[CLAW_ID_SIZE]; + __u32 IO_active; + __u8 claw_state; + struct irb *irb; + struct ccw_device *cdev; /* pointer to the channel device */ + struct net_device *ndev; + wait_queue_head_t wait; + struct tasklet_struct tasklet; + struct timer_list timer; + unsigned long flag_a; /* atomic flags */ +#define CLAW_BH_ACTIVE 0 + unsigned long flag_b; /* atomic flags */ +#define CLAW_WRITE_ACTIVE 0 + __u8 last_dstat; + __u8 flag; + struct sk_buff_head collect_queue; + spinlock_t collect_lock; +#define CLAW_WRITE 0x02 /* - Set if this is a write channel */ +#define CLAW_READ 0x01 /* - Set if this is a read channel */ +#define CLAW_TIMER 0x80 /* - Set if timer made the wake_up */ +}; + +/*--------------------------------------------------------------* +* CLAW environment block * +*---------------------------------------------------------------*/ + +struct claw_env { + unsigned int devno[2]; /* device number */ + char host_name[9]; /* Host name */ + char adapter_name [9]; /* adapter name */ + char api_type[9]; /* TCPIP, API or PACKED */ + void *p_priv; /* privptr */ + __u16 read_buffers; /* read buffer number */ + __u16 write_buffers; /* write buffer number */ + __u16 read_size; /* read buffer size */ + __u16 write_size; /* write buffer size */ + __u16 dev_id; /* device ident */ + __u8 packing; /* are we packing? 
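+					   0 = off, else PACKING_ASK,
+					   PACK_SEND or DO_PACKED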
*/ + volatile __u8 queme_switch; /* gate for imed packing */ + volatile unsigned long pk_delay; /* Delay for adaptive packing */ + __u8 in_use; /* device active flag */ + struct net_device *ndev; /* backward ptr to the net dev*/ +}; + +/*--------------------------------------------------------------* +* CLAW main control block * +*---------------------------------------------------------------*/ + +struct claw_privbk { + void *p_buff_ccw; + __u32 p_buff_ccw_num; + void *p_buff_read; + __u32 p_buff_read_num; + __u32 p_buff_pages_perread; + void *p_buff_write; + __u32 p_buff_write_num; + __u32 p_buff_pages_perwrite; + long active_link_ID; /* Active logical link ID */ + struct ccwbk *p_write_free_chain; /* pointer to free ccw chain */ + struct ccwbk *p_write_active_first; /* ptr to the first write ccw */ + struct ccwbk *p_write_active_last; /* ptr to the last write ccw */ + struct ccwbk *p_read_active_first; /* ptr to the first read ccw */ + struct ccwbk *p_read_active_last; /* ptr to the last read ccw */ + struct endccw *p_end_ccw; /*ptr to ending ccw */ + struct ccwbk *p_claw_signal_blk; /* ptr to signal block */ + __u32 write_free_count; /* number of free bufs for write */ + struct net_device_stats stats; /* device status */ + struct chbk channel[2]; /* Channel control blocks */ + __u8 mtc_skipping; + int mtc_offset; + int mtc_logical_link; + void *p_mtc_envelope; + struct sk_buff *pk_skb; /* packing buffer */ + int pk_cnt; + struct clawctl ctl_bk; + struct claw_env *p_env; + __u8 system_validate_comp; + __u8 release_pend; + __u8 checksum_received_ip_pkts; + __u8 buffs_alloc; + struct endccw end_ccw; + unsigned long tbusy; + +}; + + +/************************************************************/ +/* define global constants */ +/************************************************************/ + +#define CCWBK_SIZE sizeof(struct ccwbk) + + diff --git a/drivers/s390/net/ctcdbug.c b/drivers/s390/net/ctcdbug.c new file mode 100644 index 000000000000..2c86bfa11b2f --- /dev/null +++ b/drivers/s390/net/ctcdbug.c @@ -0,0 +1,83 @@ +/* + * + * linux/drivers/s390/net/ctcdbug.c ($Revision: 1.4 $) + * + * CTC / ESCON network driver - s390 dbf exploit. + * + * Copyright 2000,2003 IBM Corporation + * + * Author(s): Original Code written by + * Peter Tiedemann (ptiedem@de.ibm.com) + * + * $Revision: 1.4 $ $Date: 2004/08/04 10:11:59 $ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
+ */ + +#include "ctcdbug.h" + +/** + * Debug Facility Stuff + */ +debug_info_t *ctc_dbf_setup = NULL; +debug_info_t *ctc_dbf_data = NULL; +debug_info_t *ctc_dbf_trace = NULL; + +DEFINE_PER_CPU(char[256], ctc_dbf_txt_buf); + +void +ctc_unregister_dbf_views(void) +{ + if (ctc_dbf_setup) + debug_unregister(ctc_dbf_setup); + if (ctc_dbf_data) + debug_unregister(ctc_dbf_data); + if (ctc_dbf_trace) + debug_unregister(ctc_dbf_trace); +} +int +ctc_register_dbf_views(void) +{ + ctc_dbf_setup = debug_register(CTC_DBF_SETUP_NAME, + CTC_DBF_SETUP_INDEX, + CTC_DBF_SETUP_NR_AREAS, + CTC_DBF_SETUP_LEN); + ctc_dbf_data = debug_register(CTC_DBF_DATA_NAME, + CTC_DBF_DATA_INDEX, + CTC_DBF_DATA_NR_AREAS, + CTC_DBF_DATA_LEN); + ctc_dbf_trace = debug_register(CTC_DBF_TRACE_NAME, + CTC_DBF_TRACE_INDEX, + CTC_DBF_TRACE_NR_AREAS, + CTC_DBF_TRACE_LEN); + + if ((ctc_dbf_setup == NULL) || (ctc_dbf_data == NULL) || + (ctc_dbf_trace == NULL)) { + ctc_unregister_dbf_views(); + return -ENOMEM; + } + debug_register_view(ctc_dbf_setup, &debug_hex_ascii_view); + debug_set_level(ctc_dbf_setup, CTC_DBF_SETUP_LEVEL); + + debug_register_view(ctc_dbf_data, &debug_hex_ascii_view); + debug_set_level(ctc_dbf_data, CTC_DBF_DATA_LEVEL); + + debug_register_view(ctc_dbf_trace, &debug_hex_ascii_view); + debug_set_level(ctc_dbf_trace, CTC_DBF_TRACE_LEVEL); + + return 0; +} + + diff --git a/drivers/s390/net/ctcdbug.h b/drivers/s390/net/ctcdbug.h new file mode 100644 index 000000000000..ef8883951720 --- /dev/null +++ b/drivers/s390/net/ctcdbug.h @@ -0,0 +1,123 @@ +/* + * + * linux/drivers/s390/net/ctcdbug.h ($Revision: 1.4 $) + * + * CTC / ESCON network driver - s390 dbf exploit. + * + * Copyright 2000,2003 IBM Corporation + * + * Author(s): Original Code written by + * Peter Tiedemann (ptiedem@de.ibm.com) + * + * $Revision: 1.4 $ $Date: 2004/10/15 09:26:58 $ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + + +#include <asm/debug.h> +/** + * Debug Facility stuff + */ +#define CTC_DBF_SETUP_NAME "ctc_setup" +#define CTC_DBF_SETUP_LEN 16 +#define CTC_DBF_SETUP_INDEX 3 +#define CTC_DBF_SETUP_NR_AREAS 1 +#define CTC_DBF_SETUP_LEVEL 3 + +#define CTC_DBF_DATA_NAME "ctc_data" +#define CTC_DBF_DATA_LEN 128 +#define CTC_DBF_DATA_INDEX 3 +#define CTC_DBF_DATA_NR_AREAS 1 +#define CTC_DBF_DATA_LEVEL 2 + +#define CTC_DBF_TRACE_NAME "ctc_trace" +#define CTC_DBF_TRACE_LEN 16 +#define CTC_DBF_TRACE_INDEX 2 +#define CTC_DBF_TRACE_NR_AREAS 2 +#define CTC_DBF_TRACE_LEVEL 3 + +#define DBF_TEXT(name,level,text) \ + do { \ + debug_text_event(ctc_dbf_##name,level,text); \ + } while (0) + +#define DBF_HEX(name,level,addr,len) \ + do { \ + debug_event(ctc_dbf_##name,level,(void*)(addr),len); \ + } while (0) + +DECLARE_PER_CPU(char[256], ctc_dbf_txt_buf); +extern debug_info_t *ctc_dbf_setup; +extern debug_info_t *ctc_dbf_data; +extern debug_info_t *ctc_dbf_trace; + + +#define DBF_TEXT_(name,level,text...) 
\ + do { \ + char* ctc_dbf_txt_buf = get_cpu_var(ctc_dbf_txt_buf); \ + sprintf(ctc_dbf_txt_buf, text); \ + debug_text_event(ctc_dbf_##name,level,ctc_dbf_txt_buf); \ + put_cpu_var(ctc_dbf_txt_buf); \ + } while (0) + +#define DBF_SPRINTF(name,level,text...) \ + do { \ + debug_sprintf_event(ctc_dbf_trace, level, ##text ); \ + debug_sprintf_event(ctc_dbf_trace, level, text ); \ + } while (0) + + +int ctc_register_dbf_views(void); + +void ctc_unregister_dbf_views(void); + +/** + * some more debug stuff + */ + +#define HEXDUMP16(importance,header,ptr) \ +PRINT_##importance(header "%02x %02x %02x %02x %02x %02x %02x %02x " \ + "%02x %02x %02x %02x %02x %02x %02x %02x\n", \ + *(((char*)ptr)),*(((char*)ptr)+1),*(((char*)ptr)+2), \ + *(((char*)ptr)+3),*(((char*)ptr)+4),*(((char*)ptr)+5), \ + *(((char*)ptr)+6),*(((char*)ptr)+7),*(((char*)ptr)+8), \ + *(((char*)ptr)+9),*(((char*)ptr)+10),*(((char*)ptr)+11), \ + *(((char*)ptr)+12),*(((char*)ptr)+13), \ + *(((char*)ptr)+14),*(((char*)ptr)+15)); \ +PRINT_##importance(header "%02x %02x %02x %02x %02x %02x %02x %02x " \ + "%02x %02x %02x %02x %02x %02x %02x %02x\n", \ + *(((char*)ptr)+16),*(((char*)ptr)+17), \ + *(((char*)ptr)+18),*(((char*)ptr)+19), \ + *(((char*)ptr)+20),*(((char*)ptr)+21), \ + *(((char*)ptr)+22),*(((char*)ptr)+23), \ + *(((char*)ptr)+24),*(((char*)ptr)+25), \ + *(((char*)ptr)+26),*(((char*)ptr)+27), \ + *(((char*)ptr)+28),*(((char*)ptr)+29), \ + *(((char*)ptr)+30),*(((char*)ptr)+31)); + +static inline void +hex_dump(unsigned char *buf, size_t len) +{ + size_t i; + + for (i = 0; i < len; i++) { + if (i && !(i % 16)) + printk("\n"); + printk("%02x ", *(buf + i)); + } + printk("\n"); +} + diff --git a/drivers/s390/net/ctcmain.c b/drivers/s390/net/ctcmain.c new file mode 100644 index 000000000000..7266bf5ea659 --- /dev/null +++ b/drivers/s390/net/ctcmain.c @@ -0,0 +1,3304 @@ +/* + * $Id: ctcmain.c,v 1.72 2005/03/17 10:51:52 ptiedem Exp $ + * + * CTC / ESCON network driver + * + * Copyright (C) 2001 IBM Deutschland Entwicklung GmbH, IBM Corporation + * Author(s): Fritz Elfert (elfert@de.ibm.com, felfert@millenux.com) + * Fixes by : Jochen Röhrig (roehrig@de.ibm.com) + * Arnaldo Carvalho de Melo <acme@conectiva.com.br> + Peter Tiedemann (ptiedem@de.ibm.com) + * Driver Model stuff by : Cornelia Huck <cohuck@de.ibm.com> + * + * Documentation used: + * - Principles of Operation (IBM doc#: SA22-7201-06) + * - Common IO/-Device Commands and Self Description (IBM doc#: SA22-7204-02) + * - Common IO/-Device Commands and Self Description (IBM doc#: SN22-5535) + * - ESCON Channel-to-Channel Adapter (IBM doc#: SA22-7203-00) + * - ESCON I/O Interface (IBM doc#: SA22-7202-029 + * + * and the source of the original CTC driver by: + * Dieter Wellerdiek (wel@de.ibm.com) + * Martin Schwidefsky (schwidefsky@de.ibm.com) + * Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com) + * Jochen Röhrig (roehrig@de.ibm.com) + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + * + * RELEASE-TAG: CTC/ESCON network driver $Revision: 1.72 $ + * + */ + +#undef DEBUG + +#include <linux/module.h> +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/slab.h> +#include <linux/errno.h> +#include <linux/types.h> +#include <linux/interrupt.h> +#include <linux/timer.h> +#include <linux/sched.h> +#include <linux/bitops.h> + +#include <linux/signal.h> +#include <linux/string.h> + +#include <linux/ip.h> +#include <linux/if_arp.h> +#include <linux/tcp.h> +#include <linux/skbuff.h> +#include <linux/ctype.h> +#include <net/dst.h> + +#include <asm/io.h> +#include <asm/ccwdev.h> +#include <asm/ccwgroup.h> +#include <asm/uaccess.h> + +#include <asm/idals.h> + +#include "ctctty.h" +#include "fsm.h" +#include "cu3088.h" +#include "ctcdbug.h" + +MODULE_AUTHOR("(C) 2000 IBM Corp. by Fritz Elfert (felfert@millenux.com)"); +MODULE_DESCRIPTION("Linux for S/390 CTC/Escon Driver"); +MODULE_LICENSE("GPL"); + +/** + * CCW commands, used in this driver. + */ +#define CCW_CMD_WRITE 0x01 +#define CCW_CMD_READ 0x02 +#define CCW_CMD_SET_EXTENDED 0xc3 +#define CCW_CMD_PREPARE 0xe3 + +#define CTC_PROTO_S390 0 +#define CTC_PROTO_LINUX 1 +#define CTC_PROTO_LINUX_TTY 2 +#define CTC_PROTO_OS390 3 +#define CTC_PROTO_MAX 3 + +#define CTC_BUFSIZE_LIMIT 65535 +#define CTC_BUFSIZE_DEFAULT 32768 + +#define CTC_TIMEOUT_5SEC 5000 + +#define CTC_INITIAL_BLOCKLEN 2 + +#define READ 0 +#define WRITE 1 + +#define CTC_ID_SIZE BUS_ID_SIZE+3 + + +struct ctc_profile { + unsigned long maxmulti; + unsigned long maxcqueue; + unsigned long doios_single; + unsigned long doios_multi; + unsigned long txlen; + unsigned long tx_time; + struct timespec send_stamp; +}; + +/** + * Definition of one channel + */ +struct channel { + + /** + * Pointer to next channel in list. + */ + struct channel *next; + char id[CTC_ID_SIZE]; + struct ccw_device *cdev; + + /** + * Type of this channel. + * CTC/A or Escon for valid channels. + */ + enum channel_types type; + + /** + * Misc. flags. See CHANNEL_FLAGS_... below + */ + __u32 flags; + + /** + * The protocol of this channel + */ + __u16 protocol; + + /** + * I/O and irq related stuff + */ + struct ccw1 *ccw; + struct irb *irb; + + /** + * RX/TX buffer size + */ + int max_bufsize; + + /** + * Transmit/Receive buffer. + */ + struct sk_buff *trans_skb; + + /** + * Universal I/O queue. + */ + struct sk_buff_head io_queue; + + /** + * TX queue for collecting skb's during busy. + */ + struct sk_buff_head collect_queue; + + /** + * Amount of data in collect_queue. + */ + int collect_len; + + /** + * spinlock for collect_queue and collect_len + */ + spinlock_t collect_lock; + + /** + * Timer for detecting unresposive + * I/O operations. + */ + fsm_timer timer; + + /** + * Retry counter for misc. operations. + */ + int retry; + + /** + * The finite state machine of this channel + */ + fsm_instance *fsm; + + /** + * The corresponding net_device this channel + * belongs to. 
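+	 * The fsm actions use it to reach the interface and
+	 * its private data (ch->netdev->priv).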
+ */ + struct net_device *netdev; + + struct ctc_profile prof; + + unsigned char *trans_skb_data; + + __u16 logflags; +}; + +#define CHANNEL_FLAGS_READ 0 +#define CHANNEL_FLAGS_WRITE 1 +#define CHANNEL_FLAGS_INUSE 2 +#define CHANNEL_FLAGS_BUFSIZE_CHANGED 4 +#define CHANNEL_FLAGS_FAILED 8 +#define CHANNEL_FLAGS_WAITIRQ 16 +#define CHANNEL_FLAGS_RWMASK 1 +#define CHANNEL_DIRECTION(f) (f & CHANNEL_FLAGS_RWMASK) + +#define LOG_FLAG_ILLEGALPKT 1 +#define LOG_FLAG_ILLEGALSIZE 2 +#define LOG_FLAG_OVERRUN 4 +#define LOG_FLAG_NOMEM 8 + +#define CTC_LOGLEVEL_INFO 1 +#define CTC_LOGLEVEL_NOTICE 2 +#define CTC_LOGLEVEL_WARN 4 +#define CTC_LOGLEVEL_EMERG 8 +#define CTC_LOGLEVEL_ERR 16 +#define CTC_LOGLEVEL_DEBUG 32 +#define CTC_LOGLEVEL_CRIT 64 + +#define CTC_LOGLEVEL_DEFAULT \ +(CTC_LOGLEVEL_INFO | CTC_LOGLEVEL_NOTICE | CTC_LOGLEVEL_WARN | CTC_LOGLEVEL_CRIT) + +#define CTC_LOGLEVEL_MAX ((CTC_LOGLEVEL_CRIT<<1)-1) + +static int loglevel = CTC_LOGLEVEL_DEFAULT; + +#define ctc_pr_debug(fmt, arg...) \ +do { if (loglevel & CTC_LOGLEVEL_DEBUG) printk(KERN_DEBUG fmt,##arg); } while (0) + +#define ctc_pr_info(fmt, arg...) \ +do { if (loglevel & CTC_LOGLEVEL_INFO) printk(KERN_INFO fmt,##arg); } while (0) + +#define ctc_pr_notice(fmt, arg...) \ +do { if (loglevel & CTC_LOGLEVEL_NOTICE) printk(KERN_NOTICE fmt,##arg); } while (0) + +#define ctc_pr_warn(fmt, arg...) \ +do { if (loglevel & CTC_LOGLEVEL_WARN) printk(KERN_WARNING fmt,##arg); } while (0) + +#define ctc_pr_emerg(fmt, arg...) \ +do { if (loglevel & CTC_LOGLEVEL_EMERG) printk(KERN_EMERG fmt,##arg); } while (0) + +#define ctc_pr_err(fmt, arg...) \ +do { if (loglevel & CTC_LOGLEVEL_ERR) printk(KERN_ERR fmt,##arg); } while (0) + +#define ctc_pr_crit(fmt, arg...) \ +do { if (loglevel & CTC_LOGLEVEL_CRIT) printk(KERN_CRIT fmt,##arg); } while (0) + +/** + * Linked list of all detected channels. + */ +static struct channel *channels = NULL; + +struct ctc_priv { + struct net_device_stats stats; + unsigned long tbusy; + /** + * The finite state machine of this interface. + */ + fsm_instance *fsm; + /** + * The protocol of this device + */ + __u16 protocol; + /** + * Timer for restarting after I/O Errors + */ + fsm_timer restart_timer; + + int buffer_size; + + struct channel *channel[2]; +}; + +/** + * Definition of our link level header. + */ +struct ll_header { + __u16 length; + __u16 type; + __u16 unused; +}; +#define LL_HEADER_LENGTH (sizeof(struct ll_header)) + +/** + * Compatibility macros for busy handling + * of network devices. + */ +static __inline__ void +ctc_clear_busy(struct net_device * dev) +{ + clear_bit(0, &(((struct ctc_priv *) dev->priv)->tbusy)); + if (((struct ctc_priv *)dev->priv)->protocol != CTC_PROTO_LINUX_TTY) + netif_wake_queue(dev); +} + +static __inline__ int +ctc_test_and_set_busy(struct net_device * dev) +{ + if (((struct ctc_priv *)dev->priv)->protocol != CTC_PROTO_LINUX_TTY) + netif_stop_queue(dev); + return test_and_set_bit(0, &((struct ctc_priv *) dev->priv)->tbusy); +} + +/** + * Print Banner. + */ +static void +print_banner(void) +{ + static int printed = 0; + char vbuf[] = "$Revision: 1.72 $"; + char *version = vbuf; + + if (printed) + return; + if ((version = strchr(version, ':'))) { + char *p = strchr(version + 1, '$'); + if (p) + *p = '\0'; + } else + version = " ??? "; + printk(KERN_INFO "CTC driver Version%s" +#ifdef DEBUG + " (DEBUG-VERSION, " __DATE__ __TIME__ ")" +#endif + " initialized\n", version); + printed = 1; +} + +/** + * Return type of a detected device. 
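+ *
+ * FICON is reported as ESCON here, since the driver treats
+ * both channel types alike.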
+ */ +static enum channel_types +get_channel_type(struct ccw_device_id *id) +{ + enum channel_types type = (enum channel_types) id->driver_info; + + if (type == channel_type_ficon) + type = channel_type_escon; + + return type; +} + +/** + * States of the interface statemachine. + */ +enum dev_states { + DEV_STATE_STOPPED, + DEV_STATE_STARTWAIT_RXTX, + DEV_STATE_STARTWAIT_RX, + DEV_STATE_STARTWAIT_TX, + DEV_STATE_STOPWAIT_RXTX, + DEV_STATE_STOPWAIT_RX, + DEV_STATE_STOPWAIT_TX, + DEV_STATE_RUNNING, + /** + * MUST be always the last element!! + */ + NR_DEV_STATES +}; + +static const char *dev_state_names[] = { + "Stopped", + "StartWait RXTX", + "StartWait RX", + "StartWait TX", + "StopWait RXTX", + "StopWait RX", + "StopWait TX", + "Running", +}; + +/** + * Events of the interface statemachine. + */ +enum dev_events { + DEV_EVENT_START, + DEV_EVENT_STOP, + DEV_EVENT_RXUP, + DEV_EVENT_TXUP, + DEV_EVENT_RXDOWN, + DEV_EVENT_TXDOWN, + DEV_EVENT_RESTART, + /** + * MUST be always the last element!! + */ + NR_DEV_EVENTS +}; + +static const char *dev_event_names[] = { + "Start", + "Stop", + "RX up", + "TX up", + "RX down", + "TX down", + "Restart", +}; + +/** + * Events of the channel statemachine + */ +enum ch_events { + /** + * Events, representing return code of + * I/O operations (ccw_device_start, ccw_device_halt et al.) + */ + CH_EVENT_IO_SUCCESS, + CH_EVENT_IO_EBUSY, + CH_EVENT_IO_ENODEV, + CH_EVENT_IO_EIO, + CH_EVENT_IO_UNKNOWN, + + CH_EVENT_ATTNBUSY, + CH_EVENT_ATTN, + CH_EVENT_BUSY, + + /** + * Events, representing unit-check + */ + CH_EVENT_UC_RCRESET, + CH_EVENT_UC_RSRESET, + CH_EVENT_UC_TXTIMEOUT, + CH_EVENT_UC_TXPARITY, + CH_EVENT_UC_HWFAIL, + CH_EVENT_UC_RXPARITY, + CH_EVENT_UC_ZERO, + CH_EVENT_UC_UNKNOWN, + + /** + * Events, representing subchannel-check + */ + CH_EVENT_SC_UNKNOWN, + + /** + * Events, representing machine checks + */ + CH_EVENT_MC_FAIL, + CH_EVENT_MC_GOOD, + + /** + * Event, representing normal IRQ + */ + CH_EVENT_IRQ, + CH_EVENT_FINSTAT, + + /** + * Event, representing timer expiry. + */ + CH_EVENT_TIMER, + + /** + * Events, representing commands from upper levels. + */ + CH_EVENT_START, + CH_EVENT_STOP, + + /** + * MUST be always the last element!! + */ + NR_CH_EVENTS, +}; + +static const char *ch_event_names[] = { + "ccw_device success", + "ccw_device busy", + "ccw_device enodev", + "ccw_device ioerr", + "ccw_device unknown", + + "Status ATTN & BUSY", + "Status ATTN", + "Status BUSY", + + "Unit check remote reset", + "Unit check remote system reset", + "Unit check TX timeout", + "Unit check TX parity", + "Unit check Hardware failure", + "Unit check RX parity", + "Unit check ZERO", + "Unit check Unknown", + + "SubChannel check Unknown", + + "Machine check failure", + "Machine check operational", + + "IRQ normal", + "IRQ final", + + "Timer", + + "Start", + "Stop", +}; + +/** + * States of the channel statemachine. + */ +enum ch_states { + /** + * Channel not assigned to any device, + * initial state, direction invalid + */ + CH_STATE_IDLE, + + /** + * Channel assigned but not operating + */ + CH_STATE_STOPPED, + CH_STATE_STARTWAIT, + CH_STATE_STARTRETRY, + CH_STATE_SETUPWAIT, + CH_STATE_RXINIT, + CH_STATE_TXINIT, + CH_STATE_RX, + CH_STATE_TX, + CH_STATE_RXIDLE, + CH_STATE_TXIDLE, + CH_STATE_RXERR, + CH_STATE_TXERR, + CH_STATE_TERM, + CH_STATE_DTERM, + CH_STATE_NOTOP, + + /** + * MUST be always the last element!! 
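+	 * (NR_CH_STATES doubles as the number of entries in
+	 * ch_state_names[] below.)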
+ */ + NR_CH_STATES, +}; + +static const char *ch_state_names[] = { + "Idle", + "Stopped", + "StartWait", + "StartRetry", + "SetupWait", + "RX init", + "TX init", + "RX", + "TX", + "RX idle", + "TX idle", + "RX error", + "TX error", + "Terminating", + "Restarting", + "Not operational", +}; + +#ifdef DEBUG +/** + * Dump header and first 16 bytes of an sk_buff for debugging purposes. + * + * @param skb The sk_buff to dump. + * @param offset Offset relative to skb-data, where to start the dump. + */ +static void +ctc_dump_skb(struct sk_buff *skb, int offset) +{ + unsigned char *p = skb->data; + __u16 bl; + struct ll_header *header; + int i; + + if (!(loglevel & CTC_LOGLEVEL_DEBUG)) + return; + p += offset; + bl = *((__u16 *) p); + p += 2; + header = (struct ll_header *) p; + p -= 2; + + printk(KERN_DEBUG "dump:\n"); + printk(KERN_DEBUG "blocklen=%d %04x\n", bl, bl); + + printk(KERN_DEBUG "h->length=%d %04x\n", header->length, + header->length); + printk(KERN_DEBUG "h->type=%04x\n", header->type); + printk(KERN_DEBUG "h->unused=%04x\n", header->unused); + if (bl > 16) + bl = 16; + printk(KERN_DEBUG "data: "); + for (i = 0; i < bl; i++) + printk("%02x%s", *p++, (i % 16) ? " " : "\n<7>"); + printk("\n"); +} +#else +static inline void +ctc_dump_skb(struct sk_buff *skb, int offset) +{ +} +#endif + +/** + * Unpack a just received skb and hand it over to + * upper layers. + * + * @param ch The channel where this skb has been received. + * @param pskb The received skb. + */ +static __inline__ void +ctc_unpack_skb(struct channel *ch, struct sk_buff *pskb) +{ + struct net_device *dev = ch->netdev; + struct ctc_priv *privptr = (struct ctc_priv *) dev->priv; + __u16 len = *((__u16 *) pskb->data); + + DBF_TEXT(trace, 4, __FUNCTION__); + skb_put(pskb, 2 + LL_HEADER_LENGTH); + skb_pull(pskb, 2); + pskb->dev = dev; + pskb->ip_summed = CHECKSUM_UNNECESSARY; + while (len > 0) { + struct sk_buff *skb; + struct ll_header *header = (struct ll_header *) pskb->data; + + skb_pull(pskb, LL_HEADER_LENGTH); + if ((ch->protocol == CTC_PROTO_S390) && + (header->type != ETH_P_IP)) { + +#ifndef DEBUG + if (!(ch->logflags & LOG_FLAG_ILLEGALPKT)) { +#endif + /** + * Check packet type only if we stick strictly + * to S/390's protocol of OS390. This only + * supports IP. Otherwise allow any packet + * type. 
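+			 * The warning below fires only once per error
+			 * burst (gated by ch->logflags, which is reset
+			 * on the next good frame) unless DEBUG is set.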
+ */ + ctc_pr_warn( + "%s Illegal packet type 0x%04x received, dropping\n", + dev->name, header->type); + ch->logflags |= LOG_FLAG_ILLEGALPKT; +#ifndef DEBUG + } +#endif +#ifdef DEBUG + ctc_dump_skb(pskb, -6); +#endif + privptr->stats.rx_dropped++; + privptr->stats.rx_frame_errors++; + return; + } + pskb->protocol = ntohs(header->type); + if (header->length <= LL_HEADER_LENGTH) { +#ifndef DEBUG + if (!(ch->logflags & LOG_FLAG_ILLEGALSIZE)) { +#endif + ctc_pr_warn( + "%s Illegal packet size %d " + "received (MTU=%d blocklen=%d), " + "dropping\n", dev->name, header->length, + dev->mtu, len); + ch->logflags |= LOG_FLAG_ILLEGALSIZE; +#ifndef DEBUG + } +#endif +#ifdef DEBUG + ctc_dump_skb(pskb, -6); +#endif + privptr->stats.rx_dropped++; + privptr->stats.rx_length_errors++; + return; + } + header->length -= LL_HEADER_LENGTH; + len -= LL_HEADER_LENGTH; + if ((header->length > skb_tailroom(pskb)) || + (header->length > len)) { +#ifndef DEBUG + if (!(ch->logflags & LOG_FLAG_OVERRUN)) { +#endif + ctc_pr_warn( + "%s Illegal packet size %d " + "(beyond the end of received data), " + "dropping\n", dev->name, header->length); + ch->logflags |= LOG_FLAG_OVERRUN; +#ifndef DEBUG + } +#endif +#ifdef DEBUG + ctc_dump_skb(pskb, -6); +#endif + privptr->stats.rx_dropped++; + privptr->stats.rx_length_errors++; + return; + } + skb_put(pskb, header->length); + pskb->mac.raw = pskb->data; + len -= header->length; + skb = dev_alloc_skb(pskb->len); + if (!skb) { +#ifndef DEBUG + if (!(ch->logflags & LOG_FLAG_NOMEM)) { +#endif + ctc_pr_warn( + "%s Out of memory in ctc_unpack_skb\n", + dev->name); + ch->logflags |= LOG_FLAG_NOMEM; +#ifndef DEBUG + } +#endif + privptr->stats.rx_dropped++; + return; + } + memcpy(skb_put(skb, pskb->len), pskb->data, pskb->len); + skb->mac.raw = skb->data; + skb->dev = pskb->dev; + skb->protocol = pskb->protocol; + pskb->ip_summed = CHECKSUM_UNNECESSARY; + if (ch->protocol == CTC_PROTO_LINUX_TTY) + ctc_tty_netif_rx(skb); + else + netif_rx_ni(skb); + /** + * Successful rx; reset logflags + */ + ch->logflags = 0; + dev->last_rx = jiffies; + privptr->stats.rx_packets++; + privptr->stats.rx_bytes += skb->len; + if (len > 0) { + skb_pull(pskb, header->length); + if (skb_tailroom(pskb) < LL_HEADER_LENGTH) { +#ifndef DEBUG + if (!(ch->logflags & LOG_FLAG_OVERRUN)) { +#endif + ctc_pr_warn( + "%s Overrun in ctc_unpack_skb\n", + dev->name); + ch->logflags |= LOG_FLAG_OVERRUN; +#ifndef DEBUG + } +#endif + return; + } + skb_put(pskb, LL_HEADER_LENGTH); + } + } +} + +/** + * Check return code of a preceeding ccw_device call, halt_IO etc... + * + * @param ch The channel, the error belongs to. + * @param return_code The error code to inspect. + */ +static void inline +ccw_check_return_code(struct channel *ch, int return_code, char *msg) +{ + DBF_TEXT(trace, 5, __FUNCTION__); + switch (return_code) { + case 0: + fsm_event(ch->fsm, CH_EVENT_IO_SUCCESS, ch); + break; + case -EBUSY: + ctc_pr_warn("%s (%s): Busy !\n", ch->id, msg); + fsm_event(ch->fsm, CH_EVENT_IO_EBUSY, ch); + break; + case -ENODEV: + ctc_pr_emerg("%s (%s): Invalid device called for IO\n", + ch->id, msg); + fsm_event(ch->fsm, CH_EVENT_IO_ENODEV, ch); + break; + case -EIO: + ctc_pr_emerg("%s (%s): Status pending... \n", + ch->id, msg); + fsm_event(ch->fsm, CH_EVENT_IO_EIO, ch); + break; + default: + ctc_pr_emerg("%s (%s): Unknown error in do_IO %04x\n", + ch->id, msg, return_code); + fsm_event(ch->fsm, CH_EVENT_IO_UNKNOWN, ch); + } +} + +/** + * Check sense of a unit check. + * + * @param ch The channel, the sense code belongs to. 
+ * @param sense The sense code to inspect. + */ +static void inline +ccw_unit_check(struct channel *ch, unsigned char sense) +{ + DBF_TEXT(trace, 5, __FUNCTION__); + if (sense & SNS0_INTERVENTION_REQ) { + if (sense & 0x01) { + if (ch->protocol != CTC_PROTO_LINUX_TTY) + ctc_pr_debug("%s: Interface disc. or Sel. reset " + "(remote)\n", ch->id); + fsm_event(ch->fsm, CH_EVENT_UC_RCRESET, ch); + } else { + ctc_pr_debug("%s: System reset (remote)\n", ch->id); + fsm_event(ch->fsm, CH_EVENT_UC_RSRESET, ch); + } + } else if (sense & SNS0_EQUIPMENT_CHECK) { + if (sense & SNS0_BUS_OUT_CHECK) { + ctc_pr_warn("%s: Hardware malfunction (remote)\n", + ch->id); + fsm_event(ch->fsm, CH_EVENT_UC_HWFAIL, ch); + } else { + ctc_pr_warn("%s: Read-data parity error (remote)\n", + ch->id); + fsm_event(ch->fsm, CH_EVENT_UC_RXPARITY, ch); + } + } else if (sense & SNS0_BUS_OUT_CHECK) { + if (sense & 0x04) { + ctc_pr_warn("%s: Data-streaming timeout)\n", ch->id); + fsm_event(ch->fsm, CH_EVENT_UC_TXTIMEOUT, ch); + } else { + ctc_pr_warn("%s: Data-transfer parity error\n", ch->id); + fsm_event(ch->fsm, CH_EVENT_UC_TXPARITY, ch); + } + } else if (sense & SNS0_CMD_REJECT) { + ctc_pr_warn("%s: Command reject\n", ch->id); + } else if (sense == 0) { + ctc_pr_debug("%s: Unit check ZERO\n", ch->id); + fsm_event(ch->fsm, CH_EVENT_UC_ZERO, ch); + } else { + ctc_pr_warn("%s: Unit Check with sense code: %02x\n", + ch->id, sense); + fsm_event(ch->fsm, CH_EVENT_UC_UNKNOWN, ch); + } +} + +static void +ctc_purge_skb_queue(struct sk_buff_head *q) +{ + struct sk_buff *skb; + + DBF_TEXT(trace, 5, __FUNCTION__); + + while ((skb = skb_dequeue(q))) { + atomic_dec(&skb->users); + dev_kfree_skb_irq(skb); + } +} + +static __inline__ int +ctc_checkalloc_buffer(struct channel *ch, int warn) +{ + DBF_TEXT(trace, 5, __FUNCTION__); + if ((ch->trans_skb == NULL) || + (ch->flags & CHANNEL_FLAGS_BUFSIZE_CHANGED)) { + if (ch->trans_skb != NULL) + dev_kfree_skb(ch->trans_skb); + clear_normalized_cda(&ch->ccw[1]); + ch->trans_skb = __dev_alloc_skb(ch->max_bufsize, + GFP_ATOMIC | GFP_DMA); + if (ch->trans_skb == NULL) { + if (warn) + ctc_pr_warn( + "%s: Couldn't alloc %s trans_skb\n", + ch->id, + (CHANNEL_DIRECTION(ch->flags) == READ) ? + "RX" : "TX"); + return -ENOMEM; + } + ch->ccw[1].count = ch->max_bufsize; + if (set_normalized_cda(&ch->ccw[1], ch->trans_skb->data)) { + dev_kfree_skb(ch->trans_skb); + ch->trans_skb = NULL; + if (warn) + ctc_pr_warn( + "%s: set_normalized_cda for %s " + "trans_skb failed, dropping packets\n", + ch->id, + (CHANNEL_DIRECTION(ch->flags) == READ) ? + "RX" : "TX"); + return -ENOMEM; + } + ch->ccw[1].count = 0; + ch->trans_skb_data = ch->trans_skb->data; + ch->flags &= ~CHANNEL_FLAGS_BUFSIZE_CHANGED; + } + return 0; +} + +/** + * Dummy NOP action for statemachines + */ +static void +fsm_action_nop(fsm_instance * fi, int event, void *arg) +{ +} + +/** + * Actions for channel - statemachines. + *****************************************************************************/ + +/** + * Normal data has been send. Free the corresponding + * skb (it's in io_queue), reset dev->tbusy and + * revert to idle state. + * + * @param fi An instance of a channel statemachine. + * @param event The event, just happened. + * @param arg Generic pointer, casted from channel * upon call. 
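+ *
+ * Any packets that piled up on ch->collect_queue in the meantime
+ * are copied into one trans_skb behind a 2-byte block length and
+ * started as a single chained write (counted as doios_multi in
+ * the channel profile).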
+ */ +static void +ch_action_txdone(fsm_instance * fi, int event, void *arg) +{ + struct channel *ch = (struct channel *) arg; + struct net_device *dev = ch->netdev; + struct ctc_priv *privptr = dev->priv; + struct sk_buff *skb; + int first = 1; + int i; + unsigned long duration; + struct timespec done_stamp = xtime; + + DBF_TEXT(trace, 4, __FUNCTION__); + + duration = + (done_stamp.tv_sec - ch->prof.send_stamp.tv_sec) * 1000000 + + (done_stamp.tv_nsec - ch->prof.send_stamp.tv_nsec) / 1000; + if (duration > ch->prof.tx_time) + ch->prof.tx_time = duration; + + if (ch->irb->scsw.count != 0) + ctc_pr_debug("%s: TX not complete, remaining %d bytes\n", + dev->name, ch->irb->scsw.count); + fsm_deltimer(&ch->timer); + while ((skb = skb_dequeue(&ch->io_queue))) { + privptr->stats.tx_packets++; + privptr->stats.tx_bytes += skb->len - LL_HEADER_LENGTH; + if (first) { + privptr->stats.tx_bytes += 2; + first = 0; + } + atomic_dec(&skb->users); + dev_kfree_skb_irq(skb); + } + spin_lock(&ch->collect_lock); + clear_normalized_cda(&ch->ccw[4]); + if (ch->collect_len > 0) { + int rc; + + if (ctc_checkalloc_buffer(ch, 1)) { + spin_unlock(&ch->collect_lock); + return; + } + ch->trans_skb->tail = ch->trans_skb->data = ch->trans_skb_data; + ch->trans_skb->len = 0; + if (ch->prof.maxmulti < (ch->collect_len + 2)) + ch->prof.maxmulti = ch->collect_len + 2; + if (ch->prof.maxcqueue < skb_queue_len(&ch->collect_queue)) + ch->prof.maxcqueue = skb_queue_len(&ch->collect_queue); + *((__u16 *) skb_put(ch->trans_skb, 2)) = ch->collect_len + 2; + i = 0; + while ((skb = skb_dequeue(&ch->collect_queue))) { + memcpy(skb_put(ch->trans_skb, skb->len), skb->data, + skb->len); + privptr->stats.tx_packets++; + privptr->stats.tx_bytes += skb->len - LL_HEADER_LENGTH; + atomic_dec(&skb->users); + dev_kfree_skb_irq(skb); + i++; + } + ch->collect_len = 0; + spin_unlock(&ch->collect_lock); + ch->ccw[1].count = ch->trans_skb->len; + fsm_addtimer(&ch->timer, CTC_TIMEOUT_5SEC, CH_EVENT_TIMER, ch); + ch->prof.send_stamp = xtime; + rc = ccw_device_start(ch->cdev, &ch->ccw[0], + (unsigned long) ch, 0xff, 0); + ch->prof.doios_multi++; + if (rc != 0) { + privptr->stats.tx_dropped += i; + privptr->stats.tx_errors += i; + fsm_deltimer(&ch->timer); + ccw_check_return_code(ch, rc, "chained TX"); + } + } else { + spin_unlock(&ch->collect_lock); + fsm_newstate(fi, CH_STATE_TXIDLE); + } + ctc_clear_busy(dev); +} + +/** + * Initial data is sent. + * Notify device statemachine that we are up and + * running. + * + * @param fi An instance of a channel statemachine. + * @param event The event, just happened. + * @param arg Generic pointer, casted from channel * upon call. + */ +static void +ch_action_txidle(fsm_instance * fi, int event, void *arg) +{ + struct channel *ch = (struct channel *) arg; + + DBF_TEXT(trace, 4, __FUNCTION__); + fsm_deltimer(&ch->timer); + fsm_newstate(fi, CH_STATE_TXIDLE); + fsm_event(((struct ctc_priv *) ch->netdev->priv)->fsm, DEV_EVENT_TXUP, + ch->netdev); +} + +/** + * Got normal data, check for sanity, queue it up, allocate new buffer + * trigger bottom half, and initiate next read. + * + * @param fi An instance of a channel statemachine. + * @param event The event, just happened. + * @param arg Generic pointer, casted from channel * upon call. 
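+ *
+ * The first __u16 of the buffer carries the block length. For
+ * the S390 and OS390 protocols up to 2 trailing bytes of garbage
+ * sent by VM TCP are tolerated when validating it.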
+ */
+static void
+ch_action_rx(fsm_instance * fi, int event, void *arg)
+{
+	struct channel *ch = (struct channel *) arg;
+	struct net_device *dev = ch->netdev;
+	struct ctc_priv *privptr = dev->priv;
+	int len = ch->max_bufsize - ch->irb->scsw.count;
+	struct sk_buff *skb = ch->trans_skb;
+	__u16 block_len = *((__u16 *) skb->data);
+	int check_len;
+	int rc;
+
+	DBF_TEXT(trace, 4, __FUNCTION__);
+	fsm_deltimer(&ch->timer);
+	if (len < 8) {
+		ctc_pr_debug("%s: got packet with length %d < 8\n",
+			     dev->name, len);
+		privptr->stats.rx_dropped++;
+		privptr->stats.rx_length_errors++;
+		goto again;
+	}
+	if (len > ch->max_bufsize) {
+		ctc_pr_debug("%s: got packet with length %d > %d\n",
+			     dev->name, len, ch->max_bufsize);
+		privptr->stats.rx_dropped++;
+		privptr->stats.rx_length_errors++;
+		goto again;
+	}
+
+	/**
+	 * VM TCP seems to have a bug sending 2 trailing bytes of garbage.
+	 */
+	switch (ch->protocol) {
+	case CTC_PROTO_S390:
+	case CTC_PROTO_OS390:
+		check_len = block_len + 2;
+		break;
+	default:
+		check_len = block_len;
+		break;
+	}
+	if ((len < block_len) || (len > check_len)) {
+		ctc_pr_debug("%s: got block length %d != rx length %d\n",
+			     dev->name, block_len, len);
+#ifdef DEBUG
+		ctc_dump_skb(skb, 0);
+#endif
+		*((__u16 *) skb->data) = len;
+		privptr->stats.rx_dropped++;
+		privptr->stats.rx_length_errors++;
+		goto again;
+	}
+	block_len -= 2;
+	if (block_len > 0) {
+		*((__u16 *) skb->data) = block_len;
+		ctc_unpack_skb(ch, skb);
+	}
+ again:
+	skb->data = skb->tail = ch->trans_skb_data;
+	skb->len = 0;
+	if (ctc_checkalloc_buffer(ch, 1))
+		return;
+	ch->ccw[1].count = ch->max_bufsize;
+	rc = ccw_device_start(ch->cdev, &ch->ccw[0], (unsigned long) ch, 0xff, 0);
+	if (rc != 0)
+		ccw_check_return_code(ch, rc, "normal RX");
+}
+
+static void ch_action_rxidle(fsm_instance * fi, int event, void *arg);
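For reference, the length checks in ch_action_rx() imply this wire format: a block begins with a 16-bit total length that covers the two length bytes themselves, followed by one or more packets, each prefixed by an ll_header whose length field covers header plus payload. The userspace sketch below builds and parses one such block. The three 16-bit fields of struct ll_header are an assumption inferred from the 2-byte arithmetic in the driver, and the tolerance for two trailing bytes mirrors the VM TCP workaround above.

#include <stdio.h>
#include <string.h>
#include <stdint.h>

/* Assumed wire layout, inferred from the driver's length arithmetic. */
struct ll_header {
	uint16_t length;	/* header + payload */
	uint16_t type;		/* protocol id */
	uint16_t unused;
};

static void
parse_block(const unsigned char *buf, size_t received)
{
	uint16_t block_len;

	memcpy(&block_len, buf, 2);
	/* VM TCP may append two bytes of garbage, so 'received' can be
	 * block_len or block_len + 2, exactly as checked above. */
	if (received < block_len || received > (size_t) block_len + 2) {
		printf("length mismatch: block %u, received %zu\n",
		       block_len, received);
		return;
	}
	buf += 2;
	block_len -= 2;
	while (block_len >= sizeof(struct ll_header)) {
		struct ll_header hdr;

		memcpy(&hdr, buf, sizeof(hdr));
		if (hdr.length < sizeof(hdr) || hdr.length > block_len)
			break;	/* malformed packet header */
		printf("packet: type %04x, payload %u bytes\n",
		       hdr.type, (unsigned) (hdr.length - sizeof(hdr)));
		buf += hdr.length;
		block_len -= hdr.length;
	}
}

int main(void)
{
	struct ll_header hdr = { sizeof(struct ll_header) + 4, 0x0800, 0 };
	unsigned char block[2 + sizeof(struct ll_header) + 4];
	uint16_t total = sizeof(block);

	memcpy(block, &total, 2);
	memcpy(block + 2, &hdr, sizeof(hdr));
	memcpy(block + 2 + sizeof(hdr), "ping", 4);
	parse_block(block, sizeof(block));
	return 0;
}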
+/**
+ * Initialize connection by sending a __u16 of value 0.
+ *
+ * @param fi    An instance of a channel statemachine.
+ * @param event The event, just happened.
+ * @param arg   Generic pointer, cast from channel * upon call.
+ */
+static void
+ch_action_firstio(fsm_instance * fi, int event, void *arg)
+{
+	struct channel *ch = (struct channel *) arg;
+	int rc;
+
+	DBF_TEXT(trace, 4, __FUNCTION__);
+
+	if (fsm_getstate(fi) == CH_STATE_TXIDLE)
+		ctc_pr_debug("%s: remote side issued READ?, init ...\n", ch->id);
+	fsm_deltimer(&ch->timer);
+	if (ctc_checkalloc_buffer(ch, 1))
+		return;
+	if ((fsm_getstate(fi) == CH_STATE_SETUPWAIT) &&
+	    (ch->protocol == CTC_PROTO_OS390)) {
+		/* OS/390 or z/OS */
+		if (CHANNEL_DIRECTION(ch->flags) == READ) {
+			*((__u16 *) ch->trans_skb->data) = CTC_INITIAL_BLOCKLEN;
+			fsm_addtimer(&ch->timer, CTC_TIMEOUT_5SEC,
+				     CH_EVENT_TIMER, ch);
+			ch_action_rxidle(fi, event, arg);
+		} else {
+			struct net_device *dev = ch->netdev;
+			fsm_newstate(fi, CH_STATE_TXIDLE);
+			fsm_event(((struct ctc_priv *) dev->priv)->fsm,
+				  DEV_EVENT_TXUP, dev);
+		}
+		return;
+	}
+
+	/**
+	 * Don't set up a timer for receiving the initial RX frame
+	 * if in compatibility mode, since VM TCP delays the initial
+	 * frame until it has some data to send.
+	 */
+	if ((CHANNEL_DIRECTION(ch->flags) == WRITE) ||
+	    (ch->protocol != CTC_PROTO_S390))
+		fsm_addtimer(&ch->timer, CTC_TIMEOUT_5SEC, CH_EVENT_TIMER, ch);
+
+	*((__u16 *) ch->trans_skb->data) = CTC_INITIAL_BLOCKLEN;
+	ch->ccw[1].count = 2;	/* Transfer only length */
+
+	fsm_newstate(fi, (CHANNEL_DIRECTION(ch->flags) == READ)
+		     ? CH_STATE_RXINIT : CH_STATE_TXINIT);
+	rc = ccw_device_start(ch->cdev, &ch->ccw[0], (unsigned long) ch, 0xff, 0);
+	if (rc != 0) {
+		fsm_deltimer(&ch->timer);
+		fsm_newstate(fi, CH_STATE_SETUPWAIT);
+		ccw_check_return_code(ch, rc, "init IO");
+	}
+	/**
+	 * If in compatibility mode, since we don't set up a timer, we
+	 * also signal RX channel up immediately. This enables us
+	 * to send packets early which in turn usually triggers some
+	 * reply from VM TCP which brings up the RX channel to its
+	 * final state.
+	 */
+	if ((CHANNEL_DIRECTION(ch->flags) == READ) &&
+	    (ch->protocol == CTC_PROTO_S390)) {
+		struct net_device *dev = ch->netdev;
+		fsm_event(((struct ctc_priv *) dev->priv)->fsm, DEV_EVENT_RXUP,
+			  dev);
+	}
+}
+
+/**
+ * Got initial data, check it. If OK,
+ * notify device statemachine that we are up and
+ * running.
+ *
+ * @param fi    An instance of a channel statemachine.
+ * @param event The event, just happened.
+ * @param arg   Generic pointer, cast from channel * upon call.
+ */
+static void
+ch_action_rxidle(fsm_instance * fi, int event, void *arg)
+{
+	struct channel *ch = (struct channel *) arg;
+	struct net_device *dev = ch->netdev;
+	__u16 buflen;
+	int rc;
+
+	DBF_TEXT(trace, 4, __FUNCTION__);
+	fsm_deltimer(&ch->timer);
+	buflen = *((__u16 *) ch->trans_skb->data);
+#ifdef DEBUG
+	ctc_pr_debug("%s: Initial RX count %d\n", dev->name, buflen);
+#endif
+	if (buflen >= CTC_INITIAL_BLOCKLEN) {
+		if (ctc_checkalloc_buffer(ch, 1))
+			return;
+		ch->ccw[1].count = ch->max_bufsize;
+		fsm_newstate(fi, CH_STATE_RXIDLE);
+		rc = ccw_device_start(ch->cdev, &ch->ccw[0],
+				      (unsigned long) ch, 0xff, 0);
+		if (rc != 0) {
+			fsm_newstate(fi, CH_STATE_RXINIT);
+			ccw_check_return_code(ch, rc, "initial RX");
+		} else
+			fsm_event(((struct ctc_priv *) dev->priv)->fsm,
+				  DEV_EVENT_RXUP, dev);
+	} else {
+		ctc_pr_debug("%s: Initial RX count %d not %d\n",
+			     dev->name, buflen, CTC_INITIAL_BLOCKLEN);
+		ch_action_firstio(fi, event, arg);
+	}
+}
+
+/**
+ * Set channel into extended mode.
+ *
+ * @param fi    An instance of a channel statemachine.
+ * @param event The event, just happened.
+ * @param arg   Generic pointer, cast from channel * upon call.
+ */
+static void
+ch_action_setmode(fsm_instance * fi, int event, void *arg)
+{
+	struct channel *ch = (struct channel *) arg;
+	int rc;
+	unsigned long saveflags;
+
+	DBF_TEXT(trace, 4, __FUNCTION__);
+	fsm_deltimer(&ch->timer);
+	fsm_addtimer(&ch->timer, CTC_TIMEOUT_5SEC, CH_EVENT_TIMER, ch);
+	fsm_newstate(fi, CH_STATE_SETUPWAIT);
+	saveflags = 0;	/* avoids compiler warning with
+			   spin_unlock_irqrestore */
+	if (event == CH_EVENT_TIMER)	/* only for timer, not yet locked */
+		spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
+	rc = ccw_device_start(ch->cdev, &ch->ccw[6], (unsigned long) ch, 0xff, 0);
+	if (event == CH_EVENT_TIMER)
+		spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
+	if (rc != 0) {
+		fsm_deltimer(&ch->timer);
+		fsm_newstate(fi, CH_STATE_STARTWAIT);
+		ccw_check_return_code(ch, rc, "set Mode");
+	} else
+		ch->retry = 0;
+}
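ch_action_setmode() above (and several actions below, such as ch_action_haltio() and ch_action_restart()) takes the ccw device lock only when entered via a timer event, because on the interrupt path the common-I/O layer has already acquired that lock before calling the handler. A minimal userspace model of this "caller may already hold the lock" pattern follows; all names are hypothetical.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t cdev_lock = PTHREAD_MUTEX_INITIALIZER;

enum caller { FROM_IRQ_HANDLER, FROM_TIMER };

/* The I/O request must run under the lock either way; only the
 * timer path has to acquire it itself. */
static void
start_io(enum caller who)
{
	if (who == FROM_TIMER)	/* IRQ path already holds the lock */
		pthread_mutex_lock(&cdev_lock);
	printf("ccw_device_start() issued under the lock\n");
	if (who == FROM_TIMER)
		pthread_mutex_unlock(&cdev_lock);
}

int main(void)
{
	pthread_mutex_lock(&cdev_lock);		/* model the IRQ entry path */
	start_io(FROM_IRQ_HANDLER);
	pthread_mutex_unlock(&cdev_lock);

	start_io(FROM_TIMER);			/* timer entry path */
	return 0;
}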
+/**
+ * Setup channel.
+ *
+ * @param fi    An instance of a channel statemachine.
+ * @param event The event, just happened.
+ * @param arg   Generic pointer, cast from channel * upon call.
+ */
+static void
+ch_action_start(fsm_instance * fi, int event, void *arg)
+{
+	struct channel *ch = (struct channel *) arg;
+	unsigned long saveflags;
+	int rc;
+	struct net_device *dev;
+
+	DBF_TEXT(trace, 4, __FUNCTION__);
+	if (ch == NULL) {
+		ctc_pr_warn("ch_action_start ch=NULL\n");
+		return;
+	}
+	if (ch->netdev == NULL) {
+		ctc_pr_warn("ch_action_start dev=NULL, id=%s\n", ch->id);
+		return;
+	}
+	dev = ch->netdev;
+
+#ifdef DEBUG
+	ctc_pr_debug("%s: %s channel start\n", dev->name,
+		     (CHANNEL_DIRECTION(ch->flags) == READ) ? "RX" : "TX");
+#endif
+
+	if (ch->trans_skb != NULL) {
+		clear_normalized_cda(&ch->ccw[1]);
+		dev_kfree_skb(ch->trans_skb);
+		ch->trans_skb = NULL;
+	}
+	if (CHANNEL_DIRECTION(ch->flags) == READ) {
+		ch->ccw[1].cmd_code = CCW_CMD_READ;
+		ch->ccw[1].flags = CCW_FLAG_SLI;
+		ch->ccw[1].count = 0;
+	} else {
+		ch->ccw[1].cmd_code = CCW_CMD_WRITE;
+		ch->ccw[1].flags = CCW_FLAG_SLI | CCW_FLAG_CC;
+		ch->ccw[1].count = 0;
+	}
+	if (ctc_checkalloc_buffer(ch, 0)) {
+		ctc_pr_notice(
+			"%s: Could not allocate %s trans_skb, delaying "
+			"allocation until first transfer\n",
+			dev->name,
+			(CHANNEL_DIRECTION(ch->flags) == READ) ? "RX" : "TX");
+	}
+
+	ch->ccw[0].cmd_code = CCW_CMD_PREPARE;
+	ch->ccw[0].flags = CCW_FLAG_SLI | CCW_FLAG_CC;
+	ch->ccw[0].count = 0;
+	ch->ccw[0].cda = 0;
+	ch->ccw[2].cmd_code = CCW_CMD_NOOP;	/* combined CE + DE */
+	ch->ccw[2].flags = CCW_FLAG_SLI;
+	ch->ccw[2].count = 0;
+	ch->ccw[2].cda = 0;
+	memcpy(&ch->ccw[3], &ch->ccw[0], sizeof (struct ccw1) * 3);
+	ch->ccw[4].cda = 0;
+	ch->ccw[4].flags &= ~CCW_FLAG_IDA;
+
+	fsm_newstate(fi, CH_STATE_STARTWAIT);
+	fsm_addtimer(&ch->timer, 1000, CH_EVENT_TIMER, ch);
+	spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
+	rc = ccw_device_halt(ch->cdev, (unsigned long) ch);
+	spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
+	if (rc != 0) {
+		if (rc != -EBUSY)
+			fsm_deltimer(&ch->timer);
+		ccw_check_return_code(ch, rc, "initial HaltIO");
+	}
+#ifdef DEBUG
+	ctc_pr_debug("ctc: %s(): leaving\n", __func__);
+#endif
+}
+
+/**
+ * Shutdown a channel.
+ *
+ * @param fi    An instance of a channel statemachine.
+ * @param event The event, just happened.
+ * @param arg   Generic pointer, cast from channel * upon call.
+ */
+static void
+ch_action_haltio(fsm_instance * fi, int event, void *arg)
+{
+	struct channel *ch = (struct channel *) arg;
+	unsigned long saveflags;
+	int rc;
+	int oldstate;
+
+	DBF_TEXT(trace, 3, __FUNCTION__);
+	fsm_deltimer(&ch->timer);
+	fsm_addtimer(&ch->timer, CTC_TIMEOUT_5SEC, CH_EVENT_TIMER, ch);
+	saveflags = 0;	/* avoids compiler warning with
+			   spin_unlock_irqrestore */
+	if (event == CH_EVENT_STOP)	/* only for STOP, not yet locked */
+		spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
+	oldstate = fsm_getstate(fi);
+	fsm_newstate(fi, CH_STATE_TERM);
+	rc = ccw_device_halt(ch->cdev, (unsigned long) ch);
+	if (event == CH_EVENT_STOP)
+		spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
+	if (rc != 0) {
+		if (rc != -EBUSY) {
+			fsm_deltimer(&ch->timer);
+			fsm_newstate(fi, oldstate);
+		}
+		ccw_check_return_code(ch, rc, "HaltIO in ch_action_haltio");
+	}
+}
+
+/**
+ * A channel has successfully been halted.
+ * Clean up its queue and notify interface statemachine.
+ *
+ * @param fi    An instance of a channel statemachine.
+ * @param event The event, just happened.
+ * @param arg   Generic pointer, cast from channel * upon call.
+ */
+static void
+ch_action_stopped(fsm_instance * fi, int event, void *arg)
+{
+	struct channel *ch = (struct channel *) arg;
+	struct net_device *dev = ch->netdev;
+
+	DBF_TEXT(trace, 3, __FUNCTION__);
+	fsm_deltimer(&ch->timer);
+	fsm_newstate(fi, CH_STATE_STOPPED);
+	if (ch->trans_skb != NULL) {
+		clear_normalized_cda(&ch->ccw[1]);
+		dev_kfree_skb(ch->trans_skb);
+		ch->trans_skb = NULL;
+	}
+	if (CHANNEL_DIRECTION(ch->flags) == READ) {
+		skb_queue_purge(&ch->io_queue);
+		fsm_event(((struct ctc_priv *) dev->priv)->fsm,
+			  DEV_EVENT_RXDOWN, dev);
+	} else {
+		ctc_purge_skb_queue(&ch->io_queue);
+		spin_lock(&ch->collect_lock);
+		ctc_purge_skb_queue(&ch->collect_queue);
+		ch->collect_len = 0;
+		spin_unlock(&ch->collect_lock);
+		fsm_event(((struct ctc_priv *) dev->priv)->fsm,
+			  DEV_EVENT_TXDOWN, dev);
+	}
+}
+
+/**
+ * A stop command from the device statemachine arrived while we are in
+ * the not-operational state. Set state to stopped.
+ *
+ * @param fi    An instance of a channel statemachine.
+ * @param event The event, just happened.
+ * @param arg   Generic pointer, cast from channel * upon call.
+ */
+static void
+ch_action_stop(fsm_instance * fi, int event, void *arg)
+{
+	fsm_newstate(fi, CH_STATE_STOPPED);
+}
+
+/**
+ * A machine check for no path, not operational status or gone device has
+ * happened.
+ * Clean up the queue and notify the interface statemachine.
+ *
+ * @param fi    An instance of a channel statemachine.
+ * @param event The event, just happened.
+ * @param arg   Generic pointer, cast from channel * upon call.
+ */
+static void
+ch_action_fail(fsm_instance * fi, int event, void *arg)
+{
+	struct channel *ch = (struct channel *) arg;
+	struct net_device *dev = ch->netdev;
+
+	DBF_TEXT(trace, 3, __FUNCTION__);
+	fsm_deltimer(&ch->timer);
+	fsm_newstate(fi, CH_STATE_NOTOP);
+	if (CHANNEL_DIRECTION(ch->flags) == READ) {
+		skb_queue_purge(&ch->io_queue);
+		fsm_event(((struct ctc_priv *) dev->priv)->fsm,
+			  DEV_EVENT_RXDOWN, dev);
+	} else {
+		ctc_purge_skb_queue(&ch->io_queue);
+		spin_lock(&ch->collect_lock);
+		ctc_purge_skb_queue(&ch->collect_queue);
+		ch->collect_len = 0;
+		spin_unlock(&ch->collect_lock);
+		fsm_event(((struct ctc_priv *) dev->priv)->fsm,
+			  DEV_EVENT_TXDOWN, dev);
+	}
+}
+
+/**
+ * Handle error during setup of channel.
+ *
+ * @param fi    An instance of a channel statemachine.
+ * @param event The event, just happened.
+ * @param arg   Generic pointer, cast from channel * upon call.
+ */
+static void
+ch_action_setuperr(fsm_instance * fi, int event, void *arg)
+{
+	struct channel *ch = (struct channel *) arg;
+	struct net_device *dev = ch->netdev;
+
+	DBF_TEXT(setup, 3, __FUNCTION__);
+	/**
+	 * Special case: Got UC_RCRESET on setmode.
+	 * This means that the remote side isn't set up. In this case
+	 * simply retry after 5 secs...
+	 */
+	if ((fsm_getstate(fi) == CH_STATE_SETUPWAIT) &&
+	    ((event == CH_EVENT_UC_RCRESET) ||
+	     (event == CH_EVENT_UC_RSRESET))) {
+		fsm_newstate(fi, CH_STATE_STARTRETRY);
+		fsm_deltimer(&ch->timer);
+		fsm_addtimer(&ch->timer, CTC_TIMEOUT_5SEC, CH_EVENT_TIMER, ch);
+		if (CHANNEL_DIRECTION(ch->flags) == READ) {
+			int rc = ccw_device_halt(ch->cdev, (unsigned long) ch);
+			if (rc != 0)
+				ccw_check_return_code(
+					ch, rc, "HaltIO in ch_action_setuperr");
+		}
+		return;
+	}
+
+	ctc_pr_debug("%s: Error %s during %s channel setup state=%s\n",
+		     dev->name, ch_event_names[event],
+		     (CHANNEL_DIRECTION(ch->flags) == READ) ?
"RX" : "TX", + fsm_getstate_str(fi)); + if (CHANNEL_DIRECTION(ch->flags) == READ) { + fsm_newstate(fi, CH_STATE_RXERR); + fsm_event(((struct ctc_priv *) dev->priv)->fsm, + DEV_EVENT_RXDOWN, dev); + } else { + fsm_newstate(fi, CH_STATE_TXERR); + fsm_event(((struct ctc_priv *) dev->priv)->fsm, + DEV_EVENT_TXDOWN, dev); + } +} + +/** + * Restart a channel after an error. + * + * @param fi An instance of a channel statemachine. + * @param event The event, just happened. + * @param arg Generic pointer, casted from channel * upon call. + */ +static void +ch_action_restart(fsm_instance * fi, int event, void *arg) +{ + unsigned long saveflags; + int oldstate; + int rc; + + struct channel *ch = (struct channel *) arg; + struct net_device *dev = ch->netdev; + + DBF_TEXT(trace, 3, __FUNCTION__); + fsm_deltimer(&ch->timer); + ctc_pr_debug("%s: %s channel restart\n", dev->name, + (CHANNEL_DIRECTION(ch->flags) == READ) ? "RX" : "TX"); + fsm_addtimer(&ch->timer, CTC_TIMEOUT_5SEC, CH_EVENT_TIMER, ch); + oldstate = fsm_getstate(fi); + fsm_newstate(fi, CH_STATE_STARTWAIT); + saveflags = 0; /* avoids compiler warning with + spin_unlock_irqrestore */ + if (event == CH_EVENT_TIMER) // only for timer not yet locked + spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags); + rc = ccw_device_halt(ch->cdev, (unsigned long) ch); + if (event == CH_EVENT_TIMER) + spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags); + if (rc != 0) { + if (rc != -EBUSY) { + fsm_deltimer(&ch->timer); + fsm_newstate(fi, oldstate); + } + ccw_check_return_code(ch, rc, "HaltIO in ch_action_restart"); + } +} + +/** + * Handle error during RX initial handshake (exchange of + * 0-length block header) + * + * @param fi An instance of a channel statemachine. + * @param event The event, just happened. + * @param arg Generic pointer, casted from channel * upon call. + */ +static void +ch_action_rxiniterr(fsm_instance * fi, int event, void *arg) +{ + struct channel *ch = (struct channel *) arg; + struct net_device *dev = ch->netdev; + + DBF_TEXT(setup, 3, __FUNCTION__); + if (event == CH_EVENT_TIMER) { + fsm_deltimer(&ch->timer); + ctc_pr_debug("%s: Timeout during RX init handshake\n", dev->name); + if (ch->retry++ < 3) + ch_action_restart(fi, event, arg); + else { + fsm_newstate(fi, CH_STATE_RXERR); + fsm_event(((struct ctc_priv *) dev->priv)->fsm, + DEV_EVENT_RXDOWN, dev); + } + } else + ctc_pr_warn("%s: Error during RX init handshake\n", dev->name); +} + +/** + * Notify device statemachine if we gave up initialization + * of RX channel. + * + * @param fi An instance of a channel statemachine. + * @param event The event, just happened. + * @param arg Generic pointer, casted from channel * upon call. + */ +static void +ch_action_rxinitfail(fsm_instance * fi, int event, void *arg) +{ + struct channel *ch = (struct channel *) arg; + struct net_device *dev = ch->netdev; + + DBF_TEXT(setup, 3, __FUNCTION__); + fsm_newstate(fi, CH_STATE_RXERR); + ctc_pr_warn("%s: RX initialization failed\n", dev->name); + ctc_pr_warn("%s: RX <-> RX connection detected\n", dev->name); + fsm_event(((struct ctc_priv *) dev->priv)->fsm, DEV_EVENT_RXDOWN, dev); +} + +/** + * Handle RX Unit check remote reset (remote disconnected) + * + * @param fi An instance of a channel statemachine. + * @param event The event, just happened. + * @param arg Generic pointer, casted from channel * upon call. 
+ */ +static void +ch_action_rxdisc(fsm_instance * fi, int event, void *arg) +{ + struct channel *ch = (struct channel *) arg; + struct channel *ch2; + struct net_device *dev = ch->netdev; + + DBF_TEXT(trace, 3, __FUNCTION__); + fsm_deltimer(&ch->timer); + ctc_pr_debug("%s: Got remote disconnect, re-initializing ...\n", + dev->name); + + /** + * Notify device statemachine + */ + fsm_event(((struct ctc_priv *) dev->priv)->fsm, DEV_EVENT_RXDOWN, dev); + fsm_event(((struct ctc_priv *) dev->priv)->fsm, DEV_EVENT_TXDOWN, dev); + + fsm_newstate(fi, CH_STATE_DTERM); + ch2 = ((struct ctc_priv *) dev->priv)->channel[WRITE]; + fsm_newstate(ch2->fsm, CH_STATE_DTERM); + + ccw_device_halt(ch->cdev, (unsigned long) ch); + ccw_device_halt(ch2->cdev, (unsigned long) ch2); +} + +/** + * Handle error during TX channel initialization. + * + * @param fi An instance of a channel statemachine. + * @param event The event, just happened. + * @param arg Generic pointer, casted from channel * upon call. + */ +static void +ch_action_txiniterr(fsm_instance * fi, int event, void *arg) +{ + struct channel *ch = (struct channel *) arg; + struct net_device *dev = ch->netdev; + + DBF_TEXT(setup, 2, __FUNCTION__); + if (event == CH_EVENT_TIMER) { + fsm_deltimer(&ch->timer); + ctc_pr_debug("%s: Timeout during TX init handshake\n", dev->name); + if (ch->retry++ < 3) + ch_action_restart(fi, event, arg); + else { + fsm_newstate(fi, CH_STATE_TXERR); + fsm_event(((struct ctc_priv *) dev->priv)->fsm, + DEV_EVENT_TXDOWN, dev); + } + } else + ctc_pr_warn("%s: Error during TX init handshake\n", dev->name); +} + +/** + * Handle TX timeout by retrying operation. + * + * @param fi An instance of a channel statemachine. + * @param event The event, just happened. + * @param arg Generic pointer, casted from channel * upon call. + */ +static void +ch_action_txretry(fsm_instance * fi, int event, void *arg) +{ + struct channel *ch = (struct channel *) arg; + struct net_device *dev = ch->netdev; + unsigned long saveflags; + + DBF_TEXT(trace, 4, __FUNCTION__); + fsm_deltimer(&ch->timer); + if (ch->retry++ > 3) { + ctc_pr_debug("%s: TX retry failed, restarting channel\n", + dev->name); + fsm_event(((struct ctc_priv *) dev->priv)->fsm, + DEV_EVENT_TXDOWN, dev); + ch_action_restart(fi, event, arg); + } else { + struct sk_buff *skb; + + ctc_pr_debug("%s: TX retry %d\n", dev->name, ch->retry); + if ((skb = skb_peek(&ch->io_queue))) { + int rc = 0; + + clear_normalized_cda(&ch->ccw[4]); + ch->ccw[4].count = skb->len; + if (set_normalized_cda(&ch->ccw[4], skb->data)) { + ctc_pr_debug( + "%s: IDAL alloc failed, chan restart\n", + dev->name); + fsm_event(((struct ctc_priv *) dev->priv)->fsm, + DEV_EVENT_TXDOWN, dev); + ch_action_restart(fi, event, arg); + return; + } + fsm_addtimer(&ch->timer, 1000, CH_EVENT_TIMER, ch); + saveflags = 0; /* avoids compiler warning with + spin_unlock_irqrestore */ + if (event == CH_EVENT_TIMER) // only for TIMER not yet locked + spin_lock_irqsave(get_ccwdev_lock(ch->cdev), + saveflags); + rc = ccw_device_start(ch->cdev, &ch->ccw[3], + (unsigned long) ch, 0xff, 0); + if (event == CH_EVENT_TIMER) + spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), + saveflags); + if (rc != 0) { + fsm_deltimer(&ch->timer); + ccw_check_return_code(ch, rc, "TX in ch_action_txretry"); + ctc_purge_skb_queue(&ch->io_queue); + } + } + } + +} + +/** + * Handle fatal errors during an I/O command. + * + * @param fi An instance of a channel statemachine. + * @param event The event, just happened. 
+ * @param arg Generic pointer, casted from channel * upon call. + */ +static void +ch_action_iofatal(fsm_instance * fi, int event, void *arg) +{ + struct channel *ch = (struct channel *) arg; + struct net_device *dev = ch->netdev; + + DBF_TEXT(trace, 3, __FUNCTION__); + fsm_deltimer(&ch->timer); + if (CHANNEL_DIRECTION(ch->flags) == READ) { + ctc_pr_debug("%s: RX I/O error\n", dev->name); + fsm_newstate(fi, CH_STATE_RXERR); + fsm_event(((struct ctc_priv *) dev->priv)->fsm, + DEV_EVENT_RXDOWN, dev); + } else { + ctc_pr_debug("%s: TX I/O error\n", dev->name); + fsm_newstate(fi, CH_STATE_TXERR); + fsm_event(((struct ctc_priv *) dev->priv)->fsm, + DEV_EVENT_TXDOWN, dev); + } +} + +static void +ch_action_reinit(fsm_instance *fi, int event, void *arg) +{ + struct channel *ch = (struct channel *)arg; + struct net_device *dev = ch->netdev; + struct ctc_priv *privptr = dev->priv; + + DBF_TEXT(trace, 4, __FUNCTION__); + ch_action_iofatal(fi, event, arg); + fsm_addtimer(&privptr->restart_timer, 1000, DEV_EVENT_RESTART, dev); +} + + +/** + * The statemachine for a channel. + */ +static const fsm_node ch_fsm[] = { + {CH_STATE_STOPPED, CH_EVENT_STOP, fsm_action_nop }, + {CH_STATE_STOPPED, CH_EVENT_START, ch_action_start }, + {CH_STATE_STOPPED, CH_EVENT_FINSTAT, fsm_action_nop }, + {CH_STATE_STOPPED, CH_EVENT_MC_FAIL, fsm_action_nop }, + + {CH_STATE_NOTOP, CH_EVENT_STOP, ch_action_stop }, + {CH_STATE_NOTOP, CH_EVENT_START, fsm_action_nop }, + {CH_STATE_NOTOP, CH_EVENT_FINSTAT, fsm_action_nop }, + {CH_STATE_NOTOP, CH_EVENT_MC_FAIL, fsm_action_nop }, + {CH_STATE_NOTOP, CH_EVENT_MC_GOOD, ch_action_start }, + + {CH_STATE_STARTWAIT, CH_EVENT_STOP, ch_action_haltio }, + {CH_STATE_STARTWAIT, CH_EVENT_START, fsm_action_nop }, + {CH_STATE_STARTWAIT, CH_EVENT_FINSTAT, ch_action_setmode }, + {CH_STATE_STARTWAIT, CH_EVENT_TIMER, ch_action_setuperr }, + {CH_STATE_STARTWAIT, CH_EVENT_IO_ENODEV, ch_action_iofatal }, + {CH_STATE_STARTWAIT, CH_EVENT_IO_EIO, ch_action_reinit }, + {CH_STATE_STARTWAIT, CH_EVENT_MC_FAIL, ch_action_fail }, + + {CH_STATE_STARTRETRY, CH_EVENT_STOP, ch_action_haltio }, + {CH_STATE_STARTRETRY, CH_EVENT_TIMER, ch_action_setmode }, + {CH_STATE_STARTRETRY, CH_EVENT_FINSTAT, fsm_action_nop }, + {CH_STATE_STARTRETRY, CH_EVENT_MC_FAIL, ch_action_fail }, + + {CH_STATE_SETUPWAIT, CH_EVENT_STOP, ch_action_haltio }, + {CH_STATE_SETUPWAIT, CH_EVENT_START, fsm_action_nop }, + {CH_STATE_SETUPWAIT, CH_EVENT_FINSTAT, ch_action_firstio }, + {CH_STATE_SETUPWAIT, CH_EVENT_UC_RCRESET, ch_action_setuperr }, + {CH_STATE_SETUPWAIT, CH_EVENT_UC_RSRESET, ch_action_setuperr }, + {CH_STATE_SETUPWAIT, CH_EVENT_TIMER, ch_action_setmode }, + {CH_STATE_SETUPWAIT, CH_EVENT_IO_ENODEV, ch_action_iofatal }, + {CH_STATE_SETUPWAIT, CH_EVENT_IO_EIO, ch_action_reinit }, + {CH_STATE_SETUPWAIT, CH_EVENT_MC_FAIL, ch_action_fail }, + + {CH_STATE_RXINIT, CH_EVENT_STOP, ch_action_haltio }, + {CH_STATE_RXINIT, CH_EVENT_START, fsm_action_nop }, + {CH_STATE_RXINIT, CH_EVENT_FINSTAT, ch_action_rxidle }, + {CH_STATE_RXINIT, CH_EVENT_UC_RCRESET, ch_action_rxiniterr }, + {CH_STATE_RXINIT, CH_EVENT_UC_RSRESET, ch_action_rxiniterr }, + {CH_STATE_RXINIT, CH_EVENT_TIMER, ch_action_rxiniterr }, + {CH_STATE_RXINIT, CH_EVENT_ATTNBUSY, ch_action_rxinitfail }, + {CH_STATE_RXINIT, CH_EVENT_IO_ENODEV, ch_action_iofatal }, + {CH_STATE_RXINIT, CH_EVENT_IO_EIO, ch_action_reinit }, + {CH_STATE_RXINIT, CH_EVENT_UC_ZERO, ch_action_firstio }, + {CH_STATE_RXINIT, CH_EVENT_MC_FAIL, ch_action_fail }, + + {CH_STATE_RXIDLE, CH_EVENT_STOP, ch_action_haltio }, + 
+	{CH_STATE_RXIDLE, CH_EVENT_START, fsm_action_nop },
+	{CH_STATE_RXIDLE, CH_EVENT_FINSTAT, ch_action_rx },
+	{CH_STATE_RXIDLE, CH_EVENT_UC_RCRESET, ch_action_rxdisc },
+//	{CH_STATE_RXIDLE, CH_EVENT_UC_RSRESET, ch_action_rxretry },
+	{CH_STATE_RXIDLE, CH_EVENT_IO_ENODEV, ch_action_iofatal },
+	{CH_STATE_RXIDLE, CH_EVENT_IO_EIO, ch_action_reinit },
+	{CH_STATE_RXIDLE, CH_EVENT_MC_FAIL, ch_action_fail },
+	{CH_STATE_RXIDLE, CH_EVENT_UC_ZERO, ch_action_rx },
+
+	{CH_STATE_TXINIT, CH_EVENT_STOP, ch_action_haltio },
+	{CH_STATE_TXINIT, CH_EVENT_START, fsm_action_nop },
+	{CH_STATE_TXINIT, CH_EVENT_FINSTAT, ch_action_txidle },
+	{CH_STATE_TXINIT, CH_EVENT_UC_RCRESET, ch_action_txiniterr },
+	{CH_STATE_TXINIT, CH_EVENT_UC_RSRESET, ch_action_txiniterr },
+	{CH_STATE_TXINIT, CH_EVENT_TIMER, ch_action_txiniterr },
+	{CH_STATE_TXINIT, CH_EVENT_IO_ENODEV, ch_action_iofatal },
+	{CH_STATE_TXINIT, CH_EVENT_IO_EIO, ch_action_reinit },
+	{CH_STATE_TXINIT, CH_EVENT_MC_FAIL, ch_action_fail },
+
+	{CH_STATE_TXIDLE, CH_EVENT_STOP, ch_action_haltio },
+	{CH_STATE_TXIDLE, CH_EVENT_START, fsm_action_nop },
+	{CH_STATE_TXIDLE, CH_EVENT_FINSTAT, ch_action_firstio },
+	{CH_STATE_TXIDLE, CH_EVENT_UC_RCRESET, fsm_action_nop },
+	{CH_STATE_TXIDLE, CH_EVENT_UC_RSRESET, fsm_action_nop },
+	{CH_STATE_TXIDLE, CH_EVENT_IO_ENODEV, ch_action_iofatal },
+	{CH_STATE_TXIDLE, CH_EVENT_IO_EIO, ch_action_reinit },
+	{CH_STATE_TXIDLE, CH_EVENT_MC_FAIL, ch_action_fail },
+
+	{CH_STATE_TERM, CH_EVENT_STOP, fsm_action_nop },
+	{CH_STATE_TERM, CH_EVENT_START, ch_action_restart },
+	{CH_STATE_TERM, CH_EVENT_FINSTAT, ch_action_stopped },
+	{CH_STATE_TERM, CH_EVENT_UC_RCRESET, fsm_action_nop },
+	{CH_STATE_TERM, CH_EVENT_UC_RSRESET, fsm_action_nop },
+	{CH_STATE_TERM, CH_EVENT_MC_FAIL, ch_action_fail },
+
+	{CH_STATE_DTERM, CH_EVENT_STOP, ch_action_haltio },
+	{CH_STATE_DTERM, CH_EVENT_START, ch_action_restart },
+	{CH_STATE_DTERM, CH_EVENT_FINSTAT, ch_action_setmode },
+	{CH_STATE_DTERM, CH_EVENT_UC_RCRESET, fsm_action_nop },
+	{CH_STATE_DTERM, CH_EVENT_UC_RSRESET, fsm_action_nop },
+	{CH_STATE_DTERM, CH_EVENT_MC_FAIL, ch_action_fail },
+
+	{CH_STATE_TX, CH_EVENT_STOP, ch_action_haltio },
+	{CH_STATE_TX, CH_EVENT_START, fsm_action_nop },
+	{CH_STATE_TX, CH_EVENT_FINSTAT, ch_action_txdone },
+	{CH_STATE_TX, CH_EVENT_UC_RCRESET, ch_action_txretry },
+	{CH_STATE_TX, CH_EVENT_UC_RSRESET, ch_action_txretry },
+	{CH_STATE_TX, CH_EVENT_TIMER, ch_action_txretry },
+	{CH_STATE_TX, CH_EVENT_IO_ENODEV, ch_action_iofatal },
+	{CH_STATE_TX, CH_EVENT_IO_EIO, ch_action_reinit },
+	{CH_STATE_TX, CH_EVENT_MC_FAIL, ch_action_fail },
+
+	{CH_STATE_RXERR, CH_EVENT_STOP, ch_action_haltio },
+	{CH_STATE_TXERR, CH_EVENT_STOP, ch_action_haltio },
+	{CH_STATE_TXERR, CH_EVENT_MC_FAIL, ch_action_fail },
+	{CH_STATE_RXERR, CH_EVENT_MC_FAIL, ch_action_fail },
+};
+
+static const int CH_FSM_LEN = sizeof (ch_fsm) / sizeof (fsm_node);
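The ch_fsm[] table above (and dev_fsm[] further down) is handed to init_fsm() and consumed by fsm_event(), which maps a (state, event) pair to an action function. The real helpers live in the driver's fsm.c and are not reproduced here; as an illustration of the idea only, a simplified linear-scan dispatcher over such a table could look like this (all names hypothetical):

#include <stdio.h>

enum { ST_STOPPED, ST_STARTWAIT, NR_STATES };
enum { EV_START, EV_STOP, NR_EVENTS };

typedef struct {
	int state;
	int event;
	void (*action)(int event, void *arg);
} demo_fsm_node;

static void act_start(int event, void *arg) { puts("starting channel"); }
static void act_nop(int event, void *arg) { }

static const demo_fsm_node demo_fsm[] = {
	{ ST_STOPPED,   EV_START, act_start },
	{ ST_STOPPED,   EV_STOP,  act_nop   },
	{ ST_STARTWAIT, EV_STOP,  act_nop   },
};

/* Linear scan for demonstration; a production version would rather
 * build an NR_STATES x NR_EVENTS jump matrix once, up front. */
static void
demo_fsm_event(int state, int event, void *arg)
{
	unsigned int i;

	for (i = 0; i < sizeof(demo_fsm) / sizeof(demo_fsm[0]); i++)
		if (demo_fsm[i].state == state && demo_fsm[i].event == event) {
			demo_fsm[i].action(event, arg);
			return;
		}
	printf("event %d not expected in state %d\n", event, state);
}

int main(void)
{
	demo_fsm_event(ST_STOPPED, EV_START, NULL);
	demo_fsm_event(ST_STARTWAIT, EV_START, NULL);	/* unhandled */
	return 0;
}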
+/**
+ * Functions related to setup and device detection.
+ *****************************************************************************/
+
+static inline int
+less_than(char *id1, char *id2)
+{
+	int dev1, dev2, i;
+
+	for (i = 0; i < 5; i++) {
+		id1++;
+		id2++;
+	}
+	dev1 = simple_strtoul(id1, &id1, 16);
+	dev2 = simple_strtoul(id2, &id2, 16);
+
+	return (dev1 < dev2);
+}
+
+/**
+ * Add a new channel to the list of channels.
+ * Keeps the channel list sorted.
+ *
+ * @param cdev  The ccw_device to be added.
+ * @param type  The type class of the new channel.
+ *
+ * @return 0 on success, !0 on error.
+ */
+static int
+add_channel(struct ccw_device *cdev, enum channel_types type)
+{
+	struct channel **c = &channels;
+	struct channel *ch;
+
+	DBF_TEXT(trace, 2, __FUNCTION__);
+	if ((ch =
+	     (struct channel *) kmalloc(sizeof (struct channel),
+					GFP_KERNEL)) == NULL) {
+		ctc_pr_warn("ctc: Out of memory in add_channel\n");
+		return -1;
+	}
+	memset(ch, 0, sizeof (struct channel));
+	if ((ch->ccw = (struct ccw1 *) kmalloc(8*sizeof(struct ccw1),
+					       GFP_KERNEL | GFP_DMA)) == NULL) {
+		kfree(ch);
+		ctc_pr_warn("ctc: Out of memory in add_channel\n");
+		return -1;
+	}
+
+	memset(ch->ccw, 0, 8*sizeof(struct ccw1));	/* ensure all flags and counters are reset */
+
+	/**
+	 * "static" ccws are used in the following way:
+	 *
+	 * ccw[0..2] (Channel program for generic I/O):
+	 *           0: prepare
+	 *           1: read or write (depending on direction) with fixed
+	 *              buffer (idal allocated once when buffer is allocated)
+	 *           2: nop
+	 * ccw[3..5] (Channel program for direct write of packets)
+	 *           3: prepare
+	 *           4: write (idal allocated on every write).
+	 *           5: nop
+	 * ccw[6..7] (Channel program for initial channel setup):
+	 *           6: set extended mode
+	 *           7: nop
+	 *
+	 * ch->ccw[0..5] are initialized in ch_action_start because
+	 * the channel's direction is yet unknown here.
+	 */
+	ch->ccw[6].cmd_code = CCW_CMD_SET_EXTENDED;
+	ch->ccw[6].flags = CCW_FLAG_SLI;
+
+	ch->ccw[7].cmd_code = CCW_CMD_NOOP;
+	ch->ccw[7].flags = CCW_FLAG_SLI;
+
+	ch->cdev = cdev;
+	snprintf(ch->id, CTC_ID_SIZE, "ch-%s", cdev->dev.bus_id);
+	ch->type = type;
+	loglevel = CTC_LOGLEVEL_DEFAULT;
+	ch->fsm = init_fsm(ch->id, ch_state_names,
+			   ch_event_names, NR_CH_STATES, NR_CH_EVENTS,
+			   ch_fsm, CH_FSM_LEN, GFP_KERNEL);
+	if (ch->fsm == NULL) {
+		ctc_pr_warn("ctc: Could not create FSM in add_channel\n");
+		kfree(ch->ccw);
+		kfree(ch);
+		return -1;
+	}
+	fsm_newstate(ch->fsm, CH_STATE_IDLE);
+	if ((ch->irb = (struct irb *) kmalloc(sizeof (struct irb),
+					      GFP_KERNEL)) == NULL) {
+		ctc_pr_warn("ctc: Out of memory in add_channel\n");
+		kfree_fsm(ch->fsm);
+		kfree(ch->ccw);
+		kfree(ch);
+		return -1;
+	}
+	memset(ch->irb, 0, sizeof (struct irb));
+	while (*c && less_than((*c)->id, ch->id))
+		c = &(*c)->next;
+	if (*c && (!strncmp((*c)->id, ch->id, CTC_ID_SIZE))) {
+		ctc_pr_debug(
+			"ctc: add_channel: device %s already in list, "
+			"using old entry\n", (*c)->id);
+		kfree(ch->irb);
+		kfree_fsm(ch->fsm);
+		kfree(ch->ccw);
+		kfree(ch);
+		return 0;
+	}
+	fsm_settimer(ch->fsm, &ch->timer);
+	skb_queue_head_init(&ch->io_queue);
+	skb_queue_head_init(&ch->collect_queue);
+	ch->next = *c;
+	*c = ch;
+	return 0;
+}
+
+/**
+ * Release a specific channel in the channel list.
+ *
+ * @param ch  Pointer to channel struct to be released.
+ */
+static void
+channel_free(struct channel *ch)
+{
+	ch->flags &= ~CHANNEL_FLAGS_INUSE;
+	fsm_newstate(ch->fsm, CH_STATE_IDLE);
+}
+
+/**
+ * Remove a specific channel from the channel list.
+ *
+ * @param ch  Pointer to channel struct to be released.
+ */
+static void
+channel_remove(struct channel *ch)
+{
+	struct channel **c = &channels;
+
+	DBF_TEXT(trace, 2, __FUNCTION__);
+	if (ch == NULL)
+		return;
+
+	channel_free(ch);
+	while (*c) {
+		if (*c == ch) {
+			*c = ch->next;
+			fsm_deltimer(&ch->timer);
+			kfree_fsm(ch->fsm);
+			clear_normalized_cda(&ch->ccw[4]);
+			if (ch->trans_skb != NULL) {
+				clear_normalized_cda(&ch->ccw[1]);
+				dev_kfree_skb(ch->trans_skb);
+			}
+			kfree(ch->ccw);
+			kfree(ch->irb);
+			kfree(ch);
+			return;
+		}
+		c = &((*c)->next);
+	}
+}
+
+/**
+ * Get a specific channel from the channel list.
+ *
+ * @param type      Type of channel we are interested in.
+ * @param id        Id of channel we are interested in.
+ * @param direction Direction we want to use this channel for.
+ *
+ * @return Pointer to a channel or NULL if no matching channel available.
+ */
+static struct channel *
+channel_get(enum channel_types type, char *id, int direction)
+{
+	struct channel *ch = channels;
+
+	DBF_TEXT(trace, 3, __FUNCTION__);
+#ifdef DEBUG
+	ctc_pr_debug("ctc: %s(): searching for ch with id %s and type %d\n",
+		     __func__, id, type);
+#endif
+
+	while (ch && ((strncmp(ch->id, id, CTC_ID_SIZE)) || (ch->type != type))) {
+#ifdef DEBUG
+		ctc_pr_debug("ctc: %s(): ch=0x%p (id=%s, type=%d)\n",
+			     __func__, ch, ch->id, ch->type);
+#endif
+		ch = ch->next;
+	}
+#ifdef DEBUG
+	ctc_pr_debug("ctc: %s(): ch=0x%p (id=%s, type=%d)\n",
+		     __func__, ch, ch->id, ch->type);
+#endif
+	if (!ch) {
+		ctc_pr_warn("ctc: %s(): channel with id %s "
+			    "and type %d not found in channel list\n",
+			    __func__, id, type);
+	} else {
+		if (ch->flags & CHANNEL_FLAGS_INUSE)
+			ch = NULL;
+		else {
+			ch->flags |= CHANNEL_FLAGS_INUSE;
+			ch->flags &= ~CHANNEL_FLAGS_RWMASK;
+			ch->flags |= (direction == WRITE)
+			    ? CHANNEL_FLAGS_WRITE : CHANNEL_FLAGS_READ;
+			fsm_newstate(ch->fsm, CH_STATE_STOPPED);
+		}
+	}
+	return ch;
+}
+
+/**
+ * Return the channel type by name.
+ *
+ * @param name  Name of network interface.
+ *
+ * @return Type class of channel to be used for that interface.
+ */
+static inline enum channel_types
+extract_channel_media(char *name)
+{
+	enum channel_types ret = channel_type_unknown;
+
+	if (name != NULL) {
+		if (strncmp(name, "ctc", 3) == 0)
+			ret = channel_type_parallel;
+		if (strncmp(name, "escon", 5) == 0)
+			ret = channel_type_escon;
+	}
+	return ret;
+}
+
+static long
+__ctc_check_irb_error(struct ccw_device *cdev, struct irb *irb)
+{
+	if (!IS_ERR(irb))
+		return 0;
+
+	switch (PTR_ERR(irb)) {
+	case -EIO:
+		ctc_pr_warn("i/o-error on device %s\n", cdev->dev.bus_id);
+//		CTC_DBF_TEXT(trace, 2, "ckirberr");
+//		CTC_DBF_TEXT_(trace, 2, "  rc%d", -EIO);
+		break;
+	case -ETIMEDOUT:
+		ctc_pr_warn("timeout on device %s\n", cdev->dev.bus_id);
+//		CTC_DBF_TEXT(trace, 2, "ckirberr");
+//		CTC_DBF_TEXT_(trace, 2, "  rc%d", -ETIMEDOUT);
+		break;
+	default:
+		ctc_pr_warn("unknown error %ld on device %s\n", PTR_ERR(irb),
+			    cdev->dev.bus_id);
+//		CTC_DBF_TEXT(trace, 2, "ckirberr");
+//		CTC_DBF_TEXT(trace, 2, "  rc???");
+	}
+	return PTR_ERR(irb);
+}
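__ctc_check_irb_error() leans on the kernel convention that a small negative errno can be encoded directly in a pointer and recovered with IS_ERR()/PTR_ERR(), so the common-I/O layer can hand the IRQ handler either a real irb or an error in the same argument. A userspace sketch of that convention with simplified stand-in helpers (the kernel's versions differ in detail):

#include <stdio.h>

#define MAX_ERRNO	4095

/* Simplified stand-ins for the kernel's ERR_PTR/PTR_ERR/IS_ERR. */
static void *err_ptr(long error)	{ return (void *) error; }
static long ptr_err(const void *ptr)	{ return (long) ptr; }
static int is_err(const void *ptr)
{
	/* Error pointers occupy the top MAX_ERRNO addresses. */
	return (unsigned long) ptr >= (unsigned long) -MAX_ERRNO;
}

int main(void)
{
	int real_irb = 42;
	void *good = &real_irb;
	void *bad = err_ptr(-5);	/* -EIO on Linux */

	printf("good: is_err=%d\n", is_err(good));
	if (is_err(bad))
		printf("bad:  errno=%ld\n", ptr_err(bad));
	return 0;
}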
+/**
+ * Main IRQ handler.
+ *
+ * @param cdev    The ccw_device the interrupt is for.
+ * @param intparm interruption parameter.
+ * @param irb     interruption response block.
+ */
+static void
+ctc_irq_handler(struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
+{
+	struct channel *ch;
+	struct net_device *dev;
+	struct ctc_priv *priv;
+
+	DBF_TEXT(trace, 5, __FUNCTION__);
+	if (__ctc_check_irb_error(cdev, irb))
+		return;
+
+	/* Check for unsolicited interrupts. */
+	if (!cdev->dev.driver_data) {
+		ctc_pr_warn("ctc: Got unsolicited irq: %s c-%02x d-%02x\n",
+			    cdev->dev.bus_id, irb->scsw.cstat,
+			    irb->scsw.dstat);
+		return;
+	}
+
+	priv = ((struct ccwgroup_device *)cdev->dev.driver_data)
+		->dev.driver_data;
+
+	/* Try to extract channel from driver data. */
+	if (priv->channel[READ]->cdev == cdev)
+		ch = priv->channel[READ];
+	else if (priv->channel[WRITE]->cdev == cdev)
+		ch = priv->channel[WRITE];
+	else {
+		ctc_pr_err("ctc: Can't determine channel for interrupt, "
+			   "device %s\n", cdev->dev.bus_id);
+		return;
+	}
+
+	dev = (struct net_device *) (ch->netdev);
+	if (dev == NULL) {
+		ctc_pr_crit("ctc: ctc_irq_handler dev=NULL bus_id=%s, ch=0x%p\n",
+			    cdev->dev.bus_id, ch);
+		return;
+	}
+
+#ifdef DEBUG
+	ctc_pr_debug("%s: interrupt for device: %s received c-%02x d-%02x\n",
+		     dev->name, ch->id, irb->scsw.cstat, irb->scsw.dstat);
+#endif
+
+	/* Copy interruption response block. */
+	memcpy(ch->irb, irb, sizeof(struct irb));
+
+	/* Check for good subchannel return code, otherwise error message */
+	if (ch->irb->scsw.cstat) {
+		fsm_event(ch->fsm, CH_EVENT_SC_UNKNOWN, ch);
+		ctc_pr_warn("%s: subchannel check for device: %s - %02x %02x\n",
+			    dev->name, ch->id, ch->irb->scsw.cstat,
+			    ch->irb->scsw.dstat);
+		return;
+	}
+
+	/* Check the reason-code of a unit check */
+	if (ch->irb->scsw.dstat & DEV_STAT_UNIT_CHECK) {
+		ccw_unit_check(ch, ch->irb->ecw[0]);
+		return;
+	}
+	if (ch->irb->scsw.dstat & DEV_STAT_BUSY) {
+		if (ch->irb->scsw.dstat & DEV_STAT_ATTENTION)
+			fsm_event(ch->fsm, CH_EVENT_ATTNBUSY, ch);
+		else
+			fsm_event(ch->fsm, CH_EVENT_BUSY, ch);
+		return;
+	}
+	if (ch->irb->scsw.dstat & DEV_STAT_ATTENTION) {
+		fsm_event(ch->fsm, CH_EVENT_ATTN, ch);
+		return;
+	}
+	if ((ch->irb->scsw.stctl & SCSW_STCTL_SEC_STATUS) ||
+	    (ch->irb->scsw.stctl == SCSW_STCTL_STATUS_PEND) ||
+	    (ch->irb->scsw.stctl ==
+	     (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND)))
+		fsm_event(ch->fsm, CH_EVENT_FINSTAT, ch);
+	else
+		fsm_event(ch->fsm, CH_EVENT_IRQ, ch);
+}
+
+/**
+ * Actions for interface - statemachines.
+ *****************************************************************************/
+
+/**
+ * Startup channels by sending CH_EVENT_START to each channel.
+ *
+ * @param fi    An instance of an interface statemachine.
+ * @param event The event, just happened.
+ * @param arg   Generic pointer, cast from struct net_device * upon call.
+ */
+static void
+dev_action_start(fsm_instance * fi, int event, void *arg)
+{
+	struct net_device *dev = (struct net_device *) arg;
+	struct ctc_priv *privptr = dev->priv;
+	int direction;
+
+	DBF_TEXT(setup, 3, __FUNCTION__);
+	fsm_deltimer(&privptr->restart_timer);
+	fsm_newstate(fi, DEV_STATE_STARTWAIT_RXTX);
+	for (direction = READ; direction <= WRITE; direction++) {
+		struct channel *ch = privptr->channel[direction];
+		fsm_event(ch->fsm, CH_EVENT_START, ch);
+	}
+}
+
+/**
+ * Shutdown channels by sending CH_EVENT_STOP to each channel.
+ *
+ * @param fi    An instance of an interface statemachine.
+ * @param event The event, just happened.
+ * @param arg   Generic pointer, cast from struct net_device * upon call.
+ */ +static void +dev_action_stop(fsm_instance * fi, int event, void *arg) +{ + struct net_device *dev = (struct net_device *) arg; + struct ctc_priv *privptr = dev->priv; + int direction; + + DBF_TEXT(trace, 3, __FUNCTION__); + fsm_newstate(fi, DEV_STATE_STOPWAIT_RXTX); + for (direction = READ; direction <= WRITE; direction++) { + struct channel *ch = privptr->channel[direction]; + fsm_event(ch->fsm, CH_EVENT_STOP, ch); + } +} +static void +dev_action_restart(fsm_instance *fi, int event, void *arg) +{ + struct net_device *dev = (struct net_device *)arg; + struct ctc_priv *privptr = dev->priv; + + DBF_TEXT(trace, 3, __FUNCTION__); + ctc_pr_debug("%s: Restarting\n", dev->name); + dev_action_stop(fi, event, arg); + fsm_event(privptr->fsm, DEV_EVENT_STOP, dev); + fsm_addtimer(&privptr->restart_timer, CTC_TIMEOUT_5SEC, + DEV_EVENT_START, dev); +} + +/** + * Called from channel statemachine + * when a channel is up and running. + * + * @param fi An instance of an interface statemachine. + * @param event The event, just happened. + * @param arg Generic pointer, casted from struct net_device * upon call. + */ +static void +dev_action_chup(fsm_instance * fi, int event, void *arg) +{ + struct net_device *dev = (struct net_device *) arg; + struct ctc_priv *privptr = dev->priv; + + DBF_TEXT(trace, 3, __FUNCTION__); + switch (fsm_getstate(fi)) { + case DEV_STATE_STARTWAIT_RXTX: + if (event == DEV_EVENT_RXUP) + fsm_newstate(fi, DEV_STATE_STARTWAIT_TX); + else + fsm_newstate(fi, DEV_STATE_STARTWAIT_RX); + break; + case DEV_STATE_STARTWAIT_RX: + if (event == DEV_EVENT_RXUP) { + fsm_newstate(fi, DEV_STATE_RUNNING); + ctc_pr_info("%s: connected with remote side\n", + dev->name); + if (privptr->protocol == CTC_PROTO_LINUX_TTY) + ctc_tty_setcarrier(dev, 1); + ctc_clear_busy(dev); + } + break; + case DEV_STATE_STARTWAIT_TX: + if (event == DEV_EVENT_TXUP) { + fsm_newstate(fi, DEV_STATE_RUNNING); + ctc_pr_info("%s: connected with remote side\n", + dev->name); + if (privptr->protocol == CTC_PROTO_LINUX_TTY) + ctc_tty_setcarrier(dev, 1); + ctc_clear_busy(dev); + } + break; + case DEV_STATE_STOPWAIT_TX: + if (event == DEV_EVENT_RXUP) + fsm_newstate(fi, DEV_STATE_STOPWAIT_RXTX); + break; + case DEV_STATE_STOPWAIT_RX: + if (event == DEV_EVENT_TXUP) + fsm_newstate(fi, DEV_STATE_STOPWAIT_RXTX); + break; + } +} + +/** + * Called from channel statemachine + * when a channel has been shutdown. + * + * @param fi An instance of an interface statemachine. + * @param event The event, just happened. + * @param arg Generic pointer, casted from struct net_device * upon call. 
+ */ +static void +dev_action_chdown(fsm_instance * fi, int event, void *arg) +{ + struct net_device *dev = (struct net_device *) arg; + struct ctc_priv *privptr = dev->priv; + + DBF_TEXT(trace, 3, __FUNCTION__); + switch (fsm_getstate(fi)) { + case DEV_STATE_RUNNING: + if (privptr->protocol == CTC_PROTO_LINUX_TTY) + ctc_tty_setcarrier(dev, 0); + if (event == DEV_EVENT_TXDOWN) + fsm_newstate(fi, DEV_STATE_STARTWAIT_TX); + else + fsm_newstate(fi, DEV_STATE_STARTWAIT_RX); + break; + case DEV_STATE_STARTWAIT_RX: + if (event == DEV_EVENT_TXDOWN) + fsm_newstate(fi, DEV_STATE_STARTWAIT_RXTX); + break; + case DEV_STATE_STARTWAIT_TX: + if (event == DEV_EVENT_RXDOWN) + fsm_newstate(fi, DEV_STATE_STARTWAIT_RXTX); + break; + case DEV_STATE_STOPWAIT_RXTX: + if (event == DEV_EVENT_TXDOWN) + fsm_newstate(fi, DEV_STATE_STOPWAIT_RX); + else + fsm_newstate(fi, DEV_STATE_STOPWAIT_TX); + break; + case DEV_STATE_STOPWAIT_RX: + if (event == DEV_EVENT_RXDOWN) + fsm_newstate(fi, DEV_STATE_STOPPED); + break; + case DEV_STATE_STOPWAIT_TX: + if (event == DEV_EVENT_TXDOWN) + fsm_newstate(fi, DEV_STATE_STOPPED); + break; + } +} + +static const fsm_node dev_fsm[] = { + {DEV_STATE_STOPPED, DEV_EVENT_START, dev_action_start}, + + {DEV_STATE_STOPWAIT_RXTX, DEV_EVENT_START, dev_action_start }, + {DEV_STATE_STOPWAIT_RXTX, DEV_EVENT_RXDOWN, dev_action_chdown }, + {DEV_STATE_STOPWAIT_RXTX, DEV_EVENT_TXDOWN, dev_action_chdown }, + {DEV_STATE_STOPWAIT_RXTX, DEV_EVENT_RESTART, dev_action_restart }, + + {DEV_STATE_STOPWAIT_RX, DEV_EVENT_START, dev_action_start }, + {DEV_STATE_STOPWAIT_RX, DEV_EVENT_RXUP, dev_action_chup }, + {DEV_STATE_STOPWAIT_RX, DEV_EVENT_TXUP, dev_action_chup }, + {DEV_STATE_STOPWAIT_RX, DEV_EVENT_RXDOWN, dev_action_chdown }, + {DEV_STATE_STOPWAIT_RX, DEV_EVENT_RESTART, dev_action_restart }, + + {DEV_STATE_STOPWAIT_TX, DEV_EVENT_START, dev_action_start }, + {DEV_STATE_STOPWAIT_TX, DEV_EVENT_RXUP, dev_action_chup }, + {DEV_STATE_STOPWAIT_TX, DEV_EVENT_TXUP, dev_action_chup }, + {DEV_STATE_STOPWAIT_TX, DEV_EVENT_TXDOWN, dev_action_chdown }, + {DEV_STATE_STOPWAIT_TX, DEV_EVENT_RESTART, dev_action_restart }, + + {DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_STOP, dev_action_stop }, + {DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_RXUP, dev_action_chup }, + {DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_TXUP, dev_action_chup }, + {DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_RXDOWN, dev_action_chdown }, + {DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_TXDOWN, dev_action_chdown }, + {DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_RESTART, dev_action_restart }, + + {DEV_STATE_STARTWAIT_TX, DEV_EVENT_STOP, dev_action_stop }, + {DEV_STATE_STARTWAIT_TX, DEV_EVENT_RXUP, dev_action_chup }, + {DEV_STATE_STARTWAIT_TX, DEV_EVENT_TXUP, dev_action_chup }, + {DEV_STATE_STARTWAIT_TX, DEV_EVENT_RXDOWN, dev_action_chdown }, + {DEV_STATE_STARTWAIT_TX, DEV_EVENT_RESTART, dev_action_restart }, + + {DEV_STATE_STARTWAIT_RX, DEV_EVENT_STOP, dev_action_stop }, + {DEV_STATE_STARTWAIT_RX, DEV_EVENT_RXUP, dev_action_chup }, + {DEV_STATE_STARTWAIT_RX, DEV_EVENT_TXUP, dev_action_chup }, + {DEV_STATE_STARTWAIT_RX, DEV_EVENT_TXDOWN, dev_action_chdown }, + {DEV_STATE_STARTWAIT_RX, DEV_EVENT_RESTART, dev_action_restart }, + + {DEV_STATE_RUNNING, DEV_EVENT_STOP, dev_action_stop }, + {DEV_STATE_RUNNING, DEV_EVENT_RXDOWN, dev_action_chdown }, + {DEV_STATE_RUNNING, DEV_EVENT_TXDOWN, dev_action_chdown }, + {DEV_STATE_RUNNING, DEV_EVENT_TXUP, fsm_action_nop }, + {DEV_STATE_RUNNING, DEV_EVENT_RXUP, fsm_action_nop }, + {DEV_STATE_RUNNING, DEV_EVENT_RESTART, dev_action_restart }, +}; + +static const int 
DEV_FSM_LEN = sizeof (dev_fsm) / sizeof (fsm_node);
+
+/**
+ * Transmit a packet.
+ * This is a helper function for ctc_tx().
+ *
+ * @param ch   Channel to be used for sending.
+ * @param skb  Pointer to struct sk_buff of packet to send.
+ *             The linklevel header has already been set up
+ *             by ctc_tx().
+ *
+ * @return 0 on success, -ERRNO on failure.
+ */
+static int
+transmit_skb(struct channel *ch, struct sk_buff *skb)
+{
+	unsigned long saveflags;
+	struct ll_header header;
+	int rc = 0;
+
+	DBF_TEXT(trace, 5, __FUNCTION__);
+	if (fsm_getstate(ch->fsm) != CH_STATE_TXIDLE) {
+		int l = skb->len + LL_HEADER_LENGTH;
+
+		spin_lock_irqsave(&ch->collect_lock, saveflags);
+		if (ch->collect_len + l > ch->max_bufsize - 2)
+			rc = -EBUSY;
+		else {
+			atomic_inc(&skb->users);
+			header.length = l;
+			header.type = skb->protocol;
+			header.unused = 0;
+			memcpy(skb_push(skb, LL_HEADER_LENGTH), &header,
+			       LL_HEADER_LENGTH);
+			skb_queue_tail(&ch->collect_queue, skb);
+			ch->collect_len += l;
+		}
+		spin_unlock_irqrestore(&ch->collect_lock, saveflags);
+	} else {
+		__u16 block_len;
+		int ccw_idx;
+		struct sk_buff *nskb;
+		unsigned long hi;
+
+		/**
+		 * Protect skb against being freed by upper
+		 * layers.
+		 */
+		atomic_inc(&skb->users);
+		ch->prof.txlen += skb->len;
+		header.length = skb->len + LL_HEADER_LENGTH;
+		header.type = skb->protocol;
+		header.unused = 0;
+		memcpy(skb_push(skb, LL_HEADER_LENGTH), &header,
+		       LL_HEADER_LENGTH);
+		block_len = skb->len + 2;
+		*((__u16 *) skb_push(skb, 2)) = block_len;
+
+		/**
+		 * IDAL support in CTC is broken, so we have to
+		 * care about skbs above 2G ourselves.
+		 */
+		hi = ((unsigned long) skb->tail + LL_HEADER_LENGTH) >> 31;
+		if (hi) {
+			nskb = alloc_skb(skb->len, GFP_ATOMIC | GFP_DMA);
+			if (!nskb) {
+				atomic_dec(&skb->users);
+				skb_pull(skb, LL_HEADER_LENGTH + 2);
+				return -ENOMEM;
+			} else {
+				memcpy(skb_put(nskb, skb->len),
+				       skb->data, skb->len);
+				atomic_inc(&nskb->users);
+				atomic_dec(&skb->users);
+				dev_kfree_skb_irq(skb);
+				skb = nskb;
+			}
+		}
+
+		ch->ccw[4].count = block_len;
+		if (set_normalized_cda(&ch->ccw[4], skb->data)) {
+			/**
+			 * idal allocation failed, try via copying to
+			 * trans_skb. trans_skb usually has a pre-allocated
+			 * idal.
+			 */
+			if (ctc_checkalloc_buffer(ch, 1)) {
+				/**
+				 * Remove our header. It gets added
+				 * again on retransmit.
+				 */
+				atomic_dec(&skb->users);
+				skb_pull(skb, LL_HEADER_LENGTH + 2);
+				return -EBUSY;
+			}
+
+			ch->trans_skb->tail = ch->trans_skb->data;
+			ch->trans_skb->len = 0;
+			ch->ccw[1].count = skb->len;
+			memcpy(skb_put(ch->trans_skb, skb->len), skb->data,
+			       skb->len);
+			atomic_dec(&skb->users);
+			dev_kfree_skb_irq(skb);
+			ccw_idx = 0;
+		} else {
+			skb_queue_tail(&ch->io_queue, skb);
+			ccw_idx = 3;
+		}
+		ch->retry = 0;
+		fsm_newstate(ch->fsm, CH_STATE_TX);
+		fsm_addtimer(&ch->timer, CTC_TIMEOUT_5SEC, CH_EVENT_TIMER, ch);
+		spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
+		ch->prof.send_stamp = xtime;
+		rc = ccw_device_start(ch->cdev, &ch->ccw[ccw_idx],
+				      (unsigned long) ch, 0xff, 0);
+		spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
+		if (ccw_idx == 3)
+			ch->prof.doios_single++;
+		if (rc != 0) {
+			fsm_deltimer(&ch->timer);
+			ccw_check_return_code(ch, rc, "single skb TX");
+			if (ccw_idx == 3)
+				skb_dequeue_tail(&ch->io_queue);
+			/**
+			 * Remove our header. It gets added
+			 * again on retransmit.
+			 */
+			skb_pull(skb, LL_HEADER_LENGTH + 2);
+		} else {
+			if (ccw_idx == 0) {
+				struct net_device *dev = ch->netdev;
+				struct ctc_priv *privptr = dev->priv;
+				privptr->stats.tx_packets++;
+				privptr->stats.tx_bytes +=
+				    skb->len - LL_HEADER_LENGTH;
+			}
+		}
+	}
+
+	return rc;
+}
+
+/**
+ * Interface API for upper network layers
+ *****************************************************************************/
+
+/**
+ * Open an interface.
+ * Called from generic network layer when ifconfig up is run.
+ *
+ * @param dev  Pointer to interface struct.
+ *
+ * @return 0 on success, -ERRNO on failure. (Never fails.)
+ */
+static int
+ctc_open(struct net_device * dev)
+{
+	DBF_TEXT(trace, 5, __FUNCTION__);
+	fsm_event(((struct ctc_priv *) dev->priv)->fsm, DEV_EVENT_START, dev);
+	return 0;
+}
+
+/**
+ * Close an interface.
+ * Called from generic network layer when ifconfig down is run.
+ *
+ * @param dev  Pointer to interface struct.
+ *
+ * @return 0 on success, -ERRNO on failure. (Never fails.)
+ */
+static int
+ctc_close(struct net_device * dev)
+{
+	DBF_TEXT(trace, 5, __FUNCTION__);
+	fsm_event(((struct ctc_priv *) dev->priv)->fsm, DEV_EVENT_STOP, dev);
+	return 0;
+}
+
+/**
+ * Start transmission of a packet.
+ * Called from generic network device layer.
+ *
+ * @param skb  Pointer to buffer containing the packet.
+ * @param dev  Pointer to interface struct.
+ *
+ * @return 0 if packet consumed, !0 if packet rejected.
+ *         Note: If we return !0, then the packet is free'd by
+ *               the generic network layer.
+ */
+static int
+ctc_tx(struct sk_buff *skb, struct net_device * dev)
+{
+	int rc = 0;
+	struct ctc_priv *privptr = (struct ctc_priv *) dev->priv;
+
+	DBF_TEXT(trace, 5, __FUNCTION__);
+	/**
+	 * Some sanity checks ...
+	 */
+	if (skb == NULL) {
+		ctc_pr_warn("%s: NULL sk_buff passed\n", dev->name);
+		privptr->stats.tx_dropped++;
+		return 0;
+	}
+	if (skb_headroom(skb) < (LL_HEADER_LENGTH + 2)) {
+		ctc_pr_warn("%s: Got sk_buff with headroom < %ld bytes\n",
+			    dev->name, LL_HEADER_LENGTH + 2);
+		dev_kfree_skb(skb);
+		privptr->stats.tx_dropped++;
+		return 0;
+	}
+
+	/**
+	 * If channels are not running, try to restart them
+	 * and throw away packet.
+	 */
+	if (fsm_getstate(privptr->fsm) != DEV_STATE_RUNNING) {
+		fsm_event(privptr->fsm, DEV_EVENT_START, dev);
+		if (privptr->protocol == CTC_PROTO_LINUX_TTY)
+			return -EBUSY;
+		dev_kfree_skb(skb);
+		privptr->stats.tx_dropped++;
+		privptr->stats.tx_errors++;
+		privptr->stats.tx_carrier_errors++;
+		return 0;
+	}
+
+	if (ctc_test_and_set_busy(dev))
+		return -EBUSY;
+
+	dev->trans_start = jiffies;
+	if (transmit_skb(privptr->channel[WRITE], skb) != 0)
+		rc = 1;
+	ctc_clear_busy(dev);
+	return rc;
+}
+
+/**
+ * Sets MTU of an interface.
+ *
+ * @param dev      Pointer to interface struct.
+ * @param new_mtu  The new MTU to use for this interface.
+ *
+ * @return 0 on success, -EINVAL if MTU is out of valid range.
+ *         (valid range is 576 .. 65527). If VM is on the
+ *         remote side, maximum MTU is 32760, however this is
+ *         <em>not</em> checked here.
+ */
+static int
+ctc_change_mtu(struct net_device * dev, int new_mtu)
+{
+	struct ctc_priv *privptr = (struct ctc_priv *) dev->priv;
+
+	DBF_TEXT(trace, 3, __FUNCTION__);
+	if ((new_mtu < 576) || (new_mtu > 65527) ||
+	    (new_mtu > (privptr->channel[READ]->max_bufsize -
+			LL_HEADER_LENGTH - 2)))
+		return -EINVAL;
+	dev->mtu = new_mtu;
+	dev->hard_header_len = LL_HEADER_LENGTH + 2;
+	return 0;
+}
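The upper bound enforced by ctc_change_mtu() falls directly out of the TX framing built by transmit_skb(): every block must fit the 2-byte block length plus an LL header in front of the payload within the channel buffer. A small sketch of that arithmetic follows; the LL_HEADER_LENGTH value of 6 (three 16-bit fields) and the 32768-byte default buffer are assumptions for illustration, not quotations of the driver's constants.

#include <stdio.h>

#define DEMO_LL_HEADER_LENGTH	6	/* assumed sizeof(struct ll_header) */

/* Mirror of the bounds checked by ctc_change_mtu(). */
static int
mtu_valid(int new_mtu, int max_bufsize)
{
	if (new_mtu < 576 || new_mtu > 65527)
		return 0;
	/* Leave room for the 2-byte block length and the LL header. */
	if (new_mtu > max_bufsize - DEMO_LL_HEADER_LENGTH - 2)
		return 0;
	return 1;
}

int main(void)
{
	int bufsize = 32768;	/* hypothetical default buffer size */

	printf("mtu 1500:  %s\n", mtu_valid(1500, bufsize) ? "ok" : "rejected");
	printf("mtu 32768: %s\n", mtu_valid(32768, bufsize) ? "ok" : "rejected");
	printf("mtu 500:   %s\n", mtu_valid(500, bufsize) ? "ok" : "rejected");
	return 0;
}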
+/**
+ * Returns interface statistics of a device.
+ *
+ * @param dev  Pointer to interface struct.
+ *
+ * @return Pointer to stats struct of this interface.
+ */
+static struct net_device_stats *
+ctc_stats(struct net_device * dev)
+{
+	return &((struct ctc_priv *) dev->priv)->stats;
+}
+
+/*
+ * sysfs attributes
+ */
+static ssize_t
+buffer_show(struct device *dev, char *buf)
+{
+	struct ctc_priv *priv;
+
+	priv = dev->driver_data;
+	if (!priv)
+		return -ENODEV;
+	return sprintf(buf, "%d\n", priv->buffer_size);
+}
+
+static ssize_t
+buffer_write(struct device *dev, const char *buf, size_t count)
+{
+	struct ctc_priv *priv;
+	struct net_device *ndev;
+	int bs1;
+
+	DBF_TEXT(trace, 3, __FUNCTION__);
+	priv = dev->driver_data;
+	if (!priv)
+		return -ENODEV;
+	ndev = priv->channel[READ]->netdev;
+	if (!ndev)
+		return -ENODEV;
+	sscanf(buf, "%d", &bs1);
+
+	if (bs1 > CTC_BUFSIZE_LIMIT)
+		return -EINVAL;
+	if ((ndev->flags & IFF_RUNNING) &&
+	    (bs1 < (ndev->mtu + LL_HEADER_LENGTH + 2)))
+		return -EINVAL;
+	if (bs1 < (576 + LL_HEADER_LENGTH + 2))
+		return -EINVAL;
+
+	priv->buffer_size = bs1;
+	priv->channel[READ]->max_bufsize =
+	    priv->channel[WRITE]->max_bufsize = bs1;
+	if (!(ndev->flags & IFF_RUNNING))
+		ndev->mtu = bs1 - LL_HEADER_LENGTH - 2;
+	priv->channel[READ]->flags |= CHANNEL_FLAGS_BUFSIZE_CHANGED;
+	priv->channel[WRITE]->flags |= CHANNEL_FLAGS_BUFSIZE_CHANGED;
+
+	return count;
+}
+
+static ssize_t
+loglevel_show(struct device *dev, char *buf)
+{
+	struct ctc_priv *priv;
+
+	priv = dev->driver_data;
+	if (!priv)
+		return -ENODEV;
+	return sprintf(buf, "%d\n", loglevel);
+}
+
+static ssize_t
+loglevel_write(struct device *dev, const char *buf, size_t count)
+{
+	struct ctc_priv *priv;
+	int ll1;
+
+	DBF_TEXT(trace, 5, __FUNCTION__);
+	priv = dev->driver_data;
+	if (!priv)
+		return -ENODEV;
+	sscanf(buf, "%i", &ll1);
+
+	if ((ll1 > CTC_LOGLEVEL_MAX) || (ll1 < 0))
+		return -EINVAL;
+	loglevel = ll1;
+	return count;
+}
+
+static void
+ctc_print_statistics(struct ctc_priv *priv)
+{
+	char *sbuf;
+	char *p;
+
+	DBF_TEXT(trace, 4, __FUNCTION__);
+	if (!priv)
+		return;
+	sbuf = (char *)kmalloc(2048, GFP_KERNEL);
+	if (sbuf == NULL)
+		return;
+	p = sbuf;
+
+	p += sprintf(p, " Device FSM state: %s\n",
+		     fsm_getstate_str(priv->fsm));
+	p += sprintf(p, " RX channel FSM state: %s\n",
+		     fsm_getstate_str(priv->channel[READ]->fsm));
+	p += sprintf(p, " TX channel FSM state: %s\n",
+		     fsm_getstate_str(priv->channel[WRITE]->fsm));
+	p += sprintf(p, " Max. TX buffer used: %ld\n",
+		     priv->channel[WRITE]->prof.maxmulti);
+	p += sprintf(p, " Max. chained SKBs: %ld\n",
+		     priv->channel[WRITE]->prof.maxcqueue);
+	p += sprintf(p, " TX single write ops: %ld\n",
+		     priv->channel[WRITE]->prof.doios_single);
+	p += sprintf(p, " TX multi write ops: %ld\n",
+		     priv->channel[WRITE]->prof.doios_multi);
+	p += sprintf(p, " Net bytes written: %ld\n",
+		     priv->channel[WRITE]->prof.txlen);
+	p += sprintf(p, " Max. TX IO-time: %ld\n",
+		     priv->channel[WRITE]->prof.tx_time);
+
+	ctc_pr_debug("Statistics for %s:\n%s",
+		     priv->channel[WRITE]->netdev->name, sbuf);
+	kfree(sbuf);
+	return;
+}
+
+static ssize_t
+stats_show(struct device *dev, char *buf)
+{
+	struct ctc_priv *priv = dev->driver_data;
+	if (!priv)
+		return -ENODEV;
+	ctc_print_statistics(priv);
+	return sprintf(buf, "0\n");
+}
+
+static ssize_t
+stats_write(struct device *dev, const char *buf, size_t count)
+{
+	struct ctc_priv *priv = dev->driver_data;
+	if (!priv)
+		return -ENODEV;
+	/* Reset statistics */
+	memset(&priv->channel[WRITE]->prof, 0,
+	       sizeof(priv->channel[WRITE]->prof));
+	return count;
+}
+
+static DEVICE_ATTR(buffer, 0644, buffer_show, buffer_write);
+static DEVICE_ATTR(loglevel, 0644, loglevel_show, loglevel_write);
+static DEVICE_ATTR(stats, 0644, stats_show, stats_write);
+
+static int
+ctc_add_attributes(struct device *dev)
+{
+//	device_create_file(dev, &dev_attr_buffer);
+	device_create_file(dev, &dev_attr_loglevel);
+	device_create_file(dev, &dev_attr_stats);
+	return 0;
+}
+
+static void
+ctc_remove_attributes(struct device *dev)
+{
+	device_remove_file(dev, &dev_attr_stats);
+	device_remove_file(dev, &dev_attr_loglevel);
+//	device_remove_file(dev, &dev_attr_buffer);
+}
+
+static void
+ctc_netdev_unregister(struct net_device * dev)
+{
+	struct ctc_priv *privptr;
+
+	if (!dev)
+		return;
+	privptr = (struct ctc_priv *) dev->priv;
+	if (privptr->protocol != CTC_PROTO_LINUX_TTY)
+		unregister_netdev(dev);
+	else
+		ctc_tty_unregister_netdev(dev);
+}
+
+static int
+ctc_netdev_register(struct net_device * dev)
+{
+	struct ctc_priv *privptr = (struct ctc_priv *) dev->priv;
+	if (privptr->protocol != CTC_PROTO_LINUX_TTY)
+		return register_netdev(dev);
+	else
+		return ctc_tty_register_netdev(dev);
+}
+
+static void
+ctc_free_netdevice(struct net_device * dev, int free_dev)
+{
+	struct ctc_priv *privptr;
+	if (!dev)
+		return;
+	privptr = dev->priv;
+	if (privptr) {
+		if (privptr->fsm)
+			kfree_fsm(privptr->fsm);
+		kfree(privptr);
+	}
+#ifdef MODULE
+	if (free_dev)
+		free_netdev(dev);
+#endif
+}
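buffer_write() above follows the usual store() handler shape: parse the user string, validate it against both a driver limit and the current interface state, then commit and flag the channels for buffer reallocation. The validation can be exercised in isolation; the constants below are illustrative stand-ins for CTC_BUFSIZE_LIMIT and LL_HEADER_LENGTH, whose real values are defined elsewhere in the driver.

#include <stdio.h>

#define DEMO_BUFSIZE_LIMIT	65535	/* stand-in for CTC_BUFSIZE_LIMIT */
#define DEMO_LL_HEADER_LENGTH	6	/* stand-in for LL_HEADER_LENGTH */

/* Mirror of the checks in buffer_write(): respect the driver limit,
 * keep room for the current MTU while the interface is running, and
 * never drop below the 576-byte minimum MTU plus framing overhead. */
static int
buffer_size_ok(int bs, int mtu, int running)
{
	if (bs > DEMO_BUFSIZE_LIMIT)
		return 0;
	if (running && bs < mtu + DEMO_LL_HEADER_LENGTH + 2)
		return 0;
	if (bs < 576 + DEMO_LL_HEADER_LENGTH + 2)
		return 0;
	return 1;
}

int main(void)
{
	printf("32768, mtu 1500, up:   %d\n", buffer_size_ok(32768, 1500, 1));
	printf(" 1024, mtu 1500, up:   %d\n", buffer_size_ok(1024, 1500, 1));
	printf(" 1024, mtu  900, down: %d\n", buffer_size_ok(1024, 900, 0));
	return 0;
}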
+/**
+ * Initialize everything of the net device except the name and the
+ * channel structs.
+ */
+static struct net_device *
+ctc_init_netdevice(struct net_device * dev, int alloc_device,
+		   struct ctc_priv *privptr)
+{
+	if (!privptr)
+		return NULL;
+
+	DBF_TEXT(setup, 3, __FUNCTION__);
+	if (alloc_device) {
+		dev = kmalloc(sizeof (struct net_device), GFP_KERNEL);
+		if (!dev)
+			return NULL;
+		memset(dev, 0, sizeof (struct net_device));
+	}
+
+	dev->priv = privptr;
+	privptr->fsm = init_fsm("ctcdev", dev_state_names,
+				dev_event_names, NR_DEV_STATES, NR_DEV_EVENTS,
+				dev_fsm, DEV_FSM_LEN, GFP_KERNEL);
+	if (privptr->fsm == NULL) {
+		if (alloc_device)
+			kfree(dev);
+		return NULL;
+	}
+	fsm_newstate(privptr->fsm, DEV_STATE_STOPPED);
+	fsm_settimer(privptr->fsm, &privptr->restart_timer);
+	if (dev->mtu == 0)
+		dev->mtu = CTC_BUFSIZE_DEFAULT - LL_HEADER_LENGTH - 2;
+	dev->hard_start_xmit = ctc_tx;
+	dev->open = ctc_open;
+	dev->stop = ctc_close;
+	dev->get_stats = ctc_stats;
+	dev->change_mtu = ctc_change_mtu;
+	dev->hard_header_len = LL_HEADER_LENGTH + 2;
+	dev->addr_len = 0;
+	dev->type = ARPHRD_SLIP;
+	dev->tx_queue_len = 100;
+	dev->flags = IFF_POINTOPOINT | IFF_NOARP;
+	SET_MODULE_OWNER(dev);
+	return dev;
+}
+
+static ssize_t
+ctc_proto_show(struct device *dev, char *buf)
+{
+	struct ctc_priv *priv;
+
+	priv = dev->driver_data;
+	if (!priv)
+		return -ENODEV;
+
+	return sprintf(buf, "%d\n", priv->protocol);
+}
+
+static ssize_t
+ctc_proto_store(struct device *dev, const char *buf, size_t count)
+{
+	struct ctc_priv *priv;
+	int value;
+
+	DBF_TEXT(trace, 3, __FUNCTION__);
+	pr_debug("%s() called\n", __FUNCTION__);
+
+	priv = dev->driver_data;
+	if (!priv)
+		return -ENODEV;
+	sscanf(buf, "%d", &value);
+	if ((value < 0) || (value > CTC_PROTO_MAX))
+		return -EINVAL;
+	priv->protocol = value;
+
+	return count;
+}
+
+static DEVICE_ATTR(protocol, 0644, ctc_proto_show, ctc_proto_store);
+
+static ssize_t
+ctc_type_show(struct device *dev, char *buf)
+{
+	struct ccwgroup_device *cgdev;
+
+	cgdev = to_ccwgroupdev(dev);
+	if (!cgdev)
+		return -ENODEV;
+
+	return sprintf(buf, "%s\n", cu3088_type[cgdev->cdev[0]->id.driver_info]);
+}
+
+static DEVICE_ATTR(type, 0444, ctc_type_show, NULL);
+
+static struct attribute *ctc_attr[] = {
+	&dev_attr_protocol.attr,
+	&dev_attr_type.attr,
+	&dev_attr_buffer.attr,
+	NULL,
+};
+
+static struct attribute_group ctc_attr_group = {
+	.attrs = ctc_attr,
+};
+
+static int
+ctc_add_files(struct device *dev)
+{
+	pr_debug("%s() called\n", __FUNCTION__);
+
+	return sysfs_create_group(&dev->kobj, &ctc_attr_group);
+}
+
+static void
+ctc_remove_files(struct device *dev)
+{
+	pr_debug("%s() called\n", __FUNCTION__);
+
+	sysfs_remove_group(&dev->kobj, &ctc_attr_group);
+}
+
+/**
+ * Add ctc specific attributes.
+ * Add ctc private data.
+ *
+ * @param cgdev  pointer to ccwgroup_device just added
+ *
+ * @returns 0 on success, !0 on failure.
+ */ + +static int +ctc_probe_device(struct ccwgroup_device *cgdev) +{ + struct ctc_priv *priv; + int rc; + + pr_debug("%s() called\n", __FUNCTION__); + DBF_TEXT(trace, 3, __FUNCTION__); + + if (!get_device(&cgdev->dev)) + return -ENODEV; + + priv = kmalloc(sizeof (struct ctc_priv), GFP_KERNEL); + if (!priv) { + ctc_pr_err("%s: Out of memory\n", __func__); + put_device(&cgdev->dev); + return -ENOMEM; + } + + memset(priv, 0, sizeof (struct ctc_priv)); + rc = ctc_add_files(&cgdev->dev); + if (rc) { + kfree(priv); + put_device(&cgdev->dev); + return rc; + } + priv->buffer_size = CTC_BUFSIZE_DEFAULT; + cgdev->cdev[0]->handler = ctc_irq_handler; + cgdev->cdev[1]->handler = ctc_irq_handler; + cgdev->dev.driver_data = priv; + + return 0; +} + +/** + * + * Setup an interface. + * + * @param cgdev Device to be setup. + * + * @returns 0 on success, !0 on failure. + */ +static int +ctc_new_device(struct ccwgroup_device *cgdev) +{ + char read_id[CTC_ID_SIZE]; + char write_id[CTC_ID_SIZE]; + int direction; + enum channel_types type; + struct ctc_priv *privptr; + struct net_device *dev; + int ret; + + pr_debug("%s() called\n", __FUNCTION__); + DBF_TEXT(setup, 3, __FUNCTION__); + + privptr = cgdev->dev.driver_data; + if (!privptr) + return -ENODEV; + + type = get_channel_type(&cgdev->cdev[0]->id); + + snprintf(read_id, CTC_ID_SIZE, "ch-%s", cgdev->cdev[0]->dev.bus_id); + snprintf(write_id, CTC_ID_SIZE, "ch-%s", cgdev->cdev[1]->dev.bus_id); + + if (add_channel(cgdev->cdev[0], type)) + return -ENOMEM; + if (add_channel(cgdev->cdev[1], type)) + return -ENOMEM; + + ret = ccw_device_set_online(cgdev->cdev[0]); + if (ret != 0) { + printk(KERN_WARNING + "ccw_device_set_online (cdev[0]) failed with ret = %d\n", ret); + } + + ret = ccw_device_set_online(cgdev->cdev[1]); + if (ret != 0) { + printk(KERN_WARNING + "ccw_device_set_online (cdev[1]) failed with ret = %d\n", ret); + } + + dev = ctc_init_netdevice(NULL, 1, privptr); + + if (!dev) { + ctc_pr_warn("ctc_init_netdevice failed\n"); + goto out; + } + + if (privptr->protocol == CTC_PROTO_LINUX_TTY) + strlcpy(dev->name, "ctctty%d", IFNAMSIZ); + else + strlcpy(dev->name, "ctc%d", IFNAMSIZ); + + for (direction = READ; direction <= WRITE; direction++) { + privptr->channel[direction] = + channel_get(type, direction == READ ? read_id : write_id, + direction); + if (privptr->channel[direction] == NULL) { + if (direction == WRITE) + channel_free(privptr->channel[READ]); + + ctc_free_netdevice(dev, 1); + goto out; + } + privptr->channel[direction]->netdev = dev; + privptr->channel[direction]->protocol = privptr->protocol; + privptr->channel[direction]->max_bufsize = privptr->buffer_size; + } + /* sysfs magic */ + SET_NETDEV_DEV(dev, &cgdev->dev); + + if (ctc_netdev_register(dev) != 0) { + ctc_free_netdevice(dev, 1); + goto out; + } + + ctc_add_attributes(&cgdev->dev); + + strlcpy(privptr->fsm->name, dev->name, sizeof (privptr->fsm->name)); + + print_banner(); + + ctc_pr_info("%s: read: %s, write: %s, proto: %d\n", + dev->name, privptr->channel[READ]->id, + privptr->channel[WRITE]->id, privptr->protocol); + + return 0; +out: + ccw_device_set_offline(cgdev->cdev[1]); + ccw_device_set_offline(cgdev->cdev[0]); + + return -ENODEV; +} + +/** + * Shutdown an interface. + * + * @param cgdev Device to be shut down. + * + * @returns 0 on success, !0 on failure. 
+ */ +static int +ctc_shutdown_device(struct ccwgroup_device *cgdev) +{ + struct ctc_priv *priv; + struct net_device *ndev; + + DBF_TEXT(trace, 3, __FUNCTION__); + pr_debug("%s() called\n", __FUNCTION__); + + priv = cgdev->dev.driver_data; + ndev = NULL; + if (!priv) + return -ENODEV; + + if (priv->channel[READ]) { + ndev = priv->channel[READ]->netdev; + + /* Close the device */ + ctc_close(ndev); + ndev->flags &=~IFF_RUNNING; + + ctc_remove_attributes(&cgdev->dev); + + channel_free(priv->channel[READ]); + } + if (priv->channel[WRITE]) + channel_free(priv->channel[WRITE]); + + if (ndev) { + ctc_netdev_unregister(ndev); + ndev->priv = NULL; + ctc_free_netdevice(ndev, 1); + } + + if (priv->fsm) + kfree_fsm(priv->fsm); + + ccw_device_set_offline(cgdev->cdev[1]); + ccw_device_set_offline(cgdev->cdev[0]); + + if (priv->channel[READ]) + channel_remove(priv->channel[READ]); + if (priv->channel[WRITE]) + channel_remove(priv->channel[WRITE]); + + priv->channel[READ] = priv->channel[WRITE] = NULL; + + return 0; + +} + +static void +ctc_remove_device(struct ccwgroup_device *cgdev) +{ + struct ctc_priv *priv; + + pr_debug("%s() called\n", __FUNCTION__); + DBF_TEXT(trace, 3, __FUNCTION__); + + priv = cgdev->dev.driver_data; + if (!priv) + return; + if (cgdev->state == CCWGROUP_ONLINE) + ctc_shutdown_device(cgdev); + ctc_remove_files(&cgdev->dev); + cgdev->dev.driver_data = NULL; + kfree(priv); + put_device(&cgdev->dev); +} + +static struct ccwgroup_driver ctc_group_driver = { + .owner = THIS_MODULE, + .name = "ctc", + .max_slaves = 2, + .driver_id = 0xC3E3C3, + .probe = ctc_probe_device, + .remove = ctc_remove_device, + .set_online = ctc_new_device, + .set_offline = ctc_shutdown_device, +}; + +/** + * Module related routines + *****************************************************************************/ + +/** + * Prepare to be unloaded. Free IRQ's and release all resources. + * This is called just before this module is unloaded. It is + * <em>not</em> called, if the usage count is !0, so we don't need to check + * for that. + */ +static void __exit +ctc_exit(void) +{ + unregister_cu3088_discipline(&ctc_group_driver); + ctc_tty_cleanup(); + ctc_unregister_dbf_views(); + ctc_pr_info("CTC driver unloaded\n"); +} + +/** + * Initialize module. + * This is called just after the module is loaded. + * + * @return 0 on success, !0 on error. + */ +static int __init +ctc_init(void) +{ + int ret = 0; + + print_banner(); + + ret = ctc_register_dbf_views(); + if (ret){ + ctc_pr_crit("ctc_init failed with ctc_register_dbf_views rc = %d\n", ret); + return ret; + } + ctc_tty_init(); + ret = register_cu3088_discipline(&ctc_group_driver); + if (ret) { + ctc_tty_cleanup(); + ctc_unregister_dbf_views(); + } + return ret; +} + +module_init(ctc_init); +module_exit(ctc_exit); + +/* --- This is the END my friend --- */ diff --git a/drivers/s390/net/ctctty.c b/drivers/s390/net/ctctty.c new file mode 100644 index 000000000000..9257d60c7833 --- /dev/null +++ b/drivers/s390/net/ctctty.c @@ -0,0 +1,1276 @@ +/* + * $Id: ctctty.c,v 1.26 2004/08/04 11:06:55 mschwide Exp $ + * + * CTC / ESCON network driver, tty interface. + * + * Copyright (C) 2001 IBM Deutschland Entwicklung GmbH, IBM Corporation + * Author(s): Fritz Elfert (elfert@de.ibm.com, felfert@millenux.com) + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2, or (at your option) + * any later version. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + * + */ + +#include <linux/config.h> +#include <linux/module.h> +#include <linux/tty.h> +#include <linux/serial_reg.h> +#include <linux/interrupt.h> +#include <linux/delay.h> +#include <asm/uaccess.h> +#include <linux/devfs_fs_kernel.h> +#include "ctctty.h" +#include "ctcdbug.h" + +#define CTC_TTY_MAJOR 43 +#define CTC_TTY_MAX_DEVICES 64 + +#define CTC_ASYNC_MAGIC 0x49344C01 /* for paranoia-checking */ +#define CTC_ASYNC_INITIALIZED 0x80000000 /* port was initialized */ +#define CTC_ASYNC_NORMAL_ACTIVE 0x20000000 /* Normal device active */ +#define CTC_ASYNC_CLOSING 0x08000000 /* Serial port is closing */ +#define CTC_ASYNC_CTS_FLOW 0x04000000 /* Do CTS flow control */ +#define CTC_ASYNC_CHECK_CD 0x02000000 /* i.e., CLOCAL */ +#define CTC_ASYNC_HUP_NOTIFY 0x0001 /* Notify tty on hangups/closes */ +#define CTC_ASYNC_NETDEV_OPEN 0x0002 /* Underlying netdev is open */ +#define CTC_ASYNC_TX_LINESTAT 0x0004 /* Must send line status */ +#define CTC_ASYNC_SPLIT_TERMIOS 0x0008 /* Sep. termios for dialin/out */ +#define CTC_TTY_XMIT_SIZE 1024 /* Default bufsize for write */ +#define CTC_SERIAL_XMIT_MAX 4000 /* Maximum bufsize for write */ + +/* Private data (similar to async_struct in <linux/serial.h>) */ +typedef struct { + int magic; + int flags; /* defined in tty.h */ + int mcr; /* Modem control register */ + int msr; /* Modem status register */ + int lsr; /* Line status register */ + int line; + int count; /* # of fd on device */ + int blocked_open; /* # of blocked opens */ + struct net_device *netdev; + struct sk_buff_head tx_queue; /* transmit queue */ + struct sk_buff_head rx_queue; /* receive queue */ + struct tty_struct *tty; /* Pointer to corresponding tty */ + wait_queue_head_t open_wait; + wait_queue_head_t close_wait; + struct semaphore write_sem; + struct tasklet_struct tasklet; + struct timer_list stoptimer; +} ctc_tty_info; + +/* Description of one CTC-tty */ +typedef struct { + struct tty_driver *ctc_tty_device; /* tty-device */ + ctc_tty_info info[CTC_TTY_MAX_DEVICES]; /* Private data */ +} ctc_tty_driver; + +static ctc_tty_driver *driver; + +/* Leave this unchanged unless you know what you do! */ +#define MODEM_PARANOIA_CHECK +#define MODEM_DO_RESTART + +#define CTC_TTY_NAME "ctctty" + +static __u32 ctc_tty_magic = CTC_ASYNC_MAGIC; +static int ctc_tty_shuttingdown = 0; + +static spinlock_t ctc_tty_lock; + +/* ctc_tty_try_read() is called from within ctc_tty_rcv_skb() + * to stuff incoming data directly into a tty's flip-buffer. If the + * flip buffer is full, the packet gets queued up. + * + * Return: + * 1 = Success + * 0 = Failure, data has to be buffered and later processed by + * ctc_tty_readmodem(). 
+ */
+static int
+ctc_tty_try_read(ctc_tty_info * info, struct sk_buff *skb)
+{
+	int c;
+	int len;
+	struct tty_struct *tty;
+
+	DBF_TEXT(trace, 5, __FUNCTION__);
+	if ((tty = info->tty)) {
+		if (info->mcr & UART_MCR_RTS) {
+			c = TTY_FLIPBUF_SIZE - tty->flip.count;
+			len = skb->len;
+			if (c >= len) {
+				memcpy(tty->flip.char_buf_ptr, skb->data, len);
+				memset(tty->flip.flag_buf_ptr, 0, len);
+				tty->flip.count += len;
+				tty->flip.char_buf_ptr += len;
+				tty->flip.flag_buf_ptr += len;
+				tty_flip_buffer_push(tty);
+				kfree_skb(skb);
+				return 1;
+			}
+		}
+	}
+	return 0;
+}
+
+/* ctc_tty_readmodem() is called periodically from within timer-interrupt.
+ * It tries to get received data from the receive queue and stuff it into
+ * the tty's flip-buffer.
+ */
+static int
+ctc_tty_readmodem(ctc_tty_info *info)
+{
+	int ret = 1;
+	struct tty_struct *tty;
+
+	DBF_TEXT(trace, 5, __FUNCTION__);
+	if ((tty = info->tty)) {
+		if (info->mcr & UART_MCR_RTS) {
+			int c = TTY_FLIPBUF_SIZE - tty->flip.count;
+			struct sk_buff *skb;
+
+			if ((c > 0) && (skb = skb_dequeue(&info->rx_queue))) {
+				int len = skb->len;
+				if (len > c)
+					len = c;
+				memcpy(tty->flip.char_buf_ptr, skb->data, len);
+				skb_pull(skb, len);
+				memset(tty->flip.flag_buf_ptr, 0, len);
+				tty->flip.count += len;
+				tty->flip.char_buf_ptr += len;
+				tty->flip.flag_buf_ptr += len;
+				tty_flip_buffer_push(tty);
+				if (skb->len > 0)
+					skb_queue_head(&info->rx_queue, skb);
+				else {
+					kfree_skb(skb);
+					ret = skb_queue_len(&info->rx_queue);
+				}
+			}
+		}
+	}
+	return ret;
+}
+
+void
+ctc_tty_setcarrier(struct net_device *netdev, int on)
+{
+	int i;
+
+	DBF_TEXT(trace, 4, __FUNCTION__);
+	if ((!driver) || ctc_tty_shuttingdown)
+		return;
+	for (i = 0; i < CTC_TTY_MAX_DEVICES; i++)
+		if (driver->info[i].netdev == netdev) {
+			ctc_tty_info *info = &driver->info[i];
+			if (on)
+				info->msr |= UART_MSR_DCD;
+			else
+				info->msr &= ~UART_MSR_DCD;
+			if ((info->flags & CTC_ASYNC_CHECK_CD) && (!on))
+				tty_hangup(info->tty);
+		}
+}
+
+void
+ctc_tty_netif_rx(struct sk_buff *skb)
+{
+	int i;
+	ctc_tty_info *info = NULL;
+
+	DBF_TEXT(trace, 5, __FUNCTION__);
+	if (!skb)
+		return;
+	if ((!skb->dev) || (!driver) || ctc_tty_shuttingdown) {
+		dev_kfree_skb(skb);
+		return;
+	}
+	for (i = 0; i < CTC_TTY_MAX_DEVICES; i++)
+		if (driver->info[i].netdev == skb->dev) {
+			info = &driver->info[i];
+			break;
+		}
+	if (!info) {
+		dev_kfree_skb(skb);
+		return;
+	}
+	if (skb->len < 6) {
+		dev_kfree_skb(skb);
+		return;
+	}
+	if (memcmp(skb->data, &ctc_tty_magic, sizeof(__u32))) {
+		dev_kfree_skb(skb);
+		return;
+	}
+	skb_pull(skb, sizeof(__u32));
+
+	i = *((int *)skb->data);
+	skb_pull(skb, sizeof(info->mcr));
+	if (i & UART_MCR_RTS) {
+		info->msr |= UART_MSR_CTS;
+		if (info->flags & CTC_ASYNC_CTS_FLOW)
+			info->tty->hw_stopped = 0;
+	} else {
+		info->msr &= ~UART_MSR_CTS;
+		if (info->flags & CTC_ASYNC_CTS_FLOW)
+			info->tty->hw_stopped = 1;
+	}
+	if (i & UART_MCR_DTR)
+		info->msr |= UART_MSR_DSR;
+	else
+		info->msr &= ~UART_MSR_DSR;
+	if (skb->len <= 0) {
+		kfree_skb(skb);
+		return;
+	}
+	/* Try to deliver directly via tty-flip-buf if queue is empty */
+	if (skb_queue_empty(&info->rx_queue))
+		if (ctc_tty_try_read(info, skb))
+			return;
+	/* Direct deliver failed or queue wasn't empty.
+	 * Queue up for later dequeueing via timer-irq.
+ */ + skb_queue_tail(&info->rx_queue, skb); + /* Schedule dequeuing */ + tasklet_schedule(&info->tasklet); +} + +static int +ctc_tty_tint(ctc_tty_info * info) +{ + struct sk_buff *skb = skb_dequeue(&info->tx_queue); + int stopped = (info->tty->hw_stopped || info->tty->stopped); + int wake = 1; + int rc; + + DBF_TEXT(trace, 4, __FUNCTION__); + if (!info->netdev) { + if (skb) + kfree_skb(skb); + return 0; + } + if (info->flags & CTC_ASYNC_TX_LINESTAT) { + int skb_res = info->netdev->hard_header_len + + sizeof(info->mcr) + sizeof(__u32); + /* If we must update line status, + * create an empty dummy skb and insert it. + */ + if (skb) + skb_queue_head(&info->tx_queue, skb); + + skb = dev_alloc_skb(skb_res); + if (!skb) { + printk(KERN_WARNING + "ctc_tty: Out of memory in %s%d tint\n", + CTC_TTY_NAME, info->line); + return 1; + } + skb_reserve(skb, skb_res); + stopped = 0; + wake = 0; + } + if (!skb) + return 0; + if (stopped) { + skb_queue_head(&info->tx_queue, skb); + return 1; + } +#if 0 + if (skb->len > 0) + printk(KERN_DEBUG "tint: %d %02x\n", skb->len, *(skb->data)); + else + printk(KERN_DEBUG "tint: %d STAT\n", skb->len); +#endif + memcpy(skb_push(skb, sizeof(info->mcr)), &info->mcr, sizeof(info->mcr)); + memcpy(skb_push(skb, sizeof(__u32)), &ctc_tty_magic, sizeof(__u32)); + rc = info->netdev->hard_start_xmit(skb, info->netdev); + if (rc) { + skb_pull(skb, sizeof(info->mcr) + sizeof(__u32)); + if (skb->len > 0) + skb_queue_head(&info->tx_queue, skb); + else + kfree_skb(skb); + } else { + struct tty_struct *tty = info->tty; + + info->flags &= ~CTC_ASYNC_TX_LINESTAT; + if (tty) { + tty_wakeup(tty); + } + } + return (skb_queue_empty(&info->tx_queue) ? 0 : 1); +} + +/************************************************************ + * + * Modem-functions + * + * mostly "stolen" from original Linux-serial.c and friends. 
+ * + ************************************************************/ + +static inline int +ctc_tty_paranoia_check(ctc_tty_info * info, char *name, const char *routine) +{ +#ifdef MODEM_PARANOIA_CHECK + if (!info) { + printk(KERN_WARNING "ctc_tty: null info_struct for %s in %s\n", + name, routine); + return 1; + } + if (info->magic != CTC_ASYNC_MAGIC) { + printk(KERN_WARNING "ctc_tty: bad magic for info struct %s in %s\n", + name, routine); + return 1; + } +#endif + return 0; +} + +static void +ctc_tty_inject(ctc_tty_info *info, char c) +{ + int skb_res; + struct sk_buff *skb; + + DBF_TEXT(trace, 4, __FUNCTION__); + if (ctc_tty_shuttingdown) + return; + skb_res = info->netdev->hard_header_len + sizeof(info->mcr) + + sizeof(__u32) + 1; + skb = dev_alloc_skb(skb_res); + if (!skb) { + printk(KERN_WARNING + "ctc_tty: Out of memory in %s%d tx_inject\n", + CTC_TTY_NAME, info->line); + return; + } + skb_reserve(skb, skb_res); + *(skb_put(skb, 1)) = c; + skb_queue_head(&info->tx_queue, skb); + tasklet_schedule(&info->tasklet); +} + +static void +ctc_tty_transmit_status(ctc_tty_info *info) +{ + DBF_TEXT(trace, 5, __FUNCTION__); + if (ctc_tty_shuttingdown) + return; + info->flags |= CTC_ASYNC_TX_LINESTAT; + tasklet_schedule(&info->tasklet); +} + +static void +ctc_tty_change_speed(ctc_tty_info * info) +{ + unsigned int cflag; + unsigned int quot; + int i; + + DBF_TEXT(trace, 3, __FUNCTION__); + if (!info->tty || !info->tty->termios) + return; + cflag = info->tty->termios->c_cflag; + + quot = i = cflag & CBAUD; + if (i & CBAUDEX) { + i &= ~CBAUDEX; + if (i < 1 || i > 2) + info->tty->termios->c_cflag &= ~CBAUDEX; + else + i += 15; + } + if (quot) { + info->mcr |= UART_MCR_DTR; + info->mcr |= UART_MCR_RTS; + ctc_tty_transmit_status(info); + } else { + info->mcr &= ~UART_MCR_DTR; + info->mcr &= ~UART_MCR_RTS; + ctc_tty_transmit_status(info); + return; + } + + /* CTS flow control flag and modem status interrupts */ + if (cflag & CRTSCTS) { + info->flags |= CTC_ASYNC_CTS_FLOW; + } else + info->flags &= ~CTC_ASYNC_CTS_FLOW; + if (cflag & CLOCAL) + info->flags &= ~CTC_ASYNC_CHECK_CD; + else { + info->flags |= CTC_ASYNC_CHECK_CD; + } +} + +static int +ctc_tty_startup(ctc_tty_info * info) +{ + DBF_TEXT(trace, 3, __FUNCTION__); + if (info->flags & CTC_ASYNC_INITIALIZED) + return 0; +#ifdef CTC_DEBUG_MODEM_OPEN + printk(KERN_DEBUG "starting up %s%d ...\n", CTC_TTY_NAME, info->line); +#endif + /* + * Now, initialize the UART + */ + info->mcr = UART_MCR_DTR | UART_MCR_RTS | UART_MCR_OUT2; + if (info->tty) + clear_bit(TTY_IO_ERROR, &info->tty->flags); + /* + * and set the speed of the serial port + */ + ctc_tty_change_speed(info); + + info->flags |= CTC_ASYNC_INITIALIZED; + if (!(info->flags & CTC_ASYNC_NETDEV_OPEN)) + info->netdev->open(info->netdev); + info->flags |= CTC_ASYNC_NETDEV_OPEN; + return 0; +} + +static void +ctc_tty_stopdev(unsigned long data) +{ + ctc_tty_info *info = (ctc_tty_info *)data; + + if ((!info) || (!info->netdev) || + (info->flags & CTC_ASYNC_INITIALIZED)) + return; + info->netdev->stop(info->netdev); + info->flags &= ~CTC_ASYNC_NETDEV_OPEN; +} + +/* + * This routine will shutdown a serial port; interrupts are disabled, and + * DTR is dropped if the hangup on close termio flag is on. 
+ */
+static void
+ctc_tty_shutdown(ctc_tty_info * info)
+{
+	DBF_TEXT(trace, 3, __FUNCTION__);
+	if (!(info->flags & CTC_ASYNC_INITIALIZED))
+		return;
+#ifdef CTC_DEBUG_MODEM_OPEN
+	printk(KERN_DEBUG "Shutting down %s%d ....\n", CTC_TTY_NAME, info->line);
+#endif
+	info->msr &= ~UART_MSR_RI;
+	if (!info->tty || (info->tty->termios->c_cflag & HUPCL))
+		info->mcr &= ~(UART_MCR_DTR | UART_MCR_RTS);
+	if (info->tty)
+		set_bit(TTY_IO_ERROR, &info->tty->flags);
+	mod_timer(&info->stoptimer, jiffies + (10 * HZ));
+	skb_queue_purge(&info->tx_queue);
+	skb_queue_purge(&info->rx_queue);
+	info->flags &= ~CTC_ASYNC_INITIALIZED;
+}
+
+/* ctc_tty_write() is the main send-routine. It is called from the upper
+ * levels within the kernel to send data. Depending on the
+ * online-flag it either directs output to the at-command-interpreter or
+ * to the lower level. Additional tasks done here:
+ * - If online, check for escape-sequence (+++)
+ * - If sending audio-data, call ctc_tty_DLEdown() to parse DLE-codes.
+ * - If receiving audio-data, call ctc_tty_end_vrx() to abort if needed.
+ * - If dialing, abort dial.
+ */
+static int
+ctc_tty_write(struct tty_struct *tty, const u_char * buf, int count)
+{
+	int c;
+	int total = 0;
+	ctc_tty_info *info;
+
+	DBF_TEXT(trace, 5, __FUNCTION__);
+	if (!tty)
+		goto ex;
+	info = (ctc_tty_info *) tty->driver_data;
+	if (ctc_tty_shuttingdown)
+		goto ex;
+	if (ctc_tty_paranoia_check(info, tty->name, "ctc_tty_write"))
+		goto ex;
+	if (!info->netdev) {
+		total = -ENODEV;
+		goto ex;
+	}
+	while (1) {
+		struct sk_buff *skb;
+		int skb_res;
+
+		c = (count < CTC_TTY_XMIT_SIZE) ? count : CTC_TTY_XMIT_SIZE;
+		if (c <= 0)
+			break;
+
+		skb_res = info->netdev->hard_header_len + sizeof(info->mcr) +
+			sizeof(__u32);
+		skb = dev_alloc_skb(skb_res + c);
+		if (!skb) {
+			printk(KERN_WARNING
+			       "ctc_tty: Out of memory in %s%d write\n",
+			       CTC_TTY_NAME, info->line);
+			break;
+		}
+		skb_reserve(skb, skb_res);
+		memcpy(skb_put(skb, c), buf, c);
+		skb_queue_tail(&info->tx_queue, skb);
+		buf += c;
+		total += c;
+		count -= c;
+	}
+	if (skb_queue_len(&info->tx_queue)) {
+		info->lsr &= ~UART_LSR_TEMT;
+		tasklet_schedule(&info->tasklet);
+	}
+ex:
+	DBF_TEXT(trace, 6, __FUNCTION__);
+	return total;
+}
+
+static int
+ctc_tty_write_room(struct tty_struct *tty)
+{
+	ctc_tty_info *info = (ctc_tty_info *) tty->driver_data;
+
+	if (ctc_tty_paranoia_check(info, tty->name, "ctc_tty_write_room"))
+		return 0;
+	return CTC_TTY_XMIT_SIZE;
+}
+
+static int
+ctc_tty_chars_in_buffer(struct tty_struct *tty)
+{
+	ctc_tty_info *info = (ctc_tty_info *) tty->driver_data;
+
+	if (ctc_tty_paranoia_check(info, tty->name, "ctc_tty_chars_in_buffer"))
+		return 0;
+	return 0;
+}
+
+static void
+ctc_tty_flush_buffer(struct tty_struct *tty)
+{
+	ctc_tty_info *info;
+	unsigned long flags;
+
+	DBF_TEXT(trace, 4, __FUNCTION__);
+	if (!tty)
+		goto ex;
+	spin_lock_irqsave(&ctc_tty_lock, flags);
+	info = (ctc_tty_info *) tty->driver_data;
+	if (ctc_tty_paranoia_check(info, tty->name, "ctc_tty_flush_buffer")) {
+		spin_unlock_irqrestore(&ctc_tty_lock, flags);
+		goto ex;
+	}
+	skb_queue_purge(&info->tx_queue);
+	info->lsr |= UART_LSR_TEMT;
+	spin_unlock_irqrestore(&ctc_tty_lock, flags);
+	wake_up_interruptible(&tty->write_wait);
+	tty_wakeup(tty);
+ex:
+	DBF_TEXT_(trace, 2, "ex: %s ", __FUNCTION__);
+	return;
+}
+
+static void
+ctc_tty_flush_chars(struct tty_struct *tty)
+{
+	ctc_tty_info *info = (ctc_tty_info *) tty->driver_data;
+
+	DBF_TEXT(trace, 4, __FUNCTION__);
+	if (ctc_tty_shuttingdown)
+		return;
+	if
(ctc_tty_paranoia_check(info, tty->name, "ctc_tty_flush_chars")) + return; + if (tty->stopped || tty->hw_stopped || (!skb_queue_len(&info->tx_queue))) + return; + tasklet_schedule(&info->tasklet); +} + +/* + * ------------------------------------------------------------ + * ctc_tty_throttle() + * + * This routine is called by the upper-layer tty layer to signal that + * incoming characters should be throttled. + * ------------------------------------------------------------ + */ +static void +ctc_tty_throttle(struct tty_struct *tty) +{ + ctc_tty_info *info = (ctc_tty_info *) tty->driver_data; + + DBF_TEXT(trace, 4, __FUNCTION__); + if (ctc_tty_paranoia_check(info, tty->name, "ctc_tty_throttle")) + return; + info->mcr &= ~UART_MCR_RTS; + if (I_IXOFF(tty)) + ctc_tty_inject(info, STOP_CHAR(tty)); + ctc_tty_transmit_status(info); +} + +static void +ctc_tty_unthrottle(struct tty_struct *tty) +{ + ctc_tty_info *info = (ctc_tty_info *) tty->driver_data; + + DBF_TEXT(trace, 4, __FUNCTION__); + if (ctc_tty_paranoia_check(info, tty->name, "ctc_tty_unthrottle")) + return; + info->mcr |= UART_MCR_RTS; + if (I_IXOFF(tty)) + ctc_tty_inject(info, START_CHAR(tty)); + ctc_tty_transmit_status(info); +} + +/* + * ------------------------------------------------------------ + * ctc_tty_ioctl() and friends + * ------------------------------------------------------------ + */ + +/* + * ctc_tty_get_lsr_info - get line status register info + * + * Purpose: Let user call ioctl() to get info when the UART physically + * is emptied. On bus types like RS485, the transmitter must + * release the bus after transmitting. This must be done when + * the transmit shift register is empty, not be done when the + * transmit holding register is empty. This functionality + * allows RS485 driver to be written in user space. + */ +static int +ctc_tty_get_lsr_info(ctc_tty_info * info, uint __user *value) +{ + u_char status; + uint result; + ulong flags; + + DBF_TEXT(trace, 4, __FUNCTION__); + spin_lock_irqsave(&ctc_tty_lock, flags); + status = info->lsr; + spin_unlock_irqrestore(&ctc_tty_lock, flags); + result = ((status & UART_LSR_TEMT) ? TIOCSER_TEMT : 0); + put_user(result, value); + return 0; +} + + +static int ctc_tty_tiocmget(struct tty_struct *tty, struct file *file) +{ + ctc_tty_info *info = (ctc_tty_info *) tty->driver_data; + u_char control, + status; + uint result; + ulong flags; + + DBF_TEXT(trace, 4, __FUNCTION__); + if (ctc_tty_paranoia_check(info, tty->name, "ctc_tty_ioctl")) + return -ENODEV; + if (tty->flags & (1 << TTY_IO_ERROR)) + return -EIO; + + control = info->mcr; + spin_lock_irqsave(&ctc_tty_lock, flags); + status = info->msr; + spin_unlock_irqrestore(&ctc_tty_lock, flags); + result = ((control & UART_MCR_RTS) ? TIOCM_RTS : 0) + | ((control & UART_MCR_DTR) ? TIOCM_DTR : 0) + | ((status & UART_MSR_DCD) ? TIOCM_CAR : 0) + | ((status & UART_MSR_RI) ? TIOCM_RNG : 0) + | ((status & UART_MSR_DSR) ? TIOCM_DSR : 0) + | ((status & UART_MSR_CTS) ? 
TIOCM_CTS : 0); + return result; +} + +static int +ctc_tty_tiocmset(struct tty_struct *tty, struct file *file, + unsigned int set, unsigned int clear) +{ + ctc_tty_info *info = (ctc_tty_info *) tty->driver_data; + + DBF_TEXT(trace, 4, __FUNCTION__); + if (ctc_tty_paranoia_check(info, tty->name, "ctc_tty_ioctl")) + return -ENODEV; + if (tty->flags & (1 << TTY_IO_ERROR)) + return -EIO; + + if (set & TIOCM_RTS) + info->mcr |= UART_MCR_RTS; + if (set & TIOCM_DTR) + info->mcr |= UART_MCR_DTR; + + if (clear & TIOCM_RTS) + info->mcr &= ~UART_MCR_RTS; + if (clear & TIOCM_DTR) + info->mcr &= ~UART_MCR_DTR; + + if ((set | clear) & (TIOCM_RTS|TIOCM_DTR)) + ctc_tty_transmit_status(info); + return 0; +} + +static int +ctc_tty_ioctl(struct tty_struct *tty, struct file *file, + uint cmd, ulong arg) +{ + ctc_tty_info *info = (ctc_tty_info *) tty->driver_data; + int error; + int retval; + + DBF_TEXT(trace, 4, __FUNCTION__); + if (ctc_tty_paranoia_check(info, tty->name, "ctc_tty_ioctl")) + return -ENODEV; + if (tty->flags & (1 << TTY_IO_ERROR)) + return -EIO; + switch (cmd) { + case TCSBRK: /* SVID version: non-zero arg --> no break */ +#ifdef CTC_DEBUG_MODEM_IOCTL + printk(KERN_DEBUG "%s%d ioctl TCSBRK\n", CTC_TTY_NAME, info->line); +#endif + retval = tty_check_change(tty); + if (retval) + return retval; + tty_wait_until_sent(tty, 0); + return 0; + case TCSBRKP: /* support for POSIX tcsendbreak() */ +#ifdef CTC_DEBUG_MODEM_IOCTL + printk(KERN_DEBUG "%s%d ioctl TCSBRKP\n", CTC_TTY_NAME, info->line); +#endif + retval = tty_check_change(tty); + if (retval) + return retval; + tty_wait_until_sent(tty, 0); + return 0; + case TIOCGSOFTCAR: +#ifdef CTC_DEBUG_MODEM_IOCTL + printk(KERN_DEBUG "%s%d ioctl TIOCGSOFTCAR\n", CTC_TTY_NAME, + info->line); +#endif + error = put_user(C_CLOCAL(tty) ? 1 : 0, (ulong __user *) arg); + return error; + case TIOCSSOFTCAR: +#ifdef CTC_DEBUG_MODEM_IOCTL + printk(KERN_DEBUG "%s%d ioctl TIOCSSOFTCAR\n", CTC_TTY_NAME, + info->line); +#endif + error = get_user(arg, (ulong __user *) arg); + if (error) + return error; + tty->termios->c_cflag = + ((tty->termios->c_cflag & ~CLOCAL) | + (arg ? 
CLOCAL : 0)); + return 0; + case TIOCSERGETLSR: /* Get line status register */ +#ifdef CTC_DEBUG_MODEM_IOCTL + printk(KERN_DEBUG "%s%d ioctl TIOCSERGETLSR\n", CTC_TTY_NAME, + info->line); +#endif + if (access_ok(VERIFY_WRITE, (void __user *) arg, sizeof(uint))) + return ctc_tty_get_lsr_info(info, (uint __user *) arg); + else + return -EFAULT; + default: +#ifdef CTC_DEBUG_MODEM_IOCTL + printk(KERN_DEBUG "UNKNOWN ioctl 0x%08x on %s%d\n", cmd, + CTC_TTY_NAME, info->line); +#endif + return -ENOIOCTLCMD; + } + return 0; +} + +static void +ctc_tty_set_termios(struct tty_struct *tty, struct termios *old_termios) +{ + ctc_tty_info *info = (ctc_tty_info *) tty->driver_data; + unsigned int cflag = tty->termios->c_cflag; + + DBF_TEXT(trace, 4, __FUNCTION__); + ctc_tty_change_speed(info); + + /* Handle transition to B0 */ + if ((old_termios->c_cflag & CBAUD) && !(cflag & CBAUD)) { + info->mcr &= ~(UART_MCR_DTR|UART_MCR_RTS); + ctc_tty_transmit_status(info); + } + + /* Handle transition from B0 to other */ + if (!(old_termios->c_cflag & CBAUD) && (cflag & CBAUD)) { + info->mcr |= UART_MCR_DTR; + if (!(tty->termios->c_cflag & CRTSCTS) || + !test_bit(TTY_THROTTLED, &tty->flags)) { + info->mcr |= UART_MCR_RTS; + } + ctc_tty_transmit_status(info); + } + + /* Handle turning off CRTSCTS */ + if ((old_termios->c_cflag & CRTSCTS) && + !(tty->termios->c_cflag & CRTSCTS)) + tty->hw_stopped = 0; +} + +/* + * ------------------------------------------------------------ + * ctc_tty_open() and friends + * ------------------------------------------------------------ + */ +static int +ctc_tty_block_til_ready(struct tty_struct *tty, struct file *filp, ctc_tty_info *info) +{ + DECLARE_WAITQUEUE(wait, NULL); + int do_clocal = 0; + unsigned long flags; + int retval; + + DBF_TEXT(trace, 4, __FUNCTION__); + /* + * If the device is in the middle of being closed, then block + * until it's done, and then try again. + */ + if (tty_hung_up_p(filp) || + (info->flags & CTC_ASYNC_CLOSING)) { + if (info->flags & CTC_ASYNC_CLOSING) + wait_event(info->close_wait, + !(info->flags & CTC_ASYNC_CLOSING)); +#ifdef MODEM_DO_RESTART + if (info->flags & CTC_ASYNC_HUP_NOTIFY) + return -EAGAIN; + else + return -ERESTARTSYS; +#else + return -EAGAIN; +#endif + } + /* + * If non-blocking mode is set, then make the check up front + * and then exit. + */ + if ((filp->f_flags & O_NONBLOCK) || + (tty->flags & (1 << TTY_IO_ERROR))) { + info->flags |= CTC_ASYNC_NORMAL_ACTIVE; + return 0; + } + if (tty->termios->c_cflag & CLOCAL) + do_clocal = 1; + /* + * Block waiting for the carrier detect and the line to become + * free (i.e., not in use by the callout). While we are in + * this loop, info->count is dropped by one, so that + * ctc_tty_close() knows when to free things. We restore it upon + * exit, either normal or abnormal. 
+ */ + retval = 0; + add_wait_queue(&info->open_wait, &wait); +#ifdef CTC_DEBUG_MODEM_OPEN + printk(KERN_DEBUG "ctc_tty_block_til_ready before block: %s%d, count = %d\n", + CTC_TTY_NAME, info->line, info->count); +#endif + spin_lock_irqsave(&ctc_tty_lock, flags); + if (!(tty_hung_up_p(filp))) + info->count--; + spin_unlock_irqrestore(&ctc_tty_lock, flags); + info->blocked_open++; + while (1) { + set_current_state(TASK_INTERRUPTIBLE); + if (tty_hung_up_p(filp) || + !(info->flags & CTC_ASYNC_INITIALIZED)) { +#ifdef MODEM_DO_RESTART + if (info->flags & CTC_ASYNC_HUP_NOTIFY) + retval = -EAGAIN; + else + retval = -ERESTARTSYS; +#else + retval = -EAGAIN; +#endif + break; + } + if (!(info->flags & CTC_ASYNC_CLOSING) && + (do_clocal || (info->msr & UART_MSR_DCD))) { + break; + } + if (signal_pending(current)) { + retval = -ERESTARTSYS; + break; + } +#ifdef CTC_DEBUG_MODEM_OPEN + printk(KERN_DEBUG "ctc_tty_block_til_ready blocking: %s%d, count = %d\n", + CTC_TTY_NAME, info->line, info->count); +#endif + schedule(); + } + current->state = TASK_RUNNING; + remove_wait_queue(&info->open_wait, &wait); + if (!tty_hung_up_p(filp)) + info->count++; + info->blocked_open--; +#ifdef CTC_DEBUG_MODEM_OPEN + printk(KERN_DEBUG "ctc_tty_block_til_ready after blocking: %s%d, count = %d\n", + CTC_TTY_NAME, info->line, info->count); +#endif + if (retval) + return retval; + info->flags |= CTC_ASYNC_NORMAL_ACTIVE; + return 0; +} + +/* + * This routine is called whenever a serial port is opened. It + * enables interrupts for a serial port, linking in its async structure into + * the IRQ chain. It also performs the serial-specific + * initialization for the tty structure. + */ +static int +ctc_tty_open(struct tty_struct *tty, struct file *filp) +{ + ctc_tty_info *info; + unsigned long saveflags; + int retval, + line; + + DBF_TEXT(trace, 3, __FUNCTION__); + line = tty->index; + if (line < 0 || line > CTC_TTY_MAX_DEVICES) + return -ENODEV; + info = &driver->info[line]; + if (ctc_tty_paranoia_check(info, tty->name, "ctc_tty_open")) + return -ENODEV; + if (!info->netdev) + return -ENODEV; +#ifdef CTC_DEBUG_MODEM_OPEN + printk(KERN_DEBUG "ctc_tty_open %s, count = %d\n", tty->name, + info->count); +#endif + spin_lock_irqsave(&ctc_tty_lock, saveflags); + info->count++; + tty->driver_data = info; + info->tty = tty; + spin_unlock_irqrestore(&ctc_tty_lock, saveflags); + /* + * Start up serial port + */ + retval = ctc_tty_startup(info); + if (retval) { +#ifdef CTC_DEBUG_MODEM_OPEN + printk(KERN_DEBUG "ctc_tty_open return after startup\n"); +#endif + return retval; + } + retval = ctc_tty_block_til_ready(tty, filp, info); + if (retval) { +#ifdef CTC_DEBUG_MODEM_OPEN + printk(KERN_DEBUG "ctc_tty_open return after ctc_tty_block_til_ready \n"); +#endif + return retval; + } +#ifdef CTC_DEBUG_MODEM_OPEN + printk(KERN_DEBUG "ctc_tty_open %s successful...\n", tty->name); +#endif + return 0; +} + +static void +ctc_tty_close(struct tty_struct *tty, struct file *filp) +{ + ctc_tty_info *info = (ctc_tty_info *) tty->driver_data; + ulong flags; + ulong timeout; + DBF_TEXT(trace, 3, __FUNCTION__); + if (!info || ctc_tty_paranoia_check(info, tty->name, "ctc_tty_close")) + return; + spin_lock_irqsave(&ctc_tty_lock, flags); + if (tty_hung_up_p(filp)) { + spin_unlock_irqrestore(&ctc_tty_lock, flags); +#ifdef CTC_DEBUG_MODEM_OPEN + printk(KERN_DEBUG "ctc_tty_close return after tty_hung_up_p\n"); +#endif + return; + } + if ((tty->count == 1) && (info->count != 1)) { + /* + * Uh, oh. tty->count is 1, which means that the tty + * structure will be freed. 
Info->count should always
+		 * be one in these conditions. If it's greater than
+		 * one, we've got real problems, since it means the
+		 * serial port won't be shut down.
+		 */
+		printk(KERN_ERR "ctc_tty_close: bad port count; tty->count is 1, "
+		       "info->count is %d\n", info->count);
+		info->count = 1;
+	}
+	if (--info->count < 0) {
+		printk(KERN_ERR "ctc_tty_close: bad port count for %s%d: %d\n",
+		       CTC_TTY_NAME, info->line, info->count);
+		info->count = 0;
+	}
+	if (info->count) {
+		spin_unlock_irqrestore(&ctc_tty_lock, flags);
+#ifdef CTC_DEBUG_MODEM_OPEN
+		printk(KERN_DEBUG "ctc_tty_close after info->count != 0\n");
+#endif
+		return;
+	}
+	info->flags |= CTC_ASYNC_CLOSING;
+	tty->closing = 1;
+	/*
+	 * At this point we stop accepting input. To do this, we
+	 * disable the receive line status interrupts, and tell the
+	 * interrupt driver to stop checking the data ready bit in the
+	 * line status register.
+	 */
+	if (info->flags & CTC_ASYNC_INITIALIZED) {
+		tty_wait_until_sent(tty, 30*HZ); /* 30 seconds timeout */
+		/*
+		 * Before we drop DTR, make sure the UART transmitter
+		 * has completely drained; this is especially
+		 * important if there is a transmit FIFO!
+		 */
+		timeout = jiffies + HZ;
+		while (!(info->lsr & UART_LSR_TEMT)) {
+			spin_unlock_irqrestore(&ctc_tty_lock, flags);
+			msleep(500);
+			spin_lock_irqsave(&ctc_tty_lock, flags);
+			if (time_after(jiffies,timeout))
+				break;
+		}
+	}
+	ctc_tty_shutdown(info);
+	if (tty->driver->flush_buffer) {
+		skb_queue_purge(&info->tx_queue);
+		info->lsr |= UART_LSR_TEMT;
+	}
+	tty_ldisc_flush(tty);
+	info->tty = 0;
+	tty->closing = 0;
+	if (info->blocked_open) {
+		set_current_state(TASK_INTERRUPTIBLE);
+		schedule_timeout(HZ/2);
+		wake_up_interruptible(&info->open_wait);
+	}
+	info->flags &= ~(CTC_ASYNC_NORMAL_ACTIVE | CTC_ASYNC_CLOSING);
+	wake_up_interruptible(&info->close_wait);
+	spin_unlock_irqrestore(&ctc_tty_lock, flags);
+#ifdef CTC_DEBUG_MODEM_OPEN
+	printk(KERN_DEBUG "ctc_tty_close normal exit\n");
+#endif
+}
+
+/*
+ * ctc_tty_hangup() --- called by tty_hangup() when a hangup is signaled.
+ */
+static void
+ctc_tty_hangup(struct tty_struct *tty)
+{
+	ctc_tty_info *info = (ctc_tty_info *)tty->driver_data;
+	unsigned long saveflags;
+	DBF_TEXT(trace, 3, __FUNCTION__);
+	if (ctc_tty_paranoia_check(info, tty->name, "ctc_tty_hangup"))
+		return;
+	ctc_tty_shutdown(info);
+	info->count = 0;
+	info->flags &= ~CTC_ASYNC_NORMAL_ACTIVE;
+	spin_lock_irqsave(&ctc_tty_lock, saveflags);
+	info->tty = 0;
+	spin_unlock_irqrestore(&ctc_tty_lock, saveflags);
+	wake_up_interruptible(&info->open_wait);
+}
+
+
+/*
+ * For all online tty's, try sending data to
+ * the lower levels.
+ */
+static void
+ctc_tty_task(unsigned long arg)
+{
+	ctc_tty_info *info = (void *)arg;
+	unsigned long saveflags;
+	int again;
+
+	DBF_TEXT(trace, 3, __FUNCTION__);
+	spin_lock_irqsave(&ctc_tty_lock, saveflags);
+	if ((!ctc_tty_shuttingdown) && info) {
+		again = ctc_tty_tint(info);
+		if (!again)
+			info->lsr |= UART_LSR_TEMT;
+		again |= ctc_tty_readmodem(info);
+		if (again) {
+			tasklet_schedule(&info->tasklet);
+		}
+	}
+	spin_unlock_irqrestore(&ctc_tty_lock, saveflags);
+}
+
+static struct tty_operations ctc_ops = {
+	.open = ctc_tty_open,
+	.close = ctc_tty_close,
+	.write = ctc_tty_write,
+	.flush_chars = ctc_tty_flush_chars,
+	.write_room = ctc_tty_write_room,
+	.chars_in_buffer = ctc_tty_chars_in_buffer,
+	.flush_buffer = ctc_tty_flush_buffer,
+	.ioctl = ctc_tty_ioctl,
+	.throttle = ctc_tty_throttle,
+	.unthrottle = ctc_tty_unthrottle,
+	.set_termios = ctc_tty_set_termios,
+	.hangup = ctc_tty_hangup,
+	.tiocmget = ctc_tty_tiocmget,
+	.tiocmset = ctc_tty_tiocmset,
+};
+
+int
+ctc_tty_init(void)
+{
+	int i;
+	ctc_tty_info *info;
+	struct tty_driver *device;
+
+	DBF_TEXT(trace, 2, __FUNCTION__);
+	driver = kmalloc(sizeof(ctc_tty_driver), GFP_KERNEL);
+	if (driver == NULL) {
+		printk(KERN_WARNING "Out of memory in ctc_tty_modem_init\n");
+		return -ENOMEM;
+	}
+	memset(driver, 0, sizeof(ctc_tty_driver));
+	device = alloc_tty_driver(CTC_TTY_MAX_DEVICES);
+	if (!device) {
+		kfree(driver);
+		printk(KERN_WARNING "Out of memory in ctc_tty_modem_init\n");
+		return -ENOMEM;
+	}
+
+	device->devfs_name = "ctc/" CTC_TTY_NAME;
+	device->name = CTC_TTY_NAME;
+	device->major = CTC_TTY_MAJOR;
+	device->minor_start = 0;
+	device->type = TTY_DRIVER_TYPE_SERIAL;
+	device->subtype = SERIAL_TYPE_NORMAL;
+	device->init_termios = tty_std_termios;
+	device->init_termios.c_cflag = B9600 | CS8 | CREAD | HUPCL | CLOCAL;
+	device->flags = TTY_DRIVER_REAL_RAW;
+	device->driver_name = "ctc_tty";
+	tty_set_operations(device, &ctc_ops);
+	if (tty_register_driver(device)) {
+		printk(KERN_WARNING "ctc_tty: Couldn't register serial-device\n");
+		put_tty_driver(device);
+		kfree(driver);
+		return -1;
+	}
+	driver->ctc_tty_device = device;
+	for (i = 0; i < CTC_TTY_MAX_DEVICES; i++) {
+		info = &driver->info[i];
+		init_MUTEX(&info->write_sem);
+		tasklet_init(&info->tasklet, ctc_tty_task,
+			     (unsigned long) info);
+		info->magic = CTC_ASYNC_MAGIC;
+		info->line = i;
+		info->tty = 0;
+		info->count = 0;
+		info->blocked_open = 0;
+		init_waitqueue_head(&info->open_wait);
+		init_waitqueue_head(&info->close_wait);
+		skb_queue_head_init(&info->tx_queue);
+		skb_queue_head_init(&info->rx_queue);
+		init_timer(&info->stoptimer);
+		info->stoptimer.function = ctc_tty_stopdev;
+		info->stoptimer.data = (unsigned long)info;
+		info->mcr = UART_MCR_RTS;
+	}
+	return 0;
+}
+
+int
+ctc_tty_register_netdev(struct net_device *dev) {
+	int ttynum;
+	char *err;
+	char *p;
+
+	DBF_TEXT(trace, 2, __FUNCTION__);
+	if ((!dev) || (!dev->name)) {
+		printk(KERN_WARNING
+		       "ctc_tty_register_netdev called "
+		       "with NULL dev or NULL dev-name\n");
+		return -1;
+	}
+
+	/*
+	 * If the name is a format string the caller wants us to
+	 * do a name allocation: format string must end with %d
+	 */
+	if (strchr(dev->name, '%'))
+	{
+		int err = dev_alloc_name(dev, dev->name);	// dev->name is changed by this
+		if (err < 0) {
+			printk(KERN_DEBUG "dev_alloc returned error %d\n", err);
+			return err;
+		}
+
+	}
+
+	for (p = dev->name; p && ((*p < '0') || (*p > '9')); p++);
+	ttynum = simple_strtoul(p, &err, 0);
+	if ((ttynum < 0) || (ttynum >= CTC_TTY_MAX_DEVICES) ||
+	    (err &&
*err)) { + printk(KERN_WARNING + "ctc_tty_register_netdev called " + "with number in name '%s'\n", dev->name); + return -1; + } + if (driver->info[ttynum].netdev) { + printk(KERN_WARNING + "ctc_tty_register_netdev called " + "for already registered device '%s'\n", + dev->name); + return -1; + } + driver->info[ttynum].netdev = dev; + return 0; +} + +void +ctc_tty_unregister_netdev(struct net_device *dev) { + int i; + unsigned long saveflags; + ctc_tty_info *info = NULL; + + DBF_TEXT(trace, 2, __FUNCTION__); + spin_lock_irqsave(&ctc_tty_lock, saveflags); + for (i = 0; i < CTC_TTY_MAX_DEVICES; i++) + if (driver->info[i].netdev == dev) { + info = &driver->info[i]; + break; + } + if (info) { + info->netdev = NULL; + skb_queue_purge(&info->tx_queue); + skb_queue_purge(&info->rx_queue); + } + spin_unlock_irqrestore(&ctc_tty_lock, saveflags); +} + +void +ctc_tty_cleanup(void) { + unsigned long saveflags; + + DBF_TEXT(trace, 2, __FUNCTION__); + spin_lock_irqsave(&ctc_tty_lock, saveflags); + ctc_tty_shuttingdown = 1; + spin_unlock_irqrestore(&ctc_tty_lock, saveflags); + tty_unregister_driver(driver->ctc_tty_device); + put_tty_driver(driver->ctc_tty_device); + kfree(driver); + driver = NULL; +} diff --git a/drivers/s390/net/ctctty.h b/drivers/s390/net/ctctty.h new file mode 100644 index 000000000000..84b2f8f23ab3 --- /dev/null +++ b/drivers/s390/net/ctctty.h @@ -0,0 +1,37 @@ +/* + * $Id: ctctty.h,v 1.4 2003/09/18 08:01:10 mschwide Exp $ + * + * CTC / ESCON network driver, tty interface. + * + * Copyright (C) 2001 IBM Deutschland Entwicklung GmbH, IBM Corporation + * Author(s): Fritz Elfert (elfert@de.ibm.com, felfert@millenux.com) + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +#ifndef _CTCTTY_H_ +#define _CTCTTY_H_ + +#include <linux/skbuff.h> +#include <linux/netdevice.h> + +extern int ctc_tty_register_netdev(struct net_device *); +extern void ctc_tty_unregister_netdev(struct net_device *); +extern void ctc_tty_netif_rx(struct sk_buff *); +extern int ctc_tty_init(void); +extern void ctc_tty_cleanup(void); +extern void ctc_tty_setcarrier(struct net_device *, int); + +#endif diff --git a/drivers/s390/net/cu3088.c b/drivers/s390/net/cu3088.c new file mode 100644 index 000000000000..1b0a9f16024c --- /dev/null +++ b/drivers/s390/net/cu3088.c @@ -0,0 +1,166 @@ +/* + * $Id: cu3088.c,v 1.34 2004/06/15 13:16:27 pavlic Exp $ + * + * CTC / LCS ccw_device driver + * + * Copyright (C) 2002 IBM Deutschland Entwicklung GmbH, IBM Corporation + * Author(s): Arnd Bergmann <arndb@de.ibm.com> + * Cornelia Huck <cohuck@de.ibm.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2, or (at your option) + * any later version. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + * + */ + +#include <linux/init.h> +#include <linux/module.h> +#include <linux/err.h> + +#include <asm/ccwdev.h> +#include <asm/ccwgroup.h> + +#include "cu3088.h" + +const char *cu3088_type[] = { + "not a channel", + "CTC/A", + "ESCON channel", + "FICON channel", + "P390 LCS card", + "OSA LCS card", + "unknown channel type", + "unsupported channel type", +}; + +/* static definitions */ + +static struct ccw_device_id cu3088_ids[] = { + { CCW_DEVICE(0x3088, 0x08), .driver_info = channel_type_parallel }, + { CCW_DEVICE(0x3088, 0x1f), .driver_info = channel_type_escon }, + { CCW_DEVICE(0x3088, 0x1e), .driver_info = channel_type_ficon }, + { CCW_DEVICE(0x3088, 0x01), .driver_info = channel_type_p390 }, + { CCW_DEVICE(0x3088, 0x60), .driver_info = channel_type_osa2 }, + { /* end of list */ } +}; + +static struct ccw_driver cu3088_driver; + +struct device *cu3088_root_dev; + +static ssize_t +group_write(struct device_driver *drv, const char *buf, size_t count) +{ + const char *start, *end; + char bus_ids[2][BUS_ID_SIZE], *argv[2]; + int i; + int ret; + struct ccwgroup_driver *cdrv; + + cdrv = to_ccwgroupdrv(drv); + if (!cdrv) + return -EINVAL; + start = buf; + for (i=0; i<2; i++) { + static const char delim[] = {',', '\n'}; + int len; + + if (!(end = strchr(start, delim[i]))) + return count; + len = min_t(ptrdiff_t, BUS_ID_SIZE, end - start + 1); + strlcpy (bus_ids[i], start, len); + argv[i] = bus_ids[i]; + start = end + 1; + } + + ret = ccwgroup_create(cu3088_root_dev, cdrv->driver_id, + &cu3088_driver, 2, argv); + + return (ret == 0) ? 
count : ret; +} + +static DRIVER_ATTR(group, 0200, NULL, group_write); + +/* Register-unregister for ctc&lcs */ +int +register_cu3088_discipline(struct ccwgroup_driver *dcp) +{ + int rc; + + if (!dcp) + return -EINVAL; + + /* Register discipline.*/ + rc = ccwgroup_driver_register(dcp); + if (rc) + return rc; + + rc = driver_create_file(&dcp->driver, &driver_attr_group); + if (rc) + ccwgroup_driver_unregister(dcp); + + return rc; + +} + +void +unregister_cu3088_discipline(struct ccwgroup_driver *dcp) +{ + if (!dcp) + return; + + driver_remove_file(&dcp->driver, &driver_attr_group); + ccwgroup_driver_unregister(dcp); +} + +static struct ccw_driver cu3088_driver = { + .owner = THIS_MODULE, + .ids = cu3088_ids, + .name = "cu3088", + .probe = ccwgroup_probe_ccwdev, + .remove = ccwgroup_remove_ccwdev, +}; + +/* module setup */ +static int __init +cu3088_init (void) +{ + int rc; + + cu3088_root_dev = s390_root_dev_register("cu3088"); + if (IS_ERR(cu3088_root_dev)) + return PTR_ERR(cu3088_root_dev); + rc = ccw_driver_register(&cu3088_driver); + if (rc) + s390_root_dev_unregister(cu3088_root_dev); + + return rc; +} + +static void __exit +cu3088_exit (void) +{ + ccw_driver_unregister(&cu3088_driver); + s390_root_dev_unregister(cu3088_root_dev); +} + +MODULE_DEVICE_TABLE(ccw,cu3088_ids); +MODULE_AUTHOR("Arnd Bergmann <arndb@de.ibm.com>"); +MODULE_LICENSE("GPL"); + +module_init(cu3088_init); +module_exit(cu3088_exit); + +EXPORT_SYMBOL_GPL(cu3088_type); +EXPORT_SYMBOL_GPL(register_cu3088_discipline); +EXPORT_SYMBOL_GPL(unregister_cu3088_discipline); diff --git a/drivers/s390/net/cu3088.h b/drivers/s390/net/cu3088.h new file mode 100644 index 000000000000..0ec49a8b3adc --- /dev/null +++ b/drivers/s390/net/cu3088.h @@ -0,0 +1,41 @@ +#ifndef _CU3088_H +#define _CU3088_H + +/** + * Enum for classifying detected devices. + */ +enum channel_types { + /* Device is not a channel */ + channel_type_none, + + /* Device is a CTC/A */ + channel_type_parallel, + + /* Device is a ESCON channel */ + channel_type_escon, + + /* Device is a FICON channel */ + channel_type_ficon, + + /* Device is a P390 LCS card */ + channel_type_p390, + + /* Device is a OSA2 card */ + channel_type_osa2, + + /* Device is a channel, but we don't know + * anything about it */ + channel_type_unknown, + + /* Device is an unsupported model */ + channel_type_unsupported, + + /* number of type entries */ + num_channel_types +}; + +extern const char *cu3088_type[num_channel_types]; +extern int register_cu3088_discipline(struct ccwgroup_driver *); +extern void unregister_cu3088_discipline(struct ccwgroup_driver *); + +#endif diff --git a/drivers/s390/net/fsm.c b/drivers/s390/net/fsm.c new file mode 100644 index 000000000000..fa09440d82e5 --- /dev/null +++ b/drivers/s390/net/fsm.c @@ -0,0 +1,220 @@ +/** + * $Id: fsm.c,v 1.6 2003/10/15 11:37:29 mschwide Exp $ + * + * A generic FSM based on fsm used in isdn4linux + * + */ + +#include "fsm.h" +#include <linux/config.h> +#include <linux/module.h> +#include <linux/timer.h> + +MODULE_AUTHOR("(C) 2000 IBM Corp. 
by Fritz Elfert (felfert@millenux.com)"); +MODULE_DESCRIPTION("Finite state machine helper functions"); +MODULE_LICENSE("GPL"); + +fsm_instance * +init_fsm(char *name, const char **state_names, const char **event_names, int nr_states, + int nr_events, const fsm_node *tmpl, int tmpl_len, int order) +{ + int i; + fsm_instance *this; + fsm_function_t *m; + fsm *f; + + this = (fsm_instance *)kmalloc(sizeof(fsm_instance), order); + if (this == NULL) { + printk(KERN_WARNING + "fsm(%s): init_fsm: Couldn't alloc instance\n", name); + return NULL; + } + memset(this, 0, sizeof(fsm_instance)); + strlcpy(this->name, name, sizeof(this->name)); + + f = (fsm *)kmalloc(sizeof(fsm), order); + if (f == NULL) { + printk(KERN_WARNING + "fsm(%s): init_fsm: Couldn't alloc fsm\n", name); + kfree_fsm(this); + return NULL; + } + memset(f, 0, sizeof(fsm)); + f->nr_events = nr_events; + f->nr_states = nr_states; + f->event_names = event_names; + f->state_names = state_names; + this->f = f; + + m = (fsm_function_t *)kmalloc( + sizeof(fsm_function_t) * nr_states * nr_events, order); + if (m == NULL) { + printk(KERN_WARNING + "fsm(%s): init_fsm: Couldn't alloc jumptable\n", name); + kfree_fsm(this); + return NULL; + } + memset(m, 0, sizeof(fsm_function_t) * f->nr_states * f->nr_events); + f->jumpmatrix = m; + + for (i = 0; i < tmpl_len; i++) { + if ((tmpl[i].cond_state >= nr_states) || + (tmpl[i].cond_event >= nr_events) ) { + printk(KERN_ERR + "fsm(%s): init_fsm: Bad template l=%d st(%ld/%ld) ev(%ld/%ld)\n", + name, i, (long)tmpl[i].cond_state, (long)f->nr_states, + (long)tmpl[i].cond_event, (long)f->nr_events); + kfree_fsm(this); + return NULL; + } else + m[nr_states * tmpl[i].cond_event + tmpl[i].cond_state] = + tmpl[i].function; + } + return this; +} + +void +kfree_fsm(fsm_instance *this) +{ + if (this) { + if (this->f) { + if (this->f->jumpmatrix) + kfree(this->f->jumpmatrix); + kfree(this->f); + } + kfree(this); + } else + printk(KERN_WARNING + "fsm: kfree_fsm called with NULL argument\n"); +} + +#if FSM_DEBUG_HISTORY +void +fsm_print_history(fsm_instance *fi) +{ + int idx = 0; + int i; + + if (fi->history_size >= FSM_HISTORY_SIZE) + idx = fi->history_index; + + printk(KERN_DEBUG "fsm(%s): History:\n", fi->name); + for (i = 0; i < fi->history_size; i++) { + int e = fi->history[idx].event; + int s = fi->history[idx++].state; + idx %= FSM_HISTORY_SIZE; + if (e == -1) + printk(KERN_DEBUG " S=%s\n", + fi->f->state_names[s]); + else + printk(KERN_DEBUG " S=%s E=%s\n", + fi->f->state_names[s], + fi->f->event_names[e]); + } + fi->history_size = fi->history_index = 0; +} + +void +fsm_record_history(fsm_instance *fi, int state, int event) +{ + fi->history[fi->history_index].state = state; + fi->history[fi->history_index++].event = event; + fi->history_index %= FSM_HISTORY_SIZE; + if (fi->history_size < FSM_HISTORY_SIZE) + fi->history_size++; +} +#endif + +const char * +fsm_getstate_str(fsm_instance *fi) +{ + int st = atomic_read(&fi->state); + if (st >= fi->f->nr_states) + return "Invalid"; + return fi->f->state_names[st]; +} + +static void +fsm_expire_timer(fsm_timer *this) +{ +#if FSM_TIMER_DEBUG + printk(KERN_DEBUG "fsm(%s): Timer %p expired\n", + this->fi->name, this); +#endif + fsm_event(this->fi, this->expire_event, this->event_arg); +} + +void +fsm_settimer(fsm_instance *fi, fsm_timer *this) +{ + this->fi = fi; + this->tl.function = (void *)fsm_expire_timer; + this->tl.data = (long)this; +#if FSM_TIMER_DEBUG + printk(KERN_DEBUG "fsm(%s): Create timer %p\n", fi->name, + this); +#endif + init_timer(&this->tl); +} + 
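+/*
+ * Usage sketch (illustrative only, guarded out of the build): a driver
+ * embeds an fsm_timer in its private data, binds it to the owning FSM
+ * once with fsm_settimer() and then arms or cancels it with the helpers
+ * below. The guard macro, function name and event number here are
+ * hypothetical placeholders, not part of this file's API.
+ */
+#ifdef FSM_TIMER_USAGE_EXAMPLE
+static void
+fsm_timer_usage_example(fsm_instance *fi, fsm_timer *timer)
+{
+	/* Bind the timer to its FSM once, at setup time. */
+	fsm_settimer(fi, timer);
+	/* Arm it: deliver EV_EXAMPLE_TIMEOUT to fi after 5000 ms. */
+	fsm_addtimer(timer, 5000, EV_EXAMPLE_TIMEOUT, fi->userdata);
+	/* Cancel it again if the awaited condition occurs first. */
+	fsm_deltimer(timer);
+}
+#endif
+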
+void
+fsm_deltimer(fsm_timer *this)
+{
+#if FSM_TIMER_DEBUG
+	printk(KERN_DEBUG "fsm(%s): Delete timer %p\n", this->fi->name,
+		this);
+#endif
+	del_timer(&this->tl);
+}
+
+int
+fsm_addtimer(fsm_timer *this, int millisec, int event, void *arg)
+{
+
+#if FSM_TIMER_DEBUG
+	printk(KERN_DEBUG "fsm(%s): Add timer %p %dms\n",
+		this->fi->name, this, millisec);
+#endif
+
+	init_timer(&this->tl);
+	this->tl.function = (void *)fsm_expire_timer;
+	this->tl.data = (long)this;
+	this->expire_event = event;
+	this->event_arg = arg;
+	this->tl.expires = jiffies + (millisec * HZ) / 1000;
+	add_timer(&this->tl);
+	return 0;
+}
+
+/* FIXME: this function is never used, why */
+void
+fsm_modtimer(fsm_timer *this, int millisec, int event, void *arg)
+{
+
+#if FSM_TIMER_DEBUG
+	printk(KERN_DEBUG "fsm(%s): Restart timer %p %dms\n",
+		this->fi->name, this, millisec);
+#endif
+
+	del_timer(&this->tl);
+	init_timer(&this->tl);
+	this->tl.function = (void *)fsm_expire_timer;
+	this->tl.data = (long)this;
+	this->expire_event = event;
+	this->event_arg = arg;
+	this->tl.expires = jiffies + (millisec * HZ) / 1000;
+	add_timer(&this->tl);
+}
+
+EXPORT_SYMBOL(init_fsm);
+EXPORT_SYMBOL(kfree_fsm);
+EXPORT_SYMBOL(fsm_settimer);
+EXPORT_SYMBOL(fsm_deltimer);
+EXPORT_SYMBOL(fsm_addtimer);
+EXPORT_SYMBOL(fsm_modtimer);
+EXPORT_SYMBOL(fsm_getstate_str);
+
+#if FSM_DEBUG_HISTORY
+EXPORT_SYMBOL(fsm_print_history);
+EXPORT_SYMBOL(fsm_record_history);
+#endif
diff --git a/drivers/s390/net/fsm.h b/drivers/s390/net/fsm.h
new file mode 100644
index 000000000000..f9a011001eb6
--- /dev/null
+++ b/drivers/s390/net/fsm.h
@@ -0,0 +1,265 @@
+/* $Id: fsm.h,v 1.1.1.1 2002/03/13 19:33:09 mschwide Exp $
+ */
+#ifndef _FSM_H_
+#define _FSM_H_
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/timer.h>
+#include <linux/time.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <asm/atomic.h>
+
+/**
+ * Define this to get debugging messages.
+ */
+#define FSM_DEBUG 0
+
+/**
+ * Define this to get debugging messages for
+ * timer handling.
+ */
+#define FSM_TIMER_DEBUG 0
+
+/**
+ * Define these to record a history of
+ * Events/Statechanges and print it if an
+ * action_function is not found.
+ */
+#define FSM_DEBUG_HISTORY 0
+#define FSM_HISTORY_SIZE 40
+
+struct fsm_instance_t;
+
+/**
+ * Definition of an action function, called by a FSM
+ */
+typedef void (*fsm_function_t)(struct fsm_instance_t *, int, void *);
+
+/**
+ * Internal jump table for a FSM
+ */
+typedef struct {
+	fsm_function_t *jumpmatrix;
+	int nr_events;
+	int nr_states;
+	const char **event_names;
+	const char **state_names;
+} fsm;
+
+#if FSM_DEBUG_HISTORY
+/**
+ * Element of State/Event history used for debugging.
+ */
+typedef struct {
+	int state;
+	int event;
+} fsm_history;
+#endif
+
+/**
+ * Representation of a FSM
+ */
+typedef struct fsm_instance_t {
+	fsm *f;
+	atomic_t state;
+	char name[16];
+	void *userdata;
+	int userint;
+#if FSM_DEBUG_HISTORY
+	int history_index;
+	int history_size;
+	fsm_history history[FSM_HISTORY_SIZE];
+#endif
+} fsm_instance;
+
+/**
+ * Description of a state-event combination
+ */
+typedef struct {
+	int cond_state;
+	int cond_event;
+	fsm_function_t function;
+} fsm_node;
+
+/**
+ * Description of a FSM Timer.
+ */
+typedef struct {
+	fsm_instance *fi;
+	struct timer_list tl;
+	int expire_event;
+	void *event_arg;
+} fsm_timer;
+
+/**
+ * Creates an FSM
+ *
+ * @param name        Name of this instance for logging purposes.
+ * @param state_names An array of names for all states for logging purposes.
+ * @param event_names An array of names for all events for logging purposes.
+ * @param nr_states   Number of states for this instance.
+ * @param nr_events   Number of events for this instance.
+ * @param tmpl        An array of fsm_nodes, describing this FSM.
+ * @param tmpl_len    Length of the describing array.
+ * @param order       Parameter for allocation of the FSM data structs.
+ */
+extern fsm_instance *
+init_fsm(char *name, const char **state_names,
+	 const char **event_names,
+	 int nr_states, int nr_events, const fsm_node *tmpl,
+	 int tmpl_len, int order);
+
+/**
+ * Releases an FSM
+ *
+ * @param fi Pointer to an FSM, previously created with init_fsm.
+ */
+extern void kfree_fsm(fsm_instance *fi);
+
+#if FSM_DEBUG_HISTORY
+extern void
+fsm_print_history(fsm_instance *fi);
+
+extern void
+fsm_record_history(fsm_instance *fi, int state, int event);
+#endif
+
+/**
+ * Emits an event to a FSM.
+ * If an action function is defined for the current state/event combination,
+ * this function is called.
+ *
+ * @param fi    Pointer to FSM which should receive the event.
+ * @param event The event to be delivered.
+ * @param arg   A generic argument, handed to the action function.
+ *
+ * @return      0  on success,
+ *              1  if current state or event is out of range,
+ *              !0 if state and event in range, but no action defined.
+ */
+extern __inline__ int
+fsm_event(fsm_instance *fi, int event, void *arg)
+{
+	fsm_function_t r;
+	int state = atomic_read(&fi->state);
+
+	if ((state >= fi->f->nr_states) ||
+	    (event >= fi->f->nr_events) ) {
+		printk(KERN_ERR "fsm(%s): Invalid state st(%ld/%ld) ev(%d/%ld)\n",
+			fi->name, (long)state,(long)fi->f->nr_states, event,
+			(long)fi->f->nr_events);
+#if FSM_DEBUG_HISTORY
+		fsm_print_history(fi);
+#endif
+		return 1;
+	}
+	r = fi->f->jumpmatrix[fi->f->nr_states * event + state];
+	if (r) {
+#if FSM_DEBUG
+		printk(KERN_DEBUG "fsm(%s): state %s event %s\n",
+		       fi->name, fi->f->state_names[state],
+		       fi->f->event_names[event]);
+#endif
+#if FSM_DEBUG_HISTORY
+		fsm_record_history(fi, state, event);
+#endif
+		r(fi, event, arg);
+		return 0;
+	} else {
+#if FSM_DEBUG || FSM_DEBUG_HISTORY
+		printk(KERN_DEBUG "fsm(%s): no function for event %s in state %s\n",
+		       fi->name, fi->f->event_names[event],
+		       fi->f->state_names[state]);
+#endif
+#if FSM_DEBUG_HISTORY
+		fsm_print_history(fi);
+#endif
+		return !0;
+	}
+}
+
+/**
+ * Modifies the state of an FSM.
+ * This does <em>not</em> trigger an event or call an action function.
+ *
+ * @param fi       Pointer to FSM
+ * @param newstate The new state for this FSM.
+ */
+extern __inline__ void
+fsm_newstate(fsm_instance *fi, int newstate)
+{
+	atomic_set(&fi->state,newstate);
+#if FSM_DEBUG_HISTORY
+	fsm_record_history(fi, newstate, -1);
+#endif
+#if FSM_DEBUG
+	printk(KERN_DEBUG "fsm(%s): New state %s\n", fi->name,
+		fi->f->state_names[newstate]);
+#endif
+}
+
+/**
+ * Retrieves the state of an FSM
+ *
+ * @param fi Pointer to FSM
+ *
+ * @return The current state of the FSM.
+ */
+extern __inline__ int
+fsm_getstate(fsm_instance *fi)
+{
+	return atomic_read(&fi->state);
+}
+
+/**
+ * Retrieves the name of the state of an FSM
+ *
+ * @param fi Pointer to FSM
+ *
+ * @return The current state of the FSM in a human readable form.
+ */
+extern const char *fsm_getstate_str(fsm_instance *fi);
+
+/**
+ * Initializes a timer for an FSM.
+ * This prepares an fsm_timer for usage with fsm_addtimer.
+ *
+ * @param fi    Pointer to FSM
+ * @param timer The timer to be initialized.
+ */ +extern void fsm_settimer(fsm_instance *fi, fsm_timer *); + +/** + * Clears a pending timer of an FSM instance. + * + * @param timer The timer to clear. + */ +extern void fsm_deltimer(fsm_timer *timer); + +/** + * Adds and starts a timer to an FSM instance. + * + * @param timer The timer to be added. The field fi of that timer + * must have been set to point to the instance. + * @param millisec Duration, after which the timer should expire. + * @param event Event, to trigger if timer expires. + * @param arg Generic argument, provided to expiry function. + * + * @return 0 on success, -1 if timer is already active. + */ +extern int fsm_addtimer(fsm_timer *timer, int millisec, int event, void *arg); + +/** + * Modifies a timer of an FSM. + * + * @param timer The timer to modify. + * @param millisec Duration, after which the timer should expire. + * @param event Event, to trigger if timer expires. + * @param arg Generic argument, provided to expiry function. + */ +extern void fsm_modtimer(fsm_timer *timer, int millisec, int event, void *arg); + +#endif /* _FSM_H_ */ diff --git a/drivers/s390/net/iucv.c b/drivers/s390/net/iucv.c new file mode 100644 index 000000000000..1ac6563ee3e0 --- /dev/null +++ b/drivers/s390/net/iucv.c @@ -0,0 +1,2567 @@ +/* + * $Id: iucv.c,v 1.43 2005/02/09 14:47:43 braunu Exp $ + * + * IUCV network driver + * + * Copyright (C) 2001 IBM Deutschland Entwicklung GmbH, IBM Corporation + * Author(s): + * Original source: + * Alan Altmark (Alan_Altmark@us.ibm.com) Sept. 2000 + * Xenia Tkatschow (xenia@us.ibm.com) + * 2Gb awareness and general cleanup: + * Fritz Elfert (elfert@de.ibm.com, felfert@millenux.com) + * + * Documentation used: + * The original source + * CP Programming Service, IBM document # SC24-5760 + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + * + * RELEASE-TAG: IUCV lowlevel driver $Revision: 1.43 $ + * + */ + +/* #define DEBUG */ + +#include <linux/module.h> +#include <linux/moduleparam.h> +#include <linux/config.h> + +#include <linux/spinlock.h> +#include <linux/kernel.h> +#include <linux/slab.h> +#include <linux/init.h> +#include <linux/interrupt.h> +#include <linux/list.h> +#include <linux/errno.h> +#include <linux/err.h> +#include <linux/device.h> +#include <asm/atomic.h> +#include "iucv.h" +#include <asm/io.h> +#include <asm/s390_ext.h> +#include <asm/ebcdic.h> +#include <asm/smp.h> +#include <asm/ccwdev.h> //for root device stuff + +/* FLAGS: + * All flags are defined in the field IPFLAGS1 of each function + * and can be found in CP Programming Services. 
+ * IPSRCCLS - Indicates you have specified a source class + * IPFGMCL - Indicates you have specified a target class + * IPFGPID - Indicates you have specified a pathid + * IPFGMID - Indicates you have specified a message ID + * IPANSLST - Indicates that you are using an address list for + * reply data + * IPBUFLST - Indicates that you are using an address list for + * message data + */ + +#define IPSRCCLS 0x01 +#define IPFGMCL 0x01 +#define IPFGPID 0x02 +#define IPFGMID 0x04 +#define IPANSLST 0x08 +#define IPBUFLST 0x40 + +static int +iucv_bus_match (struct device *dev, struct device_driver *drv) +{ + return 0; +} + +struct bus_type iucv_bus = { + .name = "iucv", + .match = iucv_bus_match, +}; + +struct device *iucv_root; + +/* General IUCV interrupt structure */ +typedef struct { + __u16 ippathid; + __u8 res1; + __u8 iptype; + __u32 res2; + __u8 ipvmid[8]; + __u8 res3[24]; +} iucv_GeneralInterrupt; + +static iucv_GeneralInterrupt *iucv_external_int_buffer = NULL; + +/* Spin Lock declaration */ + +static DEFINE_SPINLOCK(iucv_lock); + +static int messagesDisabled = 0; + +/***************INTERRUPT HANDLING ***************/ + +typedef struct { + struct list_head queue; + iucv_GeneralInterrupt data; +} iucv_irqdata; + +static struct list_head iucv_irq_queue; +static DEFINE_SPINLOCK(iucv_irq_queue_lock); + +/* + *Internal function prototypes + */ +static void iucv_tasklet_handler(unsigned long); +static void iucv_irq_handler(struct pt_regs *, __u16); + +static DECLARE_TASKLET(iucv_tasklet,iucv_tasklet_handler,0); + +/************ FUNCTION ID'S ****************************/ + +#define ACCEPT 10 +#define CONNECT 11 +#define DECLARE_BUFFER 12 +#define PURGE 9 +#define QUERY 0 +#define QUIESCE 13 +#define RECEIVE 5 +#define REJECT 8 +#define REPLY 6 +#define RESUME 14 +#define RETRIEVE_BUFFER 2 +#define SEND 4 +#define SETMASK 16 +#define SEVER 15 + +/** + * Structure: handler + * members: list - list management. + * structure: id + * userid - 8 char array of machine identification + * user_data - 16 char array for user identification + * mask - 24 char array used to compare the 2 previous + * interrupt_table - vector of interrupt functions. + * pgm_data - ulong, application data that is passed + * to the interrupt handlers +*/ +typedef struct handler_t { + struct list_head list; + struct { + __u8 userid[8]; + __u8 user_data[16]; + __u8 mask[24]; + } id; + iucv_interrupt_ops_t *interrupt_table; + void *pgm_data; +} handler; + +/** + * iucv_handler_table: List of registered handlers. + */ +static struct list_head iucv_handler_table; + +/** + * iucv_pathid_table: an array of *handler pointing into + * iucv_handler_table for fast indexing by pathid; + */ +static handler **iucv_pathid_table; + +static unsigned long max_connections; + +/** + * iucv_cpuid: contains the logical cpu number of the cpu which + * has declared the iucv buffer by issuing DECLARE_BUFFER. + * If no cpu has done the initialization iucv_cpuid contains -1. 
+ */ +static int iucv_cpuid = -1; +/** + * register_flag: is 0 when external interrupt has not been registered + */ +static int register_flag; + +/****************FIVE 40-BYTE PARAMETER STRUCTURES******************/ +/* Data struct 1: iparml_control + * Used for iucv_accept + * iucv_connect + * iucv_quiesce + * iucv_resume + * iucv_sever + * iucv_retrieve_buffer + * Data struct 2: iparml_dpl (data in parameter list) + * Used for iucv_send_prmmsg + * iucv_send2way_prmmsg + * iucv_send2way_prmmsg_array + * iucv_reply_prmmsg + * Data struct 3: iparml_db (data in a buffer) + * Used for iucv_receive + * iucv_receive_array + * iucv_reject + * iucv_reply + * iucv_reply_array + * iucv_send + * iucv_send_array + * iucv_send2way + * iucv_send2way_array + * iucv_declare_buffer + * Data struct 4: iparml_purge + * Used for iucv_purge + * iucv_query + * Data struct 5: iparml_set_mask + * Used for iucv_set_mask + */ + +typedef struct { + __u16 ippathid; + __u8 ipflags1; + __u8 iprcode; + __u16 ipmsglim; + __u16 res1; + __u8 ipvmid[8]; + __u8 ipuser[16]; + __u8 iptarget[8]; +} iparml_control; + +typedef struct { + __u16 ippathid; + __u8 ipflags1; + __u8 iprcode; + __u32 ipmsgid; + __u32 iptrgcls; + __u8 iprmmsg[8]; + __u32 ipsrccls; + __u32 ipmsgtag; + __u32 ipbfadr2; + __u32 ipbfln2f; + __u32 res; +} iparml_dpl; + +typedef struct { + __u16 ippathid; + __u8 ipflags1; + __u8 iprcode; + __u32 ipmsgid; + __u32 iptrgcls; + __u32 ipbfadr1; + __u32 ipbfln1f; + __u32 ipsrccls; + __u32 ipmsgtag; + __u32 ipbfadr2; + __u32 ipbfln2f; + __u32 res; +} iparml_db; + +typedef struct { + __u16 ippathid; + __u8 ipflags1; + __u8 iprcode; + __u32 ipmsgid; + __u8 ipaudit[3]; + __u8 res1[5]; + __u32 res2; + __u32 ipsrccls; + __u32 ipmsgtag; + __u32 res3[3]; +} iparml_purge; + +typedef struct { + __u8 ipmask; + __u8 res1[2]; + __u8 iprcode; + __u32 res2[9]; +} iparml_set_mask; + +typedef struct { + union { + iparml_control p_ctrl; + iparml_dpl p_dpl; + iparml_db p_db; + iparml_purge p_purge; + iparml_set_mask p_set_mask; + } param; + atomic_t in_use; + __u32 res; +} __attribute__ ((aligned(8))) iucv_param; +#define PARAM_POOL_SIZE (PAGE_SIZE / sizeof(iucv_param)) + +static iucv_param * iucv_param_pool; + +MODULE_AUTHOR("(C) 2001 IBM Corp. by Fritz Elfert (felfert@millenux.com)"); +MODULE_DESCRIPTION("Linux for S/390 IUCV lowlevel driver"); +MODULE_LICENSE("GPL"); + +/* + * Debugging stuff + *******************************************************************************/ + + +#ifdef DEBUG +static int debuglevel = 0; + +module_param(debuglevel, int, 0); +MODULE_PARM_DESC(debuglevel, + "Specifies the debug level (0=off ... 3=all)"); + +static void +iucv_dumpit(char *title, void *buf, int len) +{ + int i; + __u8 *p = (__u8 *)buf; + + if (debuglevel < 3) + return; + + printk(KERN_DEBUG "%s\n", title); + printk(" "); + for (i = 0; i < len; i++) { + if (!(i % 16) && i != 0) + printk ("\n "); + else if (!(i % 4) && i != 0) + printk(" "); + printk("%02X", *p++); + } + if (len % 16) + printk ("\n"); + return; +} +#define iucv_debug(lvl, fmt, args...) \ +do { \ + if (debuglevel >= lvl) \ + printk(KERN_DEBUG "%s: " fmt "\n", __FUNCTION__ , ## args); \ +} while (0) + +#else + +#define iucv_debug(lvl, fmt, args...) 
+#define iucv_dumpit(title, buf, len)
+
+#endif
+
+/*
+ * Internal functions
+ *******************************************************************************/
+
+/**
+ * print start banner
+ */
+static void
+iucv_banner(void)
+{
+        char vbuf[] = "$Revision: 1.43 $";
+        char *version = vbuf;
+
+        if ((version = strchr(version, ':'))) {
+                char *p = strchr(version + 1, '$');
+                if (p)
+                        *p = '\0';
+        } else
+                version = " ??? ";
+        printk(KERN_INFO
+               "IUCV lowlevel driver Version%s initialized\n", version);
+}
+
+/**
+ * iucv_init - Initialization
+ *
+ * Allocates and initializes various data structures.
+ */
+static int
+iucv_init(void)
+{
+        int ret;
+
+        if (iucv_external_int_buffer)
+                return 0;
+
+        if (!MACHINE_IS_VM) {
+                printk(KERN_ERR "IUCV: IUCV connection needs VM as base\n");
+                return -EPROTONOSUPPORT;
+        }
+
+        ret = bus_register(&iucv_bus);
+        if (ret) {
+                printk(KERN_ERR "IUCV: failed to register bus.\n");
+                return ret;
+        }
+
+        iucv_root = s390_root_dev_register("iucv");
+        if (IS_ERR(iucv_root)) {
+                printk(KERN_ERR "IUCV: failed to register iucv root.\n");
+                bus_unregister(&iucv_bus);
+                return PTR_ERR(iucv_root);
+        }
+
+        /* Note: GFP_DMA is used to get memory below 2G */
+        iucv_external_int_buffer = kmalloc(sizeof(iucv_GeneralInterrupt),
+                                           GFP_KERNEL|GFP_DMA);
+        if (!iucv_external_int_buffer) {
+                printk(KERN_WARNING
+                       "%s: Could not allocate external interrupt buffer\n",
+                       __FUNCTION__);
+                s390_root_dev_unregister(iucv_root);
+                bus_unregister(&iucv_bus);
+                return -ENOMEM;
+        }
+        memset(iucv_external_int_buffer, 0, sizeof(iucv_GeneralInterrupt));
+
+        /* Initialize parameter pool */
+        iucv_param_pool = kmalloc(sizeof(iucv_param) * PARAM_POOL_SIZE,
+                                  GFP_KERNEL|GFP_DMA);
+        if (!iucv_param_pool) {
+                printk(KERN_WARNING "%s: Could not allocate param pool\n",
+                       __FUNCTION__);
+                kfree(iucv_external_int_buffer);
+                iucv_external_int_buffer = NULL;
+                s390_root_dev_unregister(iucv_root);
+                bus_unregister(&iucv_bus);
+                return -ENOMEM;
+        }
+        memset(iucv_param_pool, 0, sizeof(iucv_param) * PARAM_POOL_SIZE);
+
+        /* Initialize irq queue */
+        INIT_LIST_HEAD(&iucv_irq_queue);
+
+        /* Initialize handler table */
+        INIT_LIST_HEAD(&iucv_handler_table);
+
+        iucv_banner();
+        return 0;
+}
+
+/**
+ * iucv_exit - De-Initialization
+ *
+ * Frees everything allocated from iucv_init.
+ */
+static int iucv_retrieve_buffer (void);
+
+static void
+iucv_exit(void)
+{
+        iucv_retrieve_buffer();
+        if (iucv_external_int_buffer) {
+                kfree(iucv_external_int_buffer);
+                iucv_external_int_buffer = NULL;
+        }
+        if (iucv_param_pool) {
+                kfree(iucv_param_pool);
+                iucv_param_pool = NULL;
+        }
+        s390_root_dev_unregister(iucv_root);
+        bus_unregister(&iucv_bus);
+        printk(KERN_INFO "IUCV lowlevel driver unloaded\n");
+}
+
+/**
+ * grab_param: - Get a parameter buffer from the pre-allocated pool.
+ *
+ * This function searches for an unused element in the pre-allocated pool
+ * of parameter buffers. If one is found, it marks it "in use" and returns
+ * a pointer to it. The calling function is responsible for releasing it
+ * when it is done with it.
+ *
+ * Returns: A pointer to iucv_param.
+ */
+static __inline__ iucv_param *
+grab_param(void)
+{
+        iucv_param *ptr;
+        static int hint = 0;
+
+        ptr = iucv_param_pool + hint;
+        do {
+                ptr++;
+                if (ptr >= iucv_param_pool + PARAM_POOL_SIZE)
+                        ptr = iucv_param_pool;
+        } while (atomic_compare_and_swap(0, 1, &ptr->in_use));
+        hint = ptr - iucv_param_pool;
+
+        memset(&ptr->param, 0, sizeof(ptr->param));
+        return ptr;
+}
+
+/**
+ * release_param - Release a parameter buffer.
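+ *
+ * Intended pairing with grab_param() (an illustrative sketch only):
+ *        parm = (iparml_db *)grab_param();
+ *        b2f0_result = b2f0(RECEIVE, parm);
+ *        release_param(parm);
+ *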
+ * @p: A pointer to a struct iucv_param, previously obtained by calling + * grab_param(). + * + * This function marks the specified parameter buffer "unused". + */ +static __inline__ void +release_param(void *p) +{ + atomic_set(&((iucv_param *)p)->in_use, 0); +} + +/** + * iucv_add_handler: - Add a new handler + * @new_handler: handle that is being entered into chain. + * + * Places new handle on iucv_handler_table, if identical handler is not + * found. + * + * Returns: 0 on success, !0 on failure (handler already in chain). + */ +static int +iucv_add_handler (handler *new) +{ + ulong flags; + + iucv_debug(1, "entering"); + iucv_dumpit("handler:", new, sizeof(handler)); + + spin_lock_irqsave (&iucv_lock, flags); + if (!list_empty(&iucv_handler_table)) { + struct list_head *lh; + + /** + * Search list for handler with identical id. If one + * is found, the new handler is _not_ added. + */ + list_for_each(lh, &iucv_handler_table) { + handler *h = list_entry(lh, handler, list); + if (!memcmp(&new->id, &h->id, sizeof(h->id))) { + iucv_debug(1, "ret 1"); + spin_unlock_irqrestore (&iucv_lock, flags); + return 1; + } + } + } + /** + * If we get here, no handler was found. + */ + INIT_LIST_HEAD(&new->list); + list_add(&new->list, &iucv_handler_table); + spin_unlock_irqrestore (&iucv_lock, flags); + + iucv_debug(1, "exiting"); + return 0; +} + +/** + * b2f0: + * @code: identifier of IUCV call to CP. + * @parm: pointer to 40 byte iparml area passed to CP + * + * Calls CP to execute IUCV commands. + * + * Returns: return code from CP's IUCV call + */ +static __inline__ ulong +b2f0(__u32 code, void *parm) +{ + iucv_dumpit("iparml before b2f0 call:", parm, sizeof(iucv_param)); + + asm volatile ( + "LRA 1,0(%1)\n\t" + "LR 0,%0\n\t" + ".long 0xb2f01000" + : + : "d" (code), "a" (parm) + : "0", "1" + ); + + iucv_dumpit("iparml after b2f0 call:", parm, sizeof(iucv_param)); + + return (unsigned long)*((__u8 *)(parm + 3)); +} + +/* + * Name: iucv_add_pathid + * Purpose: Adds a path id to the system. + * Input: pathid - pathid that is going to be entered into system + * handle - address of handler that the pathid will be associated + * with. + * pgm_data - token passed in by application. + * Output: 0: successful addition of pathid + * - EINVAL - pathid entry is being used by another application + * - ENOMEM - storage allocation for a new pathid table failed +*/ +static int +__iucv_add_pathid(__u16 pathid, handler *handler) +{ + + iucv_debug(1, "entering"); + + iucv_debug(1, "handler is pointing to %p", handler); + + if (pathid > (max_connections - 1)) + return -EINVAL; + + if (iucv_pathid_table[pathid]) { + iucv_debug(1, "pathid entry is %p", iucv_pathid_table[pathid]); + printk(KERN_WARNING + "%s: Pathid being used, error.\n", __FUNCTION__); + return -EINVAL; + } + iucv_pathid_table[pathid] = handler; + + iucv_debug(1, "exiting"); + return 0; +} /* end of add_pathid function */ + +static int +iucv_add_pathid(__u16 pathid, handler *handler) +{ + ulong flags; + int rc; + + spin_lock_irqsave (&iucv_lock, flags); + rc = __iucv_add_pathid(pathid, handler); + spin_unlock_irqrestore (&iucv_lock, flags); + return rc; +} + +static void +iucv_remove_pathid(__u16 pathid) +{ + ulong flags; + + if (pathid > (max_connections - 1)) + return; + + spin_lock_irqsave (&iucv_lock, flags); + iucv_pathid_table[pathid] = NULL; + spin_unlock_irqrestore (&iucv_lock, flags); +} + +/** + * iucv_declare_buffer_cpuid + * Register at VM for subsequent IUCV operations. This is executed + * on the reserved CPU iucv_cpuid. 
Called from iucv_declare_buffer(). + */ +static void +iucv_declare_buffer_cpuid (void *result) +{ + iparml_db *parm; + + parm = (iparml_db *)grab_param(); + parm->ipbfadr1 = virt_to_phys(iucv_external_int_buffer); + if ((*((ulong *)result) = b2f0(DECLARE_BUFFER, parm)) == 1) + *((ulong *)result) = parm->iprcode; + release_param(parm); +} + +/** + * iucv_retrieve_buffer_cpuid: + * Unregister IUCV usage at VM. This is always executed on the same + * cpu that registered the buffer to VM. + * Called from iucv_retrieve_buffer(). + */ +static void +iucv_retrieve_buffer_cpuid (void *cpu) +{ + iparml_control *parm; + + parm = (iparml_control *)grab_param(); + b2f0(RETRIEVE_BUFFER, parm); + release_param(parm); +} + +/** + * Name: iucv_declare_buffer + * Purpose: Specifies the guests real address of an external + * interrupt. + * Input: void + * Output: iprcode - return code from b2f0 call + */ +static int +iucv_declare_buffer (void) +{ + unsigned long flags; + ulong b2f0_result; + + iucv_debug(1, "entering"); + b2f0_result = -ENODEV; + spin_lock_irqsave (&iucv_lock, flags); + if (iucv_cpuid == -1) { + /* Reserve any cpu for use by iucv. */ + iucv_cpuid = smp_get_cpu(CPU_MASK_ALL); + spin_unlock_irqrestore (&iucv_lock, flags); + smp_call_function_on(iucv_declare_buffer_cpuid, + &b2f0_result, 0, 1, iucv_cpuid); + if (b2f0_result) { + smp_put_cpu(iucv_cpuid); + iucv_cpuid = -1; + } + iucv_debug(1, "Address of EIB = %p", iucv_external_int_buffer); + } else { + spin_unlock_irqrestore (&iucv_lock, flags); + b2f0_result = 0; + } + iucv_debug(1, "exiting"); + return b2f0_result; +} + +/** + * iucv_retrieve_buffer: + * + * Terminates all use of IUCV. + * Returns: return code from CP + */ +static int +iucv_retrieve_buffer (void) +{ + iucv_debug(1, "entering"); + if (iucv_cpuid != -1) { + smp_call_function_on(iucv_retrieve_buffer_cpuid, + 0, 0, 1, iucv_cpuid); + /* Release the cpu reserved by iucv_declare_buffer. */ + smp_put_cpu(iucv_cpuid); + iucv_cpuid = -1; + } + iucv_debug(1, "exiting"); + return 0; +} + +/** + * iucv_remove_handler: + * @users_handler: handler to be removed + * + * Remove handler when application unregisters. + */ +static void +iucv_remove_handler(handler *handler) +{ + unsigned long flags; + + if ((!iucv_pathid_table) || (!handler)) + return; + + iucv_debug(1, "entering"); + + spin_lock_irqsave (&iucv_lock, flags); + list_del(&handler->list); + if (list_empty(&iucv_handler_table)) { + if (register_flag) { + unregister_external_interrupt(0x4000, iucv_irq_handler); + register_flag = 0; + } + } + spin_unlock_irqrestore (&iucv_lock, flags); + + iucv_debug(1, "exiting"); + return; +} + +/** + * iucv_register_program: + * @pgmname: user identification + * @userid: machine identification + * @pgmmask: Indicates which bits in the pgmname and userid combined will be + * used to determine who is given control. + * @ops: Address of interrupt handler table. + * @pgm_data: Application data to be passed to interrupt handlers. + * + * Registers an application with IUCV. + * Returns: + * The address of handler, or NULL on failure. + * NOTE on pgmmask: + * If pgmname, userid and pgmmask are provided, pgmmask is entered into the + * handler as is. + * If pgmmask is NULL, the internal mask is set to all 0xff's + * When userid is NULL, the first 8 bytes of the internal mask are forced + * to 0x00. + * If pgmmask and userid are NULL, the first 8 bytes of the internal mask + * are forced to 0x00 and the last 16 bytes to 0xff. 
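+ *
+ * A minimal registration sketch (hypothetical caller; my_ops must point
+ * to a populated iucv_interrupt_ops_t that stays valid while registered):
+ *        static __u8 pgmname[16] = "MYAPP";
+ *        iucv_handle_t h = iucv_register_program(pgmname, NULL, NULL,
+ *                                                &my_ops, NULL);
+ *        if (!h)
+ *                (registration failed, handle the error)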
+ */ + +iucv_handle_t +iucv_register_program (__u8 pgmname[16], + __u8 userid[8], + __u8 pgmmask[24], + iucv_interrupt_ops_t * ops, void *pgm_data) +{ + ulong rc = 0; /* return code from function calls */ + handler *new_handler; + + iucv_debug(1, "entering"); + + if (ops == NULL) { + /* interrupt table is not defined */ + printk(KERN_WARNING "%s: Interrupt table is not defined, " + "exiting\n", __FUNCTION__); + return NULL; + } + if (!pgmname) { + printk(KERN_WARNING "%s: pgmname not provided\n", __FUNCTION__); + return NULL; + } + + /* Allocate handler entry */ + new_handler = (handler *)kmalloc(sizeof(handler), GFP_ATOMIC); + if (new_handler == NULL) { + printk(KERN_WARNING "%s: storage allocation for new handler " + "failed.\n", __FUNCTION__); + return NULL; + } + + if (!iucv_pathid_table) { + if (iucv_init()) { + kfree(new_handler); + return NULL; + } + + max_connections = iucv_query_maxconn(); + iucv_pathid_table = kmalloc(max_connections * sizeof(handler *), + GFP_ATOMIC); + if (iucv_pathid_table == NULL) { + printk(KERN_WARNING "%s: iucv_pathid_table storage " + "allocation failed\n", __FUNCTION__); + kfree(new_handler); + return NULL; + } + memset (iucv_pathid_table, 0, max_connections * sizeof(handler *)); + } + memset(new_handler, 0, sizeof (handler)); + memcpy(new_handler->id.user_data, pgmname, + sizeof (new_handler->id.user_data)); + if (userid) { + memcpy (new_handler->id.userid, userid, + sizeof (new_handler->id.userid)); + ASCEBC (new_handler->id.userid, + sizeof (new_handler->id.userid)); + EBC_TOUPPER (new_handler->id.userid, + sizeof (new_handler->id.userid)); + + if (pgmmask) { + memcpy (new_handler->id.mask, pgmmask, + sizeof (new_handler->id.mask)); + } else { + memset (new_handler->id.mask, 0xFF, + sizeof (new_handler->id.mask)); + } + } else { + if (pgmmask) { + memcpy (new_handler->id.mask, pgmmask, + sizeof (new_handler->id.mask)); + } else { + memset (new_handler->id.mask, 0xFF, + sizeof (new_handler->id.mask)); + } + memset (new_handler->id.userid, 0x00, + sizeof (new_handler->id.userid)); + } + /* fill in the rest of handler */ + new_handler->pgm_data = pgm_data; + new_handler->interrupt_table = ops; + + /* + * Check if someone else is registered with same pgmname, userid + * and mask. If someone is already registered with same pgmname, + * userid and mask, registration will fail and NULL will be returned + * to the application. + * If identical handler not found, then handler is added to list. 
+ */ + rc = iucv_add_handler(new_handler); + if (rc) { + printk(KERN_WARNING "%s: Someone already registered with same " + "pgmname, userid, pgmmask\n", __FUNCTION__); + kfree (new_handler); + return NULL; + } + + rc = iucv_declare_buffer(); + if (rc) { + char *err = "Unknown"; + iucv_remove_handler(new_handler); + kfree(new_handler); + switch(rc) { + case 0x03: + err = "Directory error"; + break; + case 0x0a: + err = "Invalid length"; + break; + case 0x13: + err = "Buffer already exists"; + break; + case 0x3e: + err = "Buffer overlap"; + break; + case 0x5c: + err = "Paging or storage error"; + break; + } + printk(KERN_WARNING "%s: iucv_declare_buffer " + "returned error 0x%02lx (%s)\n", __FUNCTION__, rc, err); + return NULL; + } + if (!register_flag) { + /* request the 0x4000 external interrupt */ + rc = register_external_interrupt (0x4000, iucv_irq_handler); + if (rc) { + iucv_remove_handler(new_handler); + kfree (new_handler); + printk(KERN_WARNING "%s: " + "register_external_interrupt returned %ld\n", + __FUNCTION__, rc); + return NULL; + + } + register_flag = 1; + } + iucv_debug(1, "exiting"); + return new_handler; +} /* end of register function */ + +/** + * iucv_unregister_program: + * @handle: address of handler + * + * Unregister application with IUCV. + * Returns: + * 0 on success, -EINVAL, if specified handle is invalid. + */ + +int +iucv_unregister_program (iucv_handle_t handle) +{ + handler *h = NULL; + struct list_head *lh; + int i; + ulong flags; + + iucv_debug(1, "entering"); + iucv_debug(1, "address of handler is %p", h); + + /* Checking if handle is valid */ + spin_lock_irqsave (&iucv_lock, flags); + list_for_each(lh, &iucv_handler_table) { + if ((handler *)handle == list_entry(lh, handler, list)) { + h = (handler *)handle; + break; + } + } + if (!h) { + spin_unlock_irqrestore (&iucv_lock, flags); + if (handle) + printk(KERN_WARNING + "%s: Handler not found in iucv_handler_table.\n", + __FUNCTION__); + else + printk(KERN_WARNING + "%s: NULL handle passed by application.\n", + __FUNCTION__); + return -EINVAL; + } + + /** + * First, walk thru iucv_pathid_table and sever any pathid which is + * still pointing to the handler to be removed. + */ + for (i = 0; i < max_connections; i++) + if (iucv_pathid_table[i] == h) { + spin_unlock_irqrestore (&iucv_lock, flags); + iucv_sever(i, h->id.user_data); + spin_lock_irqsave(&iucv_lock, flags); + } + spin_unlock_irqrestore (&iucv_lock, flags); + + iucv_remove_handler(h); + kfree(h); + + iucv_debug(1, "exiting"); + return 0; +} + +/** + * iucv_accept: + * @pathid: Path identification number + * @msglim_reqstd: The number of outstanding messages requested. + * @user_data: Data specified by the iucv_connect function. + * @flags1: Contains options for this path. + * - IPPRTY (0x20) Specifies if you want to send priority message. + * - IPRMDATA (0x80) Specifies whether your program can handle a message + * in the parameter list. + * - IPQUSCE (0x40) Specifies whether you want to quiesce the path being + * established. + * @handle: Address of handler. + * @pgm_data: Application data passed to interrupt handlers. + * @flags1_out: Pointer to an int. If not NULL, on return the options for + * the path are stored at the given location: + * - IPPRTY (0x20) Indicates you may send a priority message. + * @msglim: Pointer to an __u16. If not NULL, on return the maximum + * number of outstanding messages is stored at the given + * location. 
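+ *
+ * Typically called from a ConnectionPending callback; a sketch with
+ * hypothetical names ("ev" being the pending-interrupt buffer):
+ *        __u16 msglim;
+ *        int flags_out;
+ *        rc = iucv_accept(ev->ippathid, 10, NULL, 0, handle, NULL,
+ *                         &flags_out, &msglim);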
+ * + * This function is issued after the user receives a Connection Pending external + * interrupt and now wishes to complete the IUCV communication path. + * Returns: + * return code from CP + */ +int +iucv_accept(__u16 pathid, __u16 msglim_reqstd, + __u8 user_data[16], int flags1, + iucv_handle_t handle, void *pgm_data, + int *flags1_out, __u16 * msglim) +{ + ulong b2f0_result = 0; + ulong flags; + struct list_head *lh; + handler *h = NULL; + iparml_control *parm; + + iucv_debug(1, "entering"); + iucv_debug(1, "pathid = %d", pathid); + + /* Checking if handle is valid */ + spin_lock_irqsave (&iucv_lock, flags); + list_for_each(lh, &iucv_handler_table) { + if ((handler *)handle == list_entry(lh, handler, list)) { + h = (handler *)handle; + break; + } + } + spin_unlock_irqrestore (&iucv_lock, flags); + + if (!h) { + if (handle) + printk(KERN_WARNING + "%s: Handler not found in iucv_handler_table.\n", + __FUNCTION__); + else + printk(KERN_WARNING + "%s: NULL handle passed by application.\n", + __FUNCTION__); + return -EINVAL; + } + + parm = (iparml_control *)grab_param(); + + parm->ippathid = pathid; + parm->ipmsglim = msglim_reqstd; + if (user_data) + memcpy(parm->ipuser, user_data, sizeof(parm->ipuser)); + + parm->ipflags1 = (__u8)flags1; + b2f0_result = b2f0(ACCEPT, parm); + + if (!b2f0_result) { + if (msglim) + *msglim = parm->ipmsglim; + if (pgm_data) + h->pgm_data = pgm_data; + if (flags1_out) + *flags1_out = (parm->ipflags1 & IPPRTY) ? IPPRTY : 0; + } + release_param(parm); + + iucv_debug(1, "exiting"); + return b2f0_result; +} + +/** + * iucv_connect: + * @pathid: Path identification number + * @msglim_reqstd: Number of outstanding messages requested + * @user_data: 16-byte user data + * @userid: 8-byte of user identification + * @system_name: 8-byte identifying the system name + * @flags1: Specifies options for this path: + * - IPPRTY (0x20) Specifies if you want to send priority message. + * - IPRMDATA (0x80) Specifies whether your program can handle a message + * in the parameter list. + * - IPQUSCE (0x40) Specifies whether you want to quiesce the path being + * established. + * - IPLOCAL (0x01) Allows an application to force the partner to be on the + * local system. If local is specified then target class + * cannot be specified. + * @flags1_out: Pointer to an int. If not NULL, on return the options for + * the path are stored at the given location: + * - IPPRTY (0x20) Indicates you may send a priority message. + * @msglim: Pointer to an __u16. If not NULL, on return the maximum + * number of outstanding messages is stored at the given + * location. + * @handle: Address of handler. + * @pgm_data: Application data to be passed to interrupt handlers. + * + * This function establishes an IUCV path. Although the connect may complete + * successfully, you are not able to use the path until you receive an IUCV + * Connection Complete external interrupt. 
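+ *
+ * Connection setup sketch (values hypothetical; "TESTVM  " stands for the
+ * 8-character, blank-padded user ID of the partner guest):
+ *        __u16 pathid;
+ *        rc = iucv_connect(&pathid, 10, NULL, (__u8 *)"TESTVM  ", NULL, 0,
+ *                          NULL, NULL, handle, NULL);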
+ * Returns: return code from CP, or one of the following + * - ENOMEM + * - return code from iucv_declare_buffer + * - EINVAL - invalid handle passed by application + * - EINVAL - pathid address is NULL + * - ENOMEM - pathid table storage allocation failed + * - return code from internal function add_pathid + */ +int +iucv_connect (__u16 *pathid, __u16 msglim_reqstd, + __u8 user_data[16], __u8 userid[8], + __u8 system_name[8], int flags1, + int *flags1_out, __u16 * msglim, + iucv_handle_t handle, void *pgm_data) +{ + iparml_control *parm; + iparml_control local_parm; + struct list_head *lh; + ulong b2f0_result = 0; + ulong flags; + int add_pathid_result = 0; + handler *h = NULL; + __u8 no_memory[16] = "NO MEMORY"; + + iucv_debug(1, "entering"); + + /* Checking if handle is valid */ + spin_lock_irqsave (&iucv_lock, flags); + list_for_each(lh, &iucv_handler_table) { + if ((handler *)handle == list_entry(lh, handler, list)) { + h = (handler *)handle; + break; + } + } + spin_unlock_irqrestore (&iucv_lock, flags); + + if (!h) { + if (handle) + printk(KERN_WARNING + "%s: Handler not found in iucv_handler_table.\n", + __FUNCTION__); + else + printk(KERN_WARNING + "%s: NULL handle passed by application.\n", + __FUNCTION__); + return -EINVAL; + } + + if (pathid == NULL) { + printk(KERN_WARNING "%s: NULL pathid pointer\n", + __FUNCTION__); + return -EINVAL; + } + + parm = (iparml_control *)grab_param(); + + parm->ipmsglim = msglim_reqstd; + + if (user_data) + memcpy(parm->ipuser, user_data, sizeof(parm->ipuser)); + + if (userid) { + memcpy(parm->ipvmid, userid, sizeof(parm->ipvmid)); + ASCEBC(parm->ipvmid, sizeof(parm->ipvmid)); + EBC_TOUPPER(parm->ipvmid, sizeof(parm->ipvmid)); + } + + if (system_name) { + memcpy(parm->iptarget, system_name, sizeof(parm->iptarget)); + ASCEBC(parm->iptarget, sizeof(parm->iptarget)); + EBC_TOUPPER(parm->iptarget, sizeof(parm->iptarget)); + } + + /* In order to establish an IUCV connection, the procedure is: + * + * b2f0(CONNECT) + * take the ippathid from the b2f0 call + * register the handler to the ippathid + * + * Unfortunately, the ConnectionEstablished message gets sent after the + * b2f0(CONNECT) call but before the register is handled. + * + * In order for this race condition to be eliminated, the IUCV Control + * Interrupts must be disabled for the above procedure. + * + * David Kennedy <dkennedy@linuxcare.com> + */ + + /* Enable everything but IUCV Control messages */ + iucv_setmask(~(AllInterrupts)); + messagesDisabled = 1; + + spin_lock_irqsave (&iucv_lock, flags); + parm->ipflags1 = (__u8)flags1; + b2f0_result = b2f0(CONNECT, parm); + memcpy(&local_parm, parm, sizeof(local_parm)); + release_param(parm); + parm = &local_parm; + if (!b2f0_result) + add_pathid_result = __iucv_add_pathid(parm->ippathid, h); + spin_unlock_irqrestore (&iucv_lock, flags); + + if (b2f0_result) { + iucv_setmask(~0); + messagesDisabled = 0; + return b2f0_result; + } + + *pathid = parm->ippathid; + + /* Enable everything again */ + iucv_setmask(IUCVControlInterruptsFlag); + + if (msglim) + *msglim = parm->ipmsglim; + if (flags1_out) + *flags1_out = (parm->ipflags1 & IPPRTY) ? IPPRTY : 0; + + if (add_pathid_result) { + iucv_sever(*pathid, no_memory); + printk(KERN_WARNING "%s: add_pathid failed with rc =" + " %d\n", __FUNCTION__, add_pathid_result); + return(add_pathid_result); + } + + iucv_debug(1, "exiting"); + return b2f0_result; +} + +/** + * iucv_purge: + * @pathid: Path identification number + * @msgid: Message ID of message to purge. 
+ * @srccls: Message class of the message to purge.
+ * @audit: Pointer to an __u32. If not NULL, on return, information about
+ * asynchronous errors that may have affected the normal completion
+ * of this message is stored at the given location.
+ *
+ * Cancels a message you have sent.
+ * Returns: return code from CP
+ */
+int
+iucv_purge (__u16 pathid, __u32 msgid, __u32 srccls, __u32 *audit)
+{
+        iparml_purge *parm;
+        ulong b2f0_result = 0;
+
+        iucv_debug(1, "entering");
+        iucv_debug(1, "pathid = %d", pathid);
+
+        parm = (iparml_purge *)grab_param();
+
+        parm->ipmsgid = msgid;
+        parm->ippathid = pathid;
+        parm->ipsrccls = srccls;
+        parm->ipflags1 |= (IPSRCCLS | IPFGMID | IPFGPID);
+        b2f0_result = b2f0(PURGE, parm);
+
+        if (!b2f0_result && audit) {
+                memcpy(audit, parm->ipaudit, sizeof(parm->ipaudit));
+                /* parm->ipaudit has only 3 bytes */
+                *audit >>= 8;
+        }
+
+        release_param(parm);
+
+        iucv_debug(1, "b2f0_result = %ld", b2f0_result);
+        iucv_debug(1, "exiting");
+        return b2f0_result;
+}
+
+/**
+ * iucv_query_generic:
+ * @want_maxconn: Flag, describing which value is to be returned.
+ *
+ * Helper function for iucv_query_maxconn() and iucv_query_bufsize().
+ *
+ * Returns: The buffer size if want_maxconn is 0; the maximum number of
+ * connections if want_maxconn is 1; or an error code < 0 on failure.
+ */
+static int
+iucv_query_generic(int want_maxconn)
+{
+        iparml_purge *parm = (iparml_purge *)grab_param();
+        int bufsize, maxconn;
+        int ccode;
+
+        /**
+         * Call b2f0 and store R0 (max buffer size),
+         * R1 (max connections) and CC.
+         */
+        asm volatile (
+                "LRA 1,0(%4)\n\t"
+                "LR 0,%3\n\t"
+                ".long 0xb2f01000\n\t"
+                "IPM %0\n\t"
+                "SRL %0,28\n\t"
+                "ST 0,%1\n\t"
+                "ST 1,%2\n\t"
+                : "=d" (ccode), "=m" (bufsize), "=m" (maxconn)
+                : "d" (QUERY), "a" (parm)
+                : "0", "1", "cc"
+        );
+        release_param(parm);
+
+        if (ccode)
+                return -EPERM;
+        if (want_maxconn)
+                return maxconn;
+        return bufsize;
+}
+
+/**
+ * iucv_query_maxconn:
+ *
+ * Determines the maximum number of connections that may be established.
+ *
+ * Returns: Maximum number of connections that can be established.
+ */
+ulong
+iucv_query_maxconn(void)
+{
+        return iucv_query_generic(1);
+}
+
+/**
+ * iucv_query_bufsize:
+ *
+ * Determines the size of the external interrupt buffer.
+ *
+ * Returns: Size of external interrupt buffer.
+ */
+ulong
+iucv_query_bufsize (void)
+{
+        return iucv_query_generic(0);
+}
+
+/**
+ * iucv_quiesce:
+ * @pathid: Path identification number
+ * @user_data: 16-byte user data
+ *
+ * Temporarily suspends incoming messages on an IUCV path.
+ * You can later reactivate the path by invoking the iucv_resume function.
+ * Returns: return code from CP
+ */
+int
+iucv_quiesce (__u16 pathid, __u8 user_data[16])
+{
+        iparml_control *parm;
+        ulong b2f0_result = 0;
+
+        iucv_debug(1, "entering");
+        iucv_debug(1, "pathid = %d", pathid);
+
+        parm = (iparml_control *)grab_param();
+
+        memcpy(parm->ipuser, user_data, sizeof(parm->ipuser));
+        parm->ippathid = pathid;
+
+        b2f0_result = b2f0(QUIESCE, parm);
+        release_param(parm);
+
+        iucv_debug(1, "b2f0_result = %ld", b2f0_result);
+        iucv_debug(1, "exiting");
+
+        return b2f0_result;
+}
+
+/**
+ * iucv_receive:
+ * @pathid: Path identification number.
+ * @buffer: Address of buffer to receive. Must be below 2G.
+ * @buflen: Length of buffer to receive.
+ * @msgid: Specifies the message ID.
+ * @trgcls: Specifies target class.
+ * @flags1_out: Receives options for path on return.
+ * - IPNORPY (0x10) Specifies whether a reply is required + * - IPPRTY (0x20) Specifies if you want to send priority message + * - IPRMDATA (0x80) Specifies the data is contained in the parameter list + * @residual_buffer: Receives the address of buffer updated by the number + * of bytes you have received on return. + * @residual_length: On return, receives one of the following values: + * - 0 If the receive buffer is the same length as + * the message. + * - Remaining bytes in buffer If the receive buffer is longer than the + * message. + * - Remaining bytes in message If the receive buffer is shorter than the + * message. + * + * This function receives messages that are being sent to you over established + * paths. + * Returns: return code from CP IUCV call; If the receive buffer is shorter + * than the message, always 5 + * -EINVAL - buffer address is pointing to NULL + */ +int +iucv_receive (__u16 pathid, __u32 msgid, __u32 trgcls, + void *buffer, ulong buflen, + int *flags1_out, ulong * residual_buffer, ulong * residual_length) +{ + iparml_db *parm; + ulong b2f0_result; + int moved = 0; /* number of bytes moved from parmlist to buffer */ + + iucv_debug(2, "entering"); + + if (!buffer) + return -EINVAL; + + parm = (iparml_db *)grab_param(); + + parm->ipbfadr1 = (__u32) (addr_t) buffer; + parm->ipbfln1f = (__u32) ((ulong) buflen); + parm->ipmsgid = msgid; + parm->ippathid = pathid; + parm->iptrgcls = trgcls; + parm->ipflags1 = (IPFGPID | IPFGMID | IPFGMCL); + + b2f0_result = b2f0(RECEIVE, parm); + + if (!b2f0_result || b2f0_result == 5) { + if (flags1_out) { + iucv_debug(2, "*flags1_out = %d", *flags1_out); + *flags1_out = (parm->ipflags1 & (~0x07)); + iucv_debug(2, "*flags1_out = %d", *flags1_out); + } + + if (!(parm->ipflags1 & IPRMDATA)) { /*msg not in parmlist */ + if (residual_length) + *residual_length = parm->ipbfln1f; + + if (residual_buffer) + *residual_buffer = parm->ipbfadr1; + } else { + moved = min_t (unsigned long, buflen, 8); + + memcpy ((char *) buffer, + (char *) &parm->ipbfadr1, moved); + + if (buflen < 8) + b2f0_result = 5; + + if (residual_length) + *residual_length = abs (buflen - 8); + + if (residual_buffer) + *residual_buffer = (ulong) (buffer + moved); + } + } + release_param(parm); + + iucv_debug(2, "exiting"); + return b2f0_result; +} + +/* + * Name: iucv_receive_array + * Purpose: This function receives messages that are being sent to you + * over established paths. + * Input: pathid - path identification number + * buffer - address of array of buffers + * buflen - total length of buffers + * msgid - specifies the message ID. + * trgcls - specifies target class + * Output: + * flags1_out: Options for path. + * IPNORPY - 0x10 specifies whether a reply is required + * IPPRTY - 0x20 specifies if you want to send priority message + * IPRMDATA - 0x80 specifies the data is contained in the parameter list + * residual_buffer - address points to the current list entry IUCV + * is working on. + * residual_length - + * Contains one of the following values, if the receive buffer is: + * The same length as the message, this field is zero. + * Longer than the message, this field contains the number of + * bytes remaining in the buffer. + * Shorter than the message, this field contains the residual + * count (that is, the number of bytes remaining in the + * message that does not fit into the buffer. In this case + * b2f0_result = 5. 
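+ * Example: a 10-byte message received into 6 bytes of buffers
+ * gives b2f0_result = 5 and residual_length = 4; received into
+ * 16 bytes of buffers it gives b2f0_result = 0 and
+ * residual_length = 6.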
+ * Return: b2f0_result - return code from CP + * (-EINVAL) - buffer address is NULL + */ +int +iucv_receive_array (__u16 pathid, + __u32 msgid, __u32 trgcls, + iucv_array_t * buffer, ulong buflen, + int *flags1_out, + ulong * residual_buffer, ulong * residual_length) +{ + iparml_db *parm; + ulong b2f0_result; + int i = 0, moved = 0, need_to_move = 8, dyn_len; + + iucv_debug(2, "entering"); + + if (!buffer) + return -EINVAL; + + parm = (iparml_db *)grab_param(); + + parm->ipbfadr1 = (__u32) ((ulong) buffer); + parm->ipbfln1f = (__u32) buflen; + parm->ipmsgid = msgid; + parm->ippathid = pathid; + parm->iptrgcls = trgcls; + parm->ipflags1 = (IPBUFLST | IPFGPID | IPFGMID | IPFGMCL); + + b2f0_result = b2f0(RECEIVE, parm); + + if (!b2f0_result || b2f0_result == 5) { + + if (flags1_out) { + iucv_debug(2, "*flags1_out = %d", *flags1_out); + *flags1_out = (parm->ipflags1 & (~0x07)); + iucv_debug(2, "*flags1_out = %d", *flags1_out); + } + + if (!(parm->ipflags1 & IPRMDATA)) { /*msg not in parmlist */ + + if (residual_length) + *residual_length = parm->ipbfln1f; + + if (residual_buffer) + *residual_buffer = parm->ipbfadr1; + + } else { + /* copy msg from parmlist to users array. */ + + while ((moved < 8) && (moved < buflen)) { + dyn_len = + min_t (unsigned int, + (buffer + i)->length, need_to_move); + + memcpy ((char *)((ulong)((buffer + i)->address)), + ((char *) &parm->ipbfadr1) + moved, + dyn_len); + + moved += dyn_len; + need_to_move -= dyn_len; + + (buffer + i)->address = + (__u32) + ((ulong)(__u8 *) ((ulong)(buffer + i)->address) + + dyn_len); + + (buffer + i)->length -= dyn_len; + i++; + } + + if (need_to_move) /* buflen < 8 bytes */ + b2f0_result = 5; + + if (residual_length) + *residual_length = abs (buflen - 8); + + if (residual_buffer) { + if (!moved) + *residual_buffer = (ulong) buffer; + else + *residual_buffer = + (ulong) (buffer + (i - 1)); + } + + } + } + release_param(parm); + + iucv_debug(2, "exiting"); + return b2f0_result; +} + +/** + * iucv_reject: + * @pathid: Path identification number. + * @msgid: Message ID of the message to reject. + * @trgcls: Target class of the message to reject. + * Returns: return code from CP + * + * Refuses a specified message. Between the time you are notified of a + * message and the time that you complete the message, the message may + * be rejected. + */ +int +iucv_reject (__u16 pathid, __u32 msgid, __u32 trgcls) +{ + iparml_db *parm; + ulong b2f0_result = 0; + + iucv_debug(1, "entering"); + iucv_debug(1, "pathid = %d", pathid); + + parm = (iparml_db *)grab_param(); + + parm->ippathid = pathid; + parm->ipmsgid = msgid; + parm->iptrgcls = trgcls; + parm->ipflags1 = (IPFGMCL | IPFGMID | IPFGPID); + + b2f0_result = b2f0(REJECT, parm); + release_param(parm); + + iucv_debug(1, "b2f0_result = %ld", b2f0_result); + iucv_debug(1, "exiting"); + + return b2f0_result; +} + +/* + * Name: iucv_reply + * Purpose: This function responds to the two-way messages that you + * receive. You must identify completely the message to + * which you wish to reply. ie, pathid, msgid, and trgcls. + * Input: pathid - path identification number + * msgid - specifies the message ID. + * trgcls - specifies target class + * flags1 - option for path + * IPPRTY- 0x20 - specifies if you want to send priority message + * buffer - address of reply buffer + * buflen - length of reply buffer + * Output: ipbfadr2 - Address of buffer updated by the number + * of bytes you have moved. 
+ * ipbfln2f - Contains one of the following values:
+ * If the answer buffer is the same length as the reply, this field
+ * contains zero.
+ * If the answer buffer is longer than the reply, this field contains
+ * the number of bytes remaining in the buffer.
+ * If the answer buffer is shorter than the reply, this field contains
+ * a residual count (that is, the number of bytes remaining in the
+ * reply that does not fit into the buffer). In this
+ * case b2f0_result = 5.
+ * Return: b2f0_result - return code from CP
+ * (-EINVAL) - buffer address is NULL
+ */
+int
+iucv_reply (__u16 pathid,
+            __u32 msgid, __u32 trgcls,
+            int flags1,
+            void *buffer, ulong buflen, ulong * ipbfadr2, ulong * ipbfln2f)
+{
+        iparml_db *parm;
+        ulong b2f0_result;
+
+        iucv_debug(2, "entering");
+
+        if (!buffer)
+                return -EINVAL;
+
+        parm = (iparml_db *)grab_param();
+
+        parm->ipbfadr2 = (__u32) ((ulong) buffer);
+        parm->ipbfln2f = (__u32) buflen;        /* length of message */
+        parm->ippathid = pathid;
+        parm->ipmsgid = msgid;
+        parm->iptrgcls = trgcls;
+        parm->ipflags1 = (__u8) flags1;        /* priority message */
+
+        b2f0_result = b2f0(REPLY, parm);
+
+        if ((!b2f0_result) || (b2f0_result == 5)) {
+                if (ipbfadr2)
+                        *ipbfadr2 = parm->ipbfadr2;
+                if (ipbfln2f)
+                        *ipbfln2f = parm->ipbfln2f;
+        }
+        release_param(parm);
+
+        iucv_debug(2, "exiting");
+
+        return b2f0_result;
+}
+
+/*
+ * Name: iucv_reply_array
+ * Purpose: This function responds to the two-way messages that you
+ * receive. You must identify completely the message to
+ * which you wish to reply. i.e., pathid, msgid, and trgcls.
+ * The array identifies a list of addresses and lengths of
+ * discontiguous buffers that contain the reply data.
+ * Input: pathid - path identification number
+ * msgid - specifies the message ID.
+ * trgcls - specifies target class
+ * flags1 - option for path
+ * IPPRTY- specifies if you want to send priority message
+ * buffer - address of array of reply buffers
+ * buflen - total length of reply buffers
+ * Output: ipbfadr2 - Address of buffer which IUCV is currently working on.
+ * ipbfln2f - Contains one of the following values:
+ * If the answer buffer is the same length as the reply, this field
+ * contains zero.
+ * If the answer buffer is longer than the reply, this field contains
+ * the number of bytes remaining in the buffer.
+ * If the answer buffer is shorter than the reply, this field contains
+ * a residual count (that is, the number of bytes remaining in the
+ * reply that does not fit into the buffer). In this
+ * case b2f0_result = 5.
+ * Return: b2f0_result - return code from CP
+ * (-EINVAL) - buffer address is NULL
+*/
+int
+iucv_reply_array (__u16 pathid,
+                  __u32 msgid, __u32 trgcls,
+                  int flags1,
+                  iucv_array_t * buffer,
+                  ulong buflen, ulong * ipbfadr2, ulong * ipbfln2f)
+{
+        iparml_db *parm;
+        ulong b2f0_result;
+
+        iucv_debug(2, "entering");
+
+        if (!buffer)
+                return -EINVAL;
+
+        parm = (iparml_db *)grab_param();
+
+        parm->ipbfadr2 = (__u32) ((ulong) buffer);
+        parm->ipbfln2f = buflen;        /* length of message */
+        parm->ippathid = pathid;
+        parm->ipmsgid = msgid;
+        parm->iptrgcls = trgcls;
+        parm->ipflags1 = (IPANSLST | flags1);
+
+        b2f0_result = b2f0(REPLY, parm);
+
+        if ((!b2f0_result) || (b2f0_result == 5)) {
+
+                if (ipbfadr2)
+                        *ipbfadr2 = parm->ipbfadr2;
+                if (ipbfln2f)
+                        *ipbfln2f = parm->ipbfln2f;
+        }
+        release_param(parm);
+
+        iucv_debug(2, "exiting");
+
+        return b2f0_result;
+}
+
+/*
+ * Name: iucv_reply_prmmsg
+ * Purpose: This function responds to the two-way messages that you
+ * receive. You must identify completely the message to
+ * which you wish to reply. i.e., pathid, msgid, and trgcls.
+ * Prmmsg signifies the data is moved into the
+ * parameter list.
+ * Input: pathid - path identification number
+ * msgid - specifies the message ID.
+ * trgcls - specifies target class
+ * flags1 - option for path
+ * IPPRTY- specifies if you want to send priority message
+ * prmmsg - 8-bytes of data to be placed into the parameter
+ * list.
+ * Output: NA
+ * Return: b2f0_result - return code from CP
+*/
+int
+iucv_reply_prmmsg (__u16 pathid,
+                   __u32 msgid, __u32 trgcls, int flags1, __u8 prmmsg[8])
+{
+        iparml_dpl *parm;
+        ulong b2f0_result;
+
+        iucv_debug(2, "entering");
+
+        parm = (iparml_dpl *)grab_param();
+
+        parm->ippathid = pathid;
+        parm->ipmsgid = msgid;
+        parm->iptrgcls = trgcls;
+        memcpy(parm->iprmmsg, prmmsg, sizeof (parm->iprmmsg));
+        parm->ipflags1 = (IPRMDATA | flags1);
+
+        b2f0_result = b2f0(REPLY, parm);
+        release_param(parm);
+
+        iucv_debug(2, "exiting");
+
+        return b2f0_result;
+}
+
+/**
+ * iucv_resume:
+ * @pathid: Path identification number
+ * @user_data: 16 bytes of user data
+ *
+ * This function restores communication over a quiesced path.
+ * Returns: return code from CP
+ */
+int
+iucv_resume (__u16 pathid, __u8 user_data[16])
+{
+        iparml_control *parm;
+        ulong b2f0_result = 0;
+
+        iucv_debug(1, "entering");
+        iucv_debug(1, "pathid = %d", pathid);
+
+        parm = (iparml_control *)grab_param();
+
+        /* copy all 16 bytes of user data; sizeof(*user_data) would only
+         * copy the first byte */
+        memcpy (parm->ipuser, user_data, sizeof (parm->ipuser));
+        parm->ippathid = pathid;
+
+        b2f0_result = b2f0(RESUME, parm);
+        release_param(parm);
+
+        iucv_debug(1, "exiting");
+
+        return b2f0_result;
+}
+
+/*
+ * Name: iucv_send
+ * Purpose: Sends messages.
+ * Input: pathid - ushort, pathid
+ * msgid - ulong *, id of message returned to caller
+ * trgcls - ulong, target message class
+ * srccls - ulong, source message class
+ * msgtag - ulong, message tag
+ * flags1 - Contains options for this path.
+ * IPPRTY - 0x20 - specifies if you want to send a priority message.
+ * buffer - pointer to buffer
+ * buflen - ulong, length of buffer
+ * Output: b2f0_result - return code from b2f0 call
+ * msgid - returns message id
+ */
+int
+iucv_send (__u16 pathid, __u32 * msgid,
+           __u32 trgcls, __u32 srccls,
+           __u32 msgtag, int flags1, void *buffer, ulong buflen)
+{
+        iparml_db *parm;
+        ulong b2f0_result;
+
+        iucv_debug(2, "entering");
+
+        if (!buffer)
+                return -EINVAL;
+
+        parm = (iparml_db *)grab_param();
+
+        parm->ipbfadr1 = (__u32) ((ulong) buffer);
+        parm->ippathid = pathid;
+        parm->iptrgcls = trgcls;
+        parm->ipbfln1f = (__u32) buflen;        /* length of message */
+        parm->ipsrccls = srccls;
+        parm->ipmsgtag = msgtag;
+        parm->ipflags1 = (IPNORPY | flags1);        /* one way priority message */
+
+        b2f0_result = b2f0(SEND, parm);
+
+        if ((!b2f0_result) && (msgid))
+                *msgid = parm->ipmsgid;
+        release_param(parm);
+
+        iucv_debug(2, "exiting");
+
+        return b2f0_result;
+}
+
+/*
+ * Name: iucv_send_array
+ * Purpose: This function transmits data to another application.
+ * The contents of buffer is the address of the array of
+ * addresses and lengths of discontiguous buffers that hold
+ * the message text. This is a one-way message and the
+ * receiver will not reply to the message.
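+ * A sketch with a hypothetical two-element scatter list ("hdr" and
+ * "payload" being caller buffers below 2G):
+ *        iucv_array_t vec[2];
+ *        vec[0].address = (__u32)(ulong)hdr;     vec[0].length = hlen;
+ *        vec[1].address = (__u32)(ulong)payload; vec[1].length = plen;
+ *        rc = iucv_send_array(pathid, &msgid, trgcls, 0, 0, 0,
+ *                             vec, hlen + plen);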
+ * Input: pathid - path identification number
+ * trgcls - specifies target class
+ * srccls - specifies the source message class
+ * msgtag - specifies a tag to be associated with the message
+ * flags1 - option for path
+ * IPPRTY- specifies if you want to send priority message
+ * buffer - address of array of send buffers
+ * buflen - total length of send buffers
+ * Output: msgid - specifies the message ID.
+ * Return: b2f0_result - return code from CP
+ * (-EINVAL) - buffer address is NULL
+ */
+int
+iucv_send_array (__u16 pathid,
+                 __u32 * msgid,
+                 __u32 trgcls,
+                 __u32 srccls,
+                 __u32 msgtag, int flags1, iucv_array_t * buffer, ulong buflen)
+{
+        iparml_db *parm;
+        ulong b2f0_result;
+
+        iucv_debug(2, "entering");
+
+        if (!buffer)
+                return -EINVAL;
+
+        parm = (iparml_db *)grab_param();
+
+        parm->ippathid = pathid;
+        parm->iptrgcls = trgcls;
+        parm->ipbfadr1 = (__u32) ((ulong) buffer);
+        parm->ipbfln1f = (__u32) buflen;        /* length of message */
+        parm->ipsrccls = srccls;
+        parm->ipmsgtag = msgtag;
+        parm->ipflags1 = (IPNORPY | IPBUFLST | flags1);
+        b2f0_result = b2f0(SEND, parm);
+
+        if ((!b2f0_result) && (msgid))
+                *msgid = parm->ipmsgid;
+        release_param(parm);
+
+        iucv_debug(2, "exiting");
+        return b2f0_result;
+}
+
+/*
+ * Name: iucv_send_prmmsg
+ * Purpose: This function transmits data to another application.
+ * Prmmsg specifies that the 8-bytes of data are to be moved
+ * into the parameter list. This is a one-way message and the
+ * receiver will not reply to the message.
+ * Input: pathid - path identification number
+ * trgcls - specifies target class
+ * srccls - specifies the source message class
+ * msgtag - specifies a tag to be associated with the message
+ * flags1 - option for path
+ * IPPRTY- specifies if you want to send priority message
+ * prmmsg - 8-bytes of data to be placed into parameter list
+ * Output: msgid - specifies the message ID.
+ * Return: b2f0_result - return code from CP
+*/
+int
+iucv_send_prmmsg (__u16 pathid,
+                  __u32 * msgid,
+                  __u32 trgcls,
+                  __u32 srccls, __u32 msgtag, int flags1, __u8 prmmsg[8])
+{
+        iparml_dpl *parm;
+        ulong b2f0_result;
+
+        iucv_debug(2, "entering");
+
+        parm = (iparml_dpl *)grab_param();
+
+        parm->ippathid = pathid;
+        parm->iptrgcls = trgcls;
+        parm->ipsrccls = srccls;
+        parm->ipmsgtag = msgtag;
+        parm->ipflags1 = (IPRMDATA | IPNORPY | flags1);
+        memcpy(parm->iprmmsg, prmmsg, sizeof(parm->iprmmsg));
+
+        b2f0_result = b2f0(SEND, parm);
+
+        if ((!b2f0_result) && (msgid))
+                *msgid = parm->ipmsgid;
+        release_param(parm);
+
+        iucv_debug(2, "exiting");
+
+        return b2f0_result;
+}
+
+/*
+ * Name: iucv_send2way
+ * Purpose: This function transmits data to another application.
+ * Data to be transmitted is in a buffer. The receiver
+ * of the send is expected to reply to the message and
+ * a buffer is provided into which IUCV moves the reply
+ * to this message.
+ * Input: pathid - path identification number
+ * trgcls - specifies target class
+ * srccls - specifies the source message class
+ * msgtag - specifies a tag associated with the message
+ * flags1 - option for path
+ * IPPRTY- specifies if you want to send priority message
+ * buffer - address of send buffer
+ * buflen - length of send buffer
+ * ansbuf - address of buffer to reply with
+ * anslen - length of buffer to reply with
+ * Output: msgid - specifies the message ID.
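+ * A two-way exchange sketch (hypothetical request/answer buffers,
+ * both below 2G):
+ *        rc = iucv_send2way(pathid, &msgid, trgcls, 0, 0, 0,
+ *                           req, reqlen, ans, anslen);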
+ * Return: b2f0_result - return code from CP
+ * (-EINVAL) - buffer or ansbuf address is NULL
+ */
+int
+iucv_send2way (__u16 pathid,
+               __u32 * msgid,
+               __u32 trgcls,
+               __u32 srccls,
+               __u32 msgtag,
+               int flags1,
+               void *buffer, ulong buflen, void *ansbuf, ulong anslen)
+{
+        iparml_db *parm;
+        ulong b2f0_result;
+
+        iucv_debug(2, "entering");
+
+        if (!buffer || !ansbuf)
+                return -EINVAL;
+
+        parm = (iparml_db *)grab_param();
+
+        parm->ippathid = pathid;
+        parm->iptrgcls = trgcls;
+        parm->ipbfadr1 = (__u32) ((ulong) buffer);
+        parm->ipbfln1f = (__u32) buflen;        /* length of message */
+        parm->ipbfadr2 = (__u32) ((ulong) ansbuf);
+        parm->ipbfln2f = (__u32) anslen;
+        parm->ipsrccls = srccls;
+        parm->ipmsgtag = msgtag;
+        parm->ipflags1 = flags1;        /* priority message */
+
+        b2f0_result = b2f0(SEND, parm);
+
+        if ((!b2f0_result) && (msgid))
+                *msgid = parm->ipmsgid;
+        release_param(parm);
+
+        iucv_debug(2, "exiting");
+
+        return b2f0_result;
+}
+
+/*
+ * Name: iucv_send2way_array
+ * Purpose: This function transmits data to another application.
+ * The contents of buffer is the address of the array of
+ * addresses and lengths of discontiguous buffers that hold
+ * the message text. The receiver of the send is expected to
+ * reply to the message and a buffer is provided into which
+ * IUCV moves the reply to this message.
+ * Input: pathid - path identification number
+ * trgcls - specifies target class
+ * srccls - specifies the source message class
+ * msgtag - specifies a tag to be associated with the message
+ * flags1 - option for path
+ * IPPRTY- specifies if you want to send priority message
+ * buffer - address of array of send buffers
+ * buflen - total length of send buffers
+ * ansbuf - address of buffer to reply with
+ * anslen - length of buffer to reply with
+ * Output: msgid - specifies the message ID.
+ * Return: b2f0_result - return code from CP
+ * (-EINVAL) - buffer or ansbuf address is NULL
+ */
+int
+iucv_send2way_array (__u16 pathid,
+                     __u32 * msgid,
+                     __u32 trgcls,
+                     __u32 srccls,
+                     __u32 msgtag,
+                     int flags1,
+                     iucv_array_t * buffer,
+                     ulong buflen, iucv_array_t * ansbuf, ulong anslen)
+{
+        iparml_db *parm;
+        ulong b2f0_result;
+
+        iucv_debug(2, "entering");
+
+        if (!buffer || !ansbuf)
+                return -EINVAL;
+
+        parm = (iparml_db *)grab_param();
+
+        parm->ippathid = pathid;
+        parm->iptrgcls = trgcls;
+        parm->ipbfadr1 = (__u32) ((ulong) buffer);
+        parm->ipbfln1f = (__u32) buflen;        /* length of message */
+        parm->ipbfadr2 = (__u32) ((ulong) ansbuf);
+        parm->ipbfln2f = (__u32) anslen;
+        parm->ipsrccls = srccls;
+        parm->ipmsgtag = msgtag;
+        parm->ipflags1 = (IPBUFLST | IPANSLST | flags1);
+        b2f0_result = b2f0(SEND, parm);
+        if ((!b2f0_result) && (msgid))
+                *msgid = parm->ipmsgid;
+        release_param(parm);
+
+        iucv_debug(2, "exiting");
+        return b2f0_result;
+}
+
+/*
+ * Name: iucv_send2way_prmmsg
+ * Purpose: This function transmits data to another application.
+ * Prmmsg specifies that the 8-bytes of data are to be moved
+ * into the parameter list. This is a two-way message and the
+ * receiver of the message is expected to reply. A buffer
+ * is provided into which IUCV moves the reply to this
+ * message.
+ * Input: pathid - path identification number
+ * trgcls - specifies target class
+ * srccls - specifies the source message class
+ * msgtag - specifies a tag to be associated with the message
+ * flags1 - option for path
+ * IPPRTY- specifies if you want to send priority message
+ * prmmsg - 8-bytes of data to be placed in parameter list
+ * ansbuf - address of buffer to reply with
+ * anslen - length of buffer to reply with
+ * Output: msgid - specifies the message ID.
+ * Return: b2f0_result - return code from CP
+ * (-EINVAL) - ansbuf address is NULL
+*/
+int
+iucv_send2way_prmmsg (__u16 pathid,
+                      __u32 * msgid,
+                      __u32 trgcls,
+                      __u32 srccls,
+                      __u32 msgtag,
+                      ulong flags1, __u8 prmmsg[8], void *ansbuf, ulong anslen)
+{
+        iparml_dpl *parm;
+        ulong b2f0_result;
+
+        iucv_debug(2, "entering");
+
+        if (!ansbuf)
+                return -EINVAL;
+
+        parm = (iparml_dpl *)grab_param();
+
+        parm->ippathid = pathid;
+        parm->iptrgcls = trgcls;
+        parm->ipsrccls = srccls;
+        parm->ipmsgtag = msgtag;
+        parm->ipbfadr2 = (__u32) ((ulong) ansbuf);
+        parm->ipbfln2f = (__u32) anslen;
+        parm->ipflags1 = (IPRMDATA | flags1);        /* message in prmlist */
+        memcpy(parm->iprmmsg, prmmsg, sizeof(parm->iprmmsg));
+
+        b2f0_result = b2f0(SEND, parm);
+
+        if ((!b2f0_result) && (msgid))
+                *msgid = parm->ipmsgid;
+        release_param(parm);
+
+        iucv_debug(2, "exiting");
+
+        return b2f0_result;
+}
+
+/*
+ * Name: iucv_send2way_prmmsg_array
+ * Purpose: This function transmits data to another application.
+ * Prmmsg specifies that the 8-bytes of data are to be moved
+ * into the parameter list. This is a two-way message and the
+ * receiver of the message is expected to reply. A buffer
+ * is provided into which IUCV moves the reply to this
+ * message. The contents of ansbuf is the address of the
+ * array of addresses and lengths of discontiguous buffers
+ * that contain the reply.
+ * Input: pathid - path identification number
+ * trgcls - specifies target class
+ * srccls - specifies the source message class
+ * msgtag - specifies a tag to be associated with the message
+ * flags1 - option for path
+ * IPPRTY- specifies if you want to send priority message
+ * prmmsg - 8-bytes of data to be placed into the parameter list
+ * ansbuf - address of buffer to reply with
+ * anslen - length of buffer to reply with
+ * Output: msgid - specifies the message ID.
+ * Return: b2f0_result - return code from CP + * (-EINVAL) - ansbuf address is NULL + */ +int +iucv_send2way_prmmsg_array (__u16 pathid, + __u32 * msgid, + __u32 trgcls, + __u32 srccls, + __u32 msgtag, + int flags1, + __u8 prmmsg[8], + iucv_array_t * ansbuf, ulong anslen) +{ + iparml_dpl *parm; + ulong b2f0_result; + + iucv_debug(2, "entering"); + + if (!ansbuf) + return -EINVAL; + + parm = (iparml_dpl *)grab_param(); + + parm->ippathid = pathid; + parm->iptrgcls = trgcls; + parm->ipsrccls = srccls; + parm->ipmsgtag = msgtag; + parm->ipbfadr2 = (__u32) ((ulong) ansbuf); + parm->ipbfln2f = (__u32) anslen; + parm->ipflags1 = (IPRMDATA | IPANSLST | flags1); + memcpy(parm->iprmmsg, prmmsg, sizeof(parm->iprmmsg)); + b2f0_result = b2f0(SEND, parm); + if ((!b2f0_result) && (msgid)) + *msgid = parm->ipmsgid; + release_param(parm); + + iucv_debug(2, "exiting"); + return b2f0_result; +} + +void +iucv_setmask_cpuid (void *result) +{ + iparml_set_mask *parm; + + iucv_debug(1, "entering"); + parm = (iparml_set_mask *)grab_param(); + parm->ipmask = *((__u8*)result); + *((ulong *)result) = b2f0(SETMASK, parm); + release_param(parm); + + iucv_debug(1, "b2f0_result = %ld", *((ulong *)result)); + iucv_debug(1, "exiting"); +} + +/* + * Name: iucv_setmask + * Purpose: This function enables or disables the following IUCV + * external interruptions: Nonpriority and priority message + * interrupts, nonpriority and priority reply interrupts. + * Input: SetMaskFlag - options for interrupts + * 0x80 - Nonpriority_MessagePendingInterruptsFlag + * 0x40 - Priority_MessagePendingInterruptsFlag + * 0x20 - Nonpriority_MessageCompletionInterruptsFlag + * 0x10 - Priority_MessageCompletionInterruptsFlag + * 0x08 - IUCVControlInterruptsFlag + * Output: NA + * Return: b2f0_result - return code from CP +*/ +int +iucv_setmask (int SetMaskFlag) +{ + union { + ulong result; + __u8 param; + } u; + int cpu; + + u.param = SetMaskFlag; + cpu = get_cpu(); + smp_call_function_on(iucv_setmask_cpuid, &u, 0, 1, iucv_cpuid); + put_cpu(); + + return u.result; +} + +/** + * iucv_sever: + * @pathid: Path identification number + * @user_data: 16-byte of user data + * + * This function terminates an iucv path. + * Returns: return code from CP + */ +int +iucv_sever(__u16 pathid, __u8 user_data[16]) +{ + iparml_control *parm; + ulong b2f0_result = 0; + + iucv_debug(1, "entering"); + parm = (iparml_control *)grab_param(); + + memcpy(parm->ipuser, user_data, sizeof(parm->ipuser)); + parm->ippathid = pathid; + + b2f0_result = b2f0(SEVER, parm); + + if (!b2f0_result) + iucv_remove_pathid(pathid); + release_param(parm); + + iucv_debug(1, "exiting"); + return b2f0_result; +} + +/* + * Interrupt Handlers + *******************************************************************************/ + +/** + * iucv_irq_handler: + * @regs: Current registers + * @code: irq code + * + * Handles external interrupts coming in from CP. + * Places the interrupt buffer on a queue and schedules iucv_tasklet_handler(). 
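+ * The copy is allocated with GFP_ATOMIC because this code runs in
+ * external interrupt context; the queued copies are drained later
+ * from tasklet (softirq) context.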
+ */ +static void +iucv_irq_handler(struct pt_regs *regs, __u16 code) +{ + iucv_irqdata *irqdata; + + irqdata = kmalloc(sizeof(iucv_irqdata), GFP_ATOMIC); + if (!irqdata) { + printk(KERN_WARNING "%s: out of memory\n", __FUNCTION__); + return; + } + + memcpy(&irqdata->data, iucv_external_int_buffer, + sizeof(iucv_GeneralInterrupt)); + + spin_lock(&iucv_irq_queue_lock); + list_add_tail(&irqdata->queue, &iucv_irq_queue); + spin_unlock(&iucv_irq_queue_lock); + + tasklet_schedule(&iucv_tasklet); +} + +/** + * iucv_do_int: + * @int_buf: Pointer to copy of external interrupt buffer + * + * The workhorse for handling interrupts queued by iucv_irq_handler(). + * This function is called from the bottom half iucv_tasklet_handler(). + */ +static void +iucv_do_int(iucv_GeneralInterrupt * int_buf) +{ + handler *h = NULL; + struct list_head *lh; + ulong flags; + iucv_interrupt_ops_t *interrupt = NULL; /* interrupt addresses */ + __u8 temp_buff1[24], temp_buff2[24]; /* masked handler id. */ + int rc = 0, j = 0; + __u8 no_listener[16] = "NO LISTENER"; + + iucv_debug(2, "entering, pathid %d, type %02X", + int_buf->ippathid, int_buf->iptype); + iucv_dumpit("External Interrupt Buffer:", + int_buf, sizeof(iucv_GeneralInterrupt)); + + ASCEBC (no_listener, 16); + + if (int_buf->iptype != 01) { + if ((int_buf->ippathid) > (max_connections - 1)) { + printk(KERN_WARNING "%s: Got interrupt with pathid %d" + " > max_connections (%ld)\n", __FUNCTION__, + int_buf->ippathid, max_connections - 1); + } else { + h = iucv_pathid_table[int_buf->ippathid]; + interrupt = h->interrupt_table; + iucv_dumpit("Handler:", h, sizeof(handler)); + } + } + + /* end of if statement */ + switch (int_buf->iptype) { + case 0x01: /* connection pending */ + if (messagesDisabled) { + iucv_setmask(~0); + messagesDisabled = 0; + } + spin_lock_irqsave(&iucv_lock, flags); + list_for_each(lh, &iucv_handler_table) { + h = list_entry(lh, handler, list); + memcpy(temp_buff1, &(int_buf->ipvmid), 24); + memcpy(temp_buff2, &(h->id.userid), 24); + for (j = 0; j < 24; j++) { + temp_buff1[j] &= (h->id.mask)[j]; + temp_buff2[j] &= (h->id.mask)[j]; + } + + iucv_dumpit("temp_buff1:", + temp_buff1, sizeof(temp_buff1)); + iucv_dumpit("temp_buff2", + temp_buff2, sizeof(temp_buff2)); + + if (!memcmp (temp_buff1, temp_buff2, 24)) { + + iucv_debug(2, + "found a matching handler"); + break; + } else + h = NULL; + } + spin_unlock_irqrestore (&iucv_lock, flags); + if (h) { + /* ADD PATH TO PATHID TABLE */ + rc = iucv_add_pathid(int_buf->ippathid, h); + if (rc) { + iucv_sever (int_buf->ippathid, + no_listener); + iucv_debug(1, + "add_pathid failed, rc = %d", + rc); + } else { + interrupt = h->interrupt_table; + if (interrupt->ConnectionPending) { + EBCASC (int_buf->ipvmid, 8); + interrupt->ConnectionPending( + (iucv_ConnectionPending *)int_buf, + h->pgm_data); + } else + iucv_sever(int_buf->ippathid, + no_listener); + } + } else + iucv_sever(int_buf->ippathid, no_listener); + break; + + case 0x02: /*connection complete */ + if (messagesDisabled) { + iucv_setmask(~0); + messagesDisabled = 0; + } + if (h) { + if (interrupt->ConnectionComplete) + { + interrupt->ConnectionComplete( + (iucv_ConnectionComplete *)int_buf, + h->pgm_data); + } + else + iucv_debug(1, + "ConnectionComplete not called"); + } else + iucv_sever(int_buf->ippathid, no_listener); + break; + + case 0x03: /* connection severed */ + if (messagesDisabled) { + iucv_setmask(~0); + messagesDisabled = 0; + } + if (h) { + if (interrupt->ConnectionSevered) + interrupt->ConnectionSevered( + (iucv_ConnectionSevered 
*)int_buf, + h->pgm_data); + + else + iucv_sever (int_buf->ippathid, no_listener); + } else + iucv_sever(int_buf->ippathid, no_listener); + break; + + case 0x04: /* connection quiesced */ + if (messagesDisabled) { + iucv_setmask(~0); + messagesDisabled = 0; + } + if (h) { + if (interrupt->ConnectionQuiesced) + interrupt->ConnectionQuiesced( + (iucv_ConnectionQuiesced *)int_buf, + h->pgm_data); + else + iucv_debug(1, + "ConnectionQuiesced not called"); + } + break; + + case 0x05: /* connection resumed */ + if (messagesDisabled) { + iucv_setmask(~0); + messagesDisabled = 0; + } + if (h) { + if (interrupt->ConnectionResumed) + interrupt->ConnectionResumed( + (iucv_ConnectionResumed *)int_buf, + h->pgm_data); + else + iucv_debug(1, + "ConnectionResumed not called"); + } + break; + + case 0x06: /* priority message complete */ + case 0x07: /* nonpriority message complete */ + if (h) { + if (interrupt->MessageComplete) + interrupt->MessageComplete( + (iucv_MessageComplete *)int_buf, + h->pgm_data); + else + iucv_debug(2, + "MessageComplete not called"); + } + break; + + case 0x08: /* priority message pending */ + case 0x09: /* nonpriority message pending */ + if (h) { + if (interrupt->MessagePending) + interrupt->MessagePending( + (iucv_MessagePending *) int_buf, + h->pgm_data); + else + iucv_debug(2, + "MessagePending not called"); + } + break; + default: /* unknown iucv type */ + printk(KERN_WARNING "%s: unknown iucv interrupt\n", + __FUNCTION__); + break; + } /* end switch */ + + iucv_debug(2, "exiting pathid %d, type %02X", + int_buf->ippathid, int_buf->iptype); + + return; +} + +/** + * iucv_tasklet_handler: + * + * This function loops over the queue of irq buffers and runs iucv_do_int() + * on every queue element. + */ +static void +iucv_tasklet_handler(unsigned long ignored) +{ + struct list_head head; + struct list_head *next; + ulong flags; + + spin_lock_irqsave(&iucv_irq_queue_lock, flags); + list_add(&head, &iucv_irq_queue); + list_del_init(&iucv_irq_queue); + spin_unlock_irqrestore (&iucv_irq_queue_lock, flags); + + next = head.next; + while (next != &head) { + iucv_irqdata *p = list_entry(next, iucv_irqdata, queue); + + next = next->next; + iucv_do_int(&p->data); + kfree(p); + } + + return; +} + +subsys_initcall(iucv_init); +module_exit(iucv_exit); + +/** + * Export all public stuff + */ +EXPORT_SYMBOL (iucv_bus); +EXPORT_SYMBOL (iucv_root); +EXPORT_SYMBOL (iucv_accept); +EXPORT_SYMBOL (iucv_connect); +#if 0 +EXPORT_SYMBOL (iucv_purge); +EXPORT_SYMBOL (iucv_query_maxconn); +EXPORT_SYMBOL (iucv_query_bufsize); +EXPORT_SYMBOL (iucv_quiesce); +#endif +EXPORT_SYMBOL (iucv_receive); +#if 0 +EXPORT_SYMBOL (iucv_receive_array); +#endif +EXPORT_SYMBOL (iucv_reject); +#if 0 +EXPORT_SYMBOL (iucv_reply); +EXPORT_SYMBOL (iucv_reply_array); +EXPORT_SYMBOL (iucv_resume); +#endif +EXPORT_SYMBOL (iucv_reply_prmmsg); +EXPORT_SYMBOL (iucv_send); +#if 0 +EXPORT_SYMBOL (iucv_send2way); +EXPORT_SYMBOL (iucv_send2way_array); +EXPORT_SYMBOL (iucv_send_array); +EXPORT_SYMBOL (iucv_send2way_prmmsg); +EXPORT_SYMBOL (iucv_send2way_prmmsg_array); +EXPORT_SYMBOL (iucv_send_prmmsg); +EXPORT_SYMBOL (iucv_setmask); +#endif +EXPORT_SYMBOL (iucv_sever); +EXPORT_SYMBOL (iucv_register_program); +EXPORT_SYMBOL (iucv_unregister_program); diff --git a/drivers/s390/net/iucv.h b/drivers/s390/net/iucv.h new file mode 100644 index 000000000000..198330217eff --- /dev/null +++ b/drivers/s390/net/iucv.h @@ -0,0 +1,849 @@ +/* + * drivers/s390/net/iucv.h + * IUCV base support. 
+ *
+ * S390 version
+ *    Copyright (C) 2000 IBM Corporation
+ *    Author(s):Alan Altmark (Alan_Altmark@us.ibm.com)
+ *              Xenia Tkatschow (xenia@us.ibm.com)
+ *
+ *
+ * Functionality:
+ * To use any of the IUCV functions, one must first register one's
+ * program using iucv_register_program(). Once the program has
+ * successfully completed a register, it can use the other functions.
+ * For further reference on all IUCV functionality, refer to the
+ * CP Programming Services book, also available on the web
+ * through www.ibm.com/s390/vm/pubs, manual # SC24-5760
+ *
+ * Definition of Return Codes
+ * - All positive return codes including zero are reflected back
+ *   from CP except for iucv_register_program. The definition of each
+ *   return code can be found in the CP Programming Services book.
+ *   Also available on the web through www.ibm.com/s390/vm/pubs, manual # SC24-5760
+ * - Return Code of:
+ *   (-EINVAL) Invalid value
+ *   (-ENOMEM) storage allocation failed
+ * pgmmask defined in iucv_register_program will be set depending on input
+ * parameters.
+ *
+ */
+
+#include <linux/types.h>
+#include <asm/debug.h>
+
+/**
+ * Debug Facility stuff
+ */
+#define IUCV_DBF_SETUP_NAME "iucv_setup"
+#define IUCV_DBF_SETUP_LEN 32
+#define IUCV_DBF_SETUP_INDEX 1
+#define IUCV_DBF_SETUP_NR_AREAS 1
+#define IUCV_DBF_SETUP_LEVEL 3
+
+#define IUCV_DBF_DATA_NAME "iucv_data"
+#define IUCV_DBF_DATA_LEN 128
+#define IUCV_DBF_DATA_INDEX 1
+#define IUCV_DBF_DATA_NR_AREAS 1
+#define IUCV_DBF_DATA_LEVEL 2
+
+#define IUCV_DBF_TRACE_NAME "iucv_trace"
+#define IUCV_DBF_TRACE_LEN 16
+#define IUCV_DBF_TRACE_INDEX 2
+#define IUCV_DBF_TRACE_NR_AREAS 1
+#define IUCV_DBF_TRACE_LEVEL 3
+
+#define IUCV_DBF_TEXT(name,level,text) \
+	do { \
+		debug_text_event(iucv_dbf_##name,level,text); \
+	} while (0)
+
+#define IUCV_DBF_HEX(name,level,addr,len) \
+	do { \
+		debug_event(iucv_dbf_##name,level,(void*)(addr),len); \
+	} while (0)
+
+DECLARE_PER_CPU(char[256], iucv_dbf_txt_buf);
+
+#define IUCV_DBF_TEXT_(name,level,text...) \
+	do { \
+		char* iucv_dbf_txt_buf = get_cpu_var(iucv_dbf_txt_buf); \
+		sprintf(iucv_dbf_txt_buf, text); \
+		debug_text_event(iucv_dbf_##name,level,iucv_dbf_txt_buf); \
+		put_cpu_var(iucv_dbf_txt_buf); \
+	} while (0)
+
+#define IUCV_DBF_SPRINTF(name,level,text...) \
+	do { \
+		debug_sprintf_event(iucv_dbf_trace, level, ##text ); \
+	} while (0)
+
+/**
+ * some more debug stuff
+ */
+#define IUCV_HEXDUMP16(importance,header,ptr) \
+PRINT_##importance(header "%02x %02x %02x %02x %02x %02x %02x %02x " \
+		   "%02x %02x %02x %02x %02x %02x %02x %02x\n", \
+		   *(((char*)ptr)),*(((char*)ptr)+1),*(((char*)ptr)+2), \
+		   *(((char*)ptr)+3),*(((char*)ptr)+4),*(((char*)ptr)+5), \
+		   *(((char*)ptr)+6),*(((char*)ptr)+7),*(((char*)ptr)+8), \
+		   *(((char*)ptr)+9),*(((char*)ptr)+10),*(((char*)ptr)+11), \
+		   *(((char*)ptr)+12),*(((char*)ptr)+13), \
+		   *(((char*)ptr)+14),*(((char*)ptr)+15)); \
+PRINT_##importance(header "%02x %02x %02x %02x %02x %02x %02x %02x " \
+		   "%02x %02x %02x %02x %02x %02x %02x %02x\n", \
+		   *(((char*)ptr)+16),*(((char*)ptr)+17), \
+		   *(((char*)ptr)+18),*(((char*)ptr)+19), \
+		   *(((char*)ptr)+20),*(((char*)ptr)+21), \
+		   *(((char*)ptr)+22),*(((char*)ptr)+23), \
+		   *(((char*)ptr)+24),*(((char*)ptr)+25), \
+		   *(((char*)ptr)+26),*(((char*)ptr)+27), \
+		   *(((char*)ptr)+28),*(((char*)ptr)+29), \
+		   *(((char*)ptr)+30),*(((char*)ptr)+31));
+
+static inline void
+iucv_hex_dump(unsigned char *buf, size_t len)
+{
+	size_t i;
+
+	for (i = 0; i < len; i++) {
+		if (i && !(i % 16))
+			printk("\n");
+		printk("%02x ", *(buf + i));
+	}
+	printk("\n");
+}
+/**
+ * end of debug stuff
+ */
+
+#define uchar  unsigned char
+#define ushort unsigned short
+#define ulong  unsigned long
+#define iucv_handle_t void *
+
+/* flags1:
+ * All flags are defined in the field IPFLAGS1 of each function
+ * and can be found in CP Programming Services.
+ * IPLOCAL  - Indicates the connect can only be satisfied on the
+ *            local system
+ * IPPRTY   - Indicates a priority message
+ * IPQUSCE  - Indicates you do not want to receive messages on a
+ *            path until an iucv_resume is issued
+ * IPRMDATA - Indicates that the message is in the parameter list
+ */
+#define IPLOCAL		0x01
+#define IPPRTY		0x20
+#define IPQUSCE		0x40
+#define IPRMDATA	0x80
+
+/* flags1_out:
+ * All flags are defined in the output field of IPFLAGS1 for each function
+ * and can be found in CP Programming Services.
+ * IPNORPY - Specifies this is a one-way message and no reply is expected.
+ * IPPRTY  - Indicates a priority message is permitted. Defined in flags1.
+ */
+#define IPNORPY		0x10
+
+#define Nonpriority_MessagePendingInterruptsFlag	0x80
+#define Priority_MessagePendingInterruptsFlag		0x40
+#define Nonpriority_MessageCompletionInterruptsFlag	0x20
+#define Priority_MessageCompletionInterruptsFlag	0x10
+#define IUCVControlInterruptsFlag			0x08
+#define AllInterrupts					0xf8
+/*
+ * The external interrupt buffer mappings below correspond to the
+ * following interrupt types.
+ * Names: iucv_ConnectionPending -> connection pending + * iucv_ConnectionComplete -> connection complete + * iucv_ConnectionSevered -> connection severed + * iucv_ConnectionQuiesced -> connection quiesced + * iucv_ConnectionResumed -> connection resumed + * iucv_MessagePending -> message pending + * iucv_MessageComplete -> message complete + */ +typedef struct { + u16 ippathid; + uchar ipflags1; + uchar iptype; + u16 ipmsglim; + u16 res1; + uchar ipvmid[8]; + uchar ipuser[16]; + u32 res3; + uchar ippollfg; + uchar res4[3]; +} iucv_ConnectionPending; + +typedef struct { + u16 ippathid; + uchar ipflags1; + uchar iptype; + u16 ipmsglim; + u16 res1; + uchar res2[8]; + uchar ipuser[16]; + u32 res3; + uchar ippollfg; + uchar res4[3]; +} iucv_ConnectionComplete; + +typedef struct { + u16 ippathid; + uchar res1; + uchar iptype; + u32 res2; + uchar res3[8]; + uchar ipuser[16]; + u32 res4; + uchar ippollfg; + uchar res5[3]; +} iucv_ConnectionSevered; + +typedef struct { + u16 ippathid; + uchar res1; + uchar iptype; + u32 res2; + uchar res3[8]; + uchar ipuser[16]; + u32 res4; + uchar ippollfg; + uchar res5[3]; +} iucv_ConnectionQuiesced; + +typedef struct { + u16 ippathid; + uchar res1; + uchar iptype; + u32 res2; + uchar res3[8]; + uchar ipuser[16]; + u32 res4; + uchar ippollfg; + uchar res5[3]; +} iucv_ConnectionResumed; + +typedef struct { + u16 ippathid; + uchar ipflags1; + uchar iptype; + u32 ipmsgid; + u32 iptrgcls; + union u2 { + u32 iprmmsg1_u32; + uchar iprmmsg1[4]; + } ln1msg1; + union u1 { + u32 ipbfln1f; + uchar iprmmsg2[4]; + } ln1msg2; + u32 res1[3]; + u32 ipbfln2f; + uchar ippollfg; + uchar res2[3]; +} iucv_MessagePending; + +typedef struct { + u16 ippathid; + uchar ipflags1; + uchar iptype; + u32 ipmsgid; + u32 ipaudit; + uchar iprmmsg[8]; + u32 ipsrccls; + u32 ipmsgtag; + u32 res; + u32 ipbfln2f; + uchar ippollfg; + uchar res2[3]; +} iucv_MessageComplete; + +/* + * iucv_interrupt_ops_t: Is a vector of functions that handle + * IUCV interrupts. + * Parameter list: + * eib - is a pointer to a 40-byte area described + * with one of the structures above. + * pgm_data - this data is strictly for the + * interrupt handler that is passed by + * the application. This may be an address + * or token. +*/ +typedef struct { + void (*ConnectionPending) (iucv_ConnectionPending * eib, + void *pgm_data); + void (*ConnectionComplete) (iucv_ConnectionComplete * eib, + void *pgm_data); + void (*ConnectionSevered) (iucv_ConnectionSevered * eib, + void *pgm_data); + void (*ConnectionQuiesced) (iucv_ConnectionQuiesced * eib, + void *pgm_data); + void (*ConnectionResumed) (iucv_ConnectionResumed * eib, + void *pgm_data); + void (*MessagePending) (iucv_MessagePending * eib, void *pgm_data); + void (*MessageComplete) (iucv_MessageComplete * eib, void *pgm_data); +} iucv_interrupt_ops_t; + +/* + *iucv_array_t : Defines buffer array. + * Inside the array may be 31- bit addresses and 31-bit lengths. 
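+ * Usage sketch (editor's addition; hdr/payload and their lengths are
+ * hypothetical caller-owned buffers):
+ *	iucv_array_t vec[2] __attribute__ ((aligned (8)));
+ *	vec[0].address = (u32)(ulong) hdr;     vec[0].length = hdr_len;
+ *	vec[1].address = (u32)(ulong) payload; vec[1].length = pay_len;
+ *	rc = iucv_send_array(pathid, &msgid, trgcls, srccls, msgtag, 0,
+ *			     vec, hdr_len + pay_len);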
+*/
+typedef struct {
+	u32 address;
+	u32 length;
+} iucv_array_t __attribute__ ((aligned (8)));
+
+extern struct bus_type iucv_bus;
+extern struct device *iucv_root;
+
+/* -prototypes- */
+/*
+ * Name: iucv_register_program
+ * Purpose: Registers an application with IUCV
+ * Input: pgmname - user identification
+ *        userid - machine identification
+ *        pgmmask - indicates which bits in the pgmname and userid combined will be
+ *                  used to determine who is given control
+ *        ops - address of vector of interrupt handlers
+ *        pgm_data- application data passed to interrupt handlers
+ * Output: NA
+ * Return: address of handler
+ *         (0) - Error occurred, registration not completed.
+ * NOTE: Exact cause of failure will be recorded in syslog.
+*/
+iucv_handle_t iucv_register_program (uchar pgmname[16],
+				     uchar userid[8],
+				     uchar pgmmask[24],
+				     iucv_interrupt_ops_t * ops,
+				     void *pgm_data);
+
+/*
+ * Name: iucv_unregister_program
+ * Purpose: Unregister application with IUCV
+ * Input: address of handler
+ * Output: NA
+ * Return: (0) - Normal return
+ *         (-EINVAL) - Internal error, wild pointer
+*/
+int iucv_unregister_program (iucv_handle_t handle);
+
+/*
+ * Name: iucv_accept
+ * Purpose: This function is issued after the user receives a Connection Pending external
+ *          interrupt and now wishes to complete the IUCV communication path.
+ * Input: pathid - u16, Path identification number
+ *        msglim_reqstd - u16, The number of outstanding messages requested.
+ *        user_data - uchar[16], Data specified by the iucv_connect function.
+ *        flags1 - int, Contains options for this path.
+ *          -IPPRTY   - 0x20, Specifies if you want to send priority message.
+ *          -IPRMDATA - 0x80, Specifies whether your program can handle a message
+ *                      in the parameter list.
+ *          -IPQUSCE  - 0x40, Specifies whether you want to quiesce the path being
+ *                      established.
+ *        handle - iucv_handle_t, Address of handler.
+ *        pgm_data - void *, Application data passed to interrupt handlers.
+ *        flags1_out - int * Contains information about the path
+ *          - IPPRTY - 0x20, Indicates you may send priority messages.
+ *        msglim - *u16, Number of outstanding messages.
+ * Output: return code from CP IUCV call.
+*/
+
+int iucv_accept (u16 pathid,
+		 u16 msglim_reqstd,
+		 uchar user_data[16],
+		 int flags1,
+		 iucv_handle_t handle,
+		 void *pgm_data, int *flags1_out, u16 * msglim);
+
+/*
+ * Name: iucv_connect
+ * Purpose: This function establishes an IUCV path. Although the connect may complete
+ *          successfully, you are not able to use the path until you receive an IUCV
+ *          Connection Complete external interrupt.
+ * Input: pathid - u16 *, Path identification number
+ *        msglim_reqstd - u16, Number of outstanding messages requested
+ *        user_data - uchar[16], 16-byte user data
+ *        userid - uchar[8], User identification
+ *        system_name - uchar[8], 8-byte identifying the system name
+ *        flags1 - int, Contains options for this path.
+ *          -IPPRTY   - 0x20, Specifies if you want to send priority message.
+ *          -IPRMDATA - 0x80, Specifies whether your program can handle a message
+ *                      in the parameter list.
+ *          -IPQUSCE  - 0x40, Specifies whether you want to quiesce the path being
+ *                      established.
+ *          -IPLOCAL  - 0x01, Allows an application to force the partner to be on
+ *                      the local system. If local is specified then target class
+ *                      cannot be specified.
+ *        flags1_out - int * Contains information about the path
+ *          - IPPRTY - 0x20, Indicates you may send priority messages.
+ *        msglim - u16 *, Number of outstanding messages
+ *        handle - iucv_handle_t, Address of handler
+ *        pgm_data - void *, Application data passed to interrupt handlers
+ * Output: return code from CP IUCV call
+ *         rc - return code from iucv_declare_buffer
+ *         -EINVAL - Invalid handle passed by application
+ *         -EINVAL - Pathid address is NULL
+ *         add_pathid_result - Return code from internal function add_pathid
+*/
+int
+    iucv_connect (u16 * pathid,
+		  u16 msglim_reqstd,
+		  uchar user_data[16],
+		  uchar userid[8],
+		  uchar system_name[8],
+		  int flags1,
+		  int *flags1_out,
+		  u16 * msglim, iucv_handle_t handle, void *pgm_data);
+
+/*
+ * Name: iucv_purge
+ * Purpose: This function cancels a message that you have sent.
+ * Input: pathid - Path identification number.
+ *        msgid - Specifies the message ID of the message to be purged.
+ *        srccls - Specifies the source message class.
+ * Output: audit - Contains information about an asynchronous error
+ *                 that may have affected the normal completion
+ *                 of this message.
+ * Return: Return code from CP IUCV call.
+*/
+int iucv_purge (u16 pathid, u32 msgid, u32 srccls, __u32 *audit);
+/*
+ * Name: iucv_query_maxconn
+ * Purpose: This function determines the maximum number of communication paths you
+ *          may establish.
+ * Return: maxconn - ulong, Maximum number of connections the virtual machine may
+ *          establish.
+*/
+ulong iucv_query_maxconn (void);
+
+/*
+ * Name: iucv_query_bufsize
+ * Purpose: This function determines how large an external interrupt
+ *          buffer IUCV requires to store information.
+ * Return: bufsize - ulong, Size of external interrupt buffer.
+ */
+ulong iucv_query_bufsize (void);
+
+/*
+ * Name: iucv_quiesce
+ * Purpose: This function temporarily suspends incoming messages on an
+ *          IUCV path. You can later reactivate the path by invoking
+ *          the iucv_resume function.
+ * Input: pathid - Path identification number
+ *        user_data - 16-bytes of user data
+ * Output: NA
+ * Return: Return code from CP IUCV call.
+*/
+int iucv_quiesce (u16 pathid, uchar user_data[16]);
+
+/*
+ * Name: iucv_receive
+ * Purpose: This function receives messages that are being sent to you
+ *          over established paths. Data will be returned in buffer for length of
+ *          buflen.
+ * Input:
+ *       pathid - Path identification number.
+ *       buffer - Address of buffer to receive.
+ *       buflen - Length of buffer to receive.
+ *       msgid - Specifies the message ID.
+ *       trgcls - Specifies target class.
+ * Output:
+ *       flags1_out: int *, Contains information about this path.
+ *         IPNORPY - 0x10 Specifies this is a one-way message and no reply is
+ *                   expected.
+ *         IPPRTY - 0x20 Specifies if you want to send priority message.
+ *         IPRMDATA - 0x80 specifies the data is contained in the parameter list
+ *       residual_buffer - address of buffer updated by the number
+ *                         of bytes you have received.
+ *       residual_length -
+ *              Contains one of the following values, if the receive buffer is:
+ *               The same length as the message, this field is zero.
+ *               Longer than the message, this field contains the number of
+ *                bytes remaining in the buffer.
+ *               Shorter than the message, this field contains the residual
+ *                count (that is, the number of bytes remaining in the
+ *                message that does not fit into the buffer). In this
+ *                case b2f0_result = 5.
+ * Return: Return code from CP IUCV call.
+ *         (-EINVAL) - Buffer address is NULL.
+*/
+int iucv_receive (u16 pathid,
+		  u32 msgid,
+		  u32 trgcls,
+		  void *buffer,
+		  ulong buflen,
+		  int *flags1_out,
+		  ulong * residual_buffer, ulong * residual_length);
+
+ /*
+  * Name: iucv_receive_array
+  * Purpose: This function receives messages that are being sent to you
+  *          over established paths. Data will be returned in first buffer for
+  *          length of first buffer.
+  * Input: pathid - Path identification number.
+  *        msgid - Specifies the message ID.
+  *        trgcls - Specifies target class.
+  *        buffer - Address of array of buffers.
+  *        buflen - Total length of buffers.
+  * Output:
+  *        flags1_out: int *, Contains information about this path.
+  *          IPNORPY - 0x10 Specifies this is a one-way message and no reply is
+  *                    expected.
+  *          IPPRTY - 0x20 Specifies if you want to send priority message.
+  *          IPRMDATA - 0x80 specifies the data is contained in the parameter list
+  *        residual_buffer - address points to the current list entry IUCV
+  *                          is working on.
+  *        residual_length -
+  *               Contains one of the following values, if the receive buffer is:
+  *                The same length as the message, this field is zero.
+  *                Longer than the message, this field contains the number of
+  *                 bytes remaining in the buffer.
+  *                Shorter than the message, this field contains the residual
+  *                 count (that is, the number of bytes remaining in the
+  *                 message that does not fit into the buffer). In this
+  *                 case b2f0_result = 5.
+  * Return: Return code from CP IUCV call.
+  *         (-EINVAL) - Buffer address is NULL.
+  */
+int iucv_receive_array (u16 pathid,
+			u32 msgid,
+			u32 trgcls,
+			iucv_array_t * buffer,
+			ulong buflen,
+			int *flags1_out,
+			ulong * residual_buffer, ulong * residual_length);
+
+/*
+ * Name: iucv_reject
+ * Purpose: The reject function refuses a specified message. Between the
+ *          time you are notified of a message and the time that you
+ *          complete the message, the message may be rejected.
+ * Input: pathid - Path identification number.
+ *        msgid - Specifies the message ID.
+ *        trgcls - Specifies target class.
+ * Output: NA
+ * Return: Return code from CP IUCV call.
+*/
+int iucv_reject (u16 pathid, u32 msgid, u32 trgcls);
+
+/*
+ * Name: iucv_reply
+ * Purpose: This function responds to the two-way messages that you
+ *          receive. You must identify completely the message to
+ *          which you wish to reply, i.e., pathid, msgid, and trgcls.
+ * Input: pathid - Path identification number.
+ *        msgid - Specifies the message ID.
+ *        trgcls - Specifies target class.
+ *        flags1 - Option for path.
+ *          IPPRTY- 0x20, Specifies if you want to send priority message.
+ *        buffer - Address of reply buffer.
+ *        buflen - Length of reply buffer.
+ * Output: residual_buffer - Address of buffer updated by the number
+ *                           of bytes you have moved.
+ *         residual_length - Contains one of the following values:
+ *          If the answer buffer is the same length as the reply, this field
+ *           contains zero.
+ *          If the answer buffer is longer than the reply, this field contains
+ *           the number of bytes remaining in the buffer.
+ *          If the answer buffer is shorter than the reply, this field contains
+ *           a residual count (that is, the number of bytes remaining in the
+ *           reply that does not fit into the buffer). In this
+ *           case b2f0_result = 5.
+ * Return: Return code from CP IUCV call.
+ *         (-EINVAL) - Buffer address is NULL.
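+ * Usage sketch (editor's addition, replying from a MessagePending
+ * handler; ans/ans_len and the residual variables are hypothetical):
+ *	rc = iucv_reply(eib->ippathid, eib->ipmsgid, eib->iptrgcls, 0,
+ *			ans, ans_len, &resid_buf, &resid_len);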
+*/
+int iucv_reply (u16 pathid,
+		u32 msgid,
+		u32 trgcls,
+		int flags1,
+		void *buffer, ulong buflen, ulong * residual_buffer,
+		ulong * residual_length);
+
+/*
+ * Name: iucv_reply_array
+ * Purpose: This function responds to the two-way messages that you
+ *          receive. You must identify completely the message to
+ *          which you wish to reply, i.e., pathid, msgid, and trgcls.
+ *          The array identifies a list of addresses and lengths of
+ *          discontiguous buffers that contains the reply data.
+ * Input: pathid - Path identification number
+ *        msgid - Specifies the message ID.
+ *        trgcls - Specifies target class.
+ *        flags1 - Option for path.
+ *          IPPRTY- 0x20, Specifies if you want to send priority message.
+ *        buffer - Address of array of reply buffers.
+ *        buflen - Total length of reply buffers.
+ * Output: residual_buffer - Address of buffer which IUCV is currently working on.
+ *         residual_length - Contains one of the following values:
+ *          If the answer buffer is the same length as the reply, this field
+ *           contains zero.
+ *          If the answer buffer is longer than the reply, this field contains
+ *           the number of bytes remaining in the buffer.
+ *          If the answer buffer is shorter than the reply, this field contains
+ *           a residual count (that is, the number of bytes remaining in the
+ *           reply that does not fit into the buffer). In this
+ *           case b2f0_result = 5.
+ * Return: Return code from CP IUCV call.
+ *         (-EINVAL) - Buffer address is NULL.
+*/
+int iucv_reply_array (u16 pathid,
+		      u32 msgid,
+		      u32 trgcls,
+		      int flags1,
+		      iucv_array_t * buffer,
+		      ulong buflen, ulong * residual_address,
+		      ulong * residual_length);
+
+/*
+ * Name: iucv_reply_prmmsg
+ * Purpose: This function responds to the two-way messages that you
+ *          receive. You must identify completely the message to
+ *          which you wish to reply, i.e., pathid, msgid, and trgcls.
+ *          Prmmsg signifies the data is moved into the
+ *          parameter list.
+ * Input: pathid - Path identification number.
+ *        msgid - Specifies the message ID.
+ *        trgcls - Specifies target class.
+ *        flags1 - Option for path.
+ *          IPPRTY- 0x20 Specifies if you want to send priority message.
+ *        prmmsg - 8-bytes of data to be placed into the parameter list.
+ * Output: NA
+ * Return: Return code from CP IUCV call.
+*/
+int iucv_reply_prmmsg (u16 pathid,
+		       u32 msgid, u32 trgcls, int flags1, uchar prmmsg[8]);
+
+/*
+ * Name: iucv_resume
+ * Purpose: This function restores communications over a quiesced path.
+ * Input: pathid - Path identification number.
+ *        user_data - 16-bytes of user data.
+ * Output: NA
+ * Return: Return code from CP IUCV call.
+*/
+int iucv_resume (u16 pathid, uchar user_data[16]);
+
+/*
+ * Name: iucv_send
+ * Purpose: This function transmits data to another application.
+ *          Data to be transmitted is in a buffer and this is a
+ *          one-way message and the receiver will not reply to the
+ *          message.
+ * Input: pathid - Path identification number.
+ *        trgcls - Specifies target class.
+ *        srccls - Specifies the source message class.
+ *        msgtag - Specifies a tag to be associated with the message.
+ *        flags1 - Option for path.
+ *          IPPRTY- 0x20 Specifies if you want to send priority message.
+ *        buffer - Address of send buffer.
+ *        buflen - Length of send buffer.
+ * Output: msgid - Specifies the message ID.
+ * Return: Return code from CP IUCV call.
+ *         (-EINVAL) - Buffer address is NULL.
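+ * Usage sketch (editor's addition; pathid and the class/tag values are
+ * hypothetical):
+ *	u32 msgid;
+ *	static char txt[] = "hello";
+ *	rc = iucv_send(pathid, &msgid, trgcls, srccls, msgtag, 0,
+ *		       txt, sizeof(txt));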
+*/
+int iucv_send (u16 pathid,
+	       u32 * msgid,
+	       u32 trgcls,
+	       u32 srccls, u32 msgtag, int flags1, void *buffer, ulong buflen);
+
+/*
+ * Name: iucv_send_array
+ * Purpose: This function transmits data to another application.
+ *          The buffer parameter holds the address of an array of
+ *          addresses and lengths of discontiguous buffers that hold
+ *          the message text. This is a one-way message and the
+ *          receiver will not reply to the message.
+ * Input: pathid - Path identification number.
+ *        trgcls - Specifies target class.
+ *        srccls - Specifies the source message class.
+ *        msgtag - Specifies a tag to be associated with the message.
+ *        flags1 - Option for path.
+ *          IPPRTY- specifies if you want to send priority message.
+ *        buffer - Address of array of send buffers.
+ *        buflen - Total length of send buffers.
+ * Output: msgid - Specifies the message ID.
+ * Return: Return code from CP IUCV call.
+ *         (-EINVAL) - Buffer address is NULL.
+*/
+int iucv_send_array (u16 pathid,
+		     u32 * msgid,
+		     u32 trgcls,
+		     u32 srccls,
+		     u32 msgtag,
+		     int flags1, iucv_array_t * buffer, ulong buflen);
+
+/*
+ * Name: iucv_send_prmmsg
+ * Purpose: This function transmits data to another application.
+ *          Prmmsg specifies that the 8-bytes of data are to be moved
+ *          into the parameter list. This is a one-way message and the
+ *          receiver will not reply to the message.
+ * Input: pathid - Path identification number.
+ *        trgcls - Specifies target class.
+ *        srccls - Specifies the source message class.
+ *        msgtag - Specifies a tag to be associated with the message.
+ *        flags1 - Option for path.
+ *          IPPRTY- 0x20 specifies if you want to send priority message.
+ *        prmmsg - 8-bytes of data to be placed into parameter list.
+ * Output: msgid - Specifies the message ID.
+ * Return: Return code from CP IUCV call.
+*/
+int iucv_send_prmmsg (u16 pathid,
+		      u32 * msgid,
+		      u32 trgcls,
+		      u32 srccls, u32 msgtag, int flags1, uchar prmmsg[8]);
+
+/*
+ * Name: iucv_send2way
+ * Purpose: This function transmits data to another application.
+ *          Data to be transmitted is in a buffer. The receiver
+ *          of the send is expected to reply to the message and
+ *          a buffer is provided into which IUCV moves the reply
+ *          to this message.
+ * Input: pathid - Path identification number.
+ *        trgcls - Specifies target class.
+ *        srccls - Specifies the source message class.
+ *        msgtag - Specifies a tag associated with the message.
+ *        flags1 - Option for path.
+ *          IPPRTY- 0x20 Specifies if you want to send priority message.
+ *        buffer - Address of send buffer.
+ *        buflen - Length of send buffer.
+ *        ansbuf - Address of buffer into which IUCV moves the reply of
+ *                 this message.
+ *        anslen - Length of the answer buffer.
+ * Output: msgid - Specifies the message ID.
+ * Return: Return code from CP IUCV call.
+ *         (-EINVAL) - Buffer or ansbuf address is NULL.
+*/
+int iucv_send2way (u16 pathid,
+		   u32 * msgid,
+		   u32 trgcls,
+		   u32 srccls,
+		   u32 msgtag,
+		   int flags1,
+		   void *buffer, ulong buflen, void *ansbuf, ulong anslen);
+
+/*
+ * Name: iucv_send2way_array
+ * Purpose: This function transmits data to another application.
+ *          The buffer parameter holds the address of an array of
+ *          addresses and lengths of discontiguous buffers that hold
+ *          the message text. The receiver of the send is expected to
+ *          reply to the message and a buffer is provided into which
+ *          IUCV moves the reply to this message.
+ * Input: pathid - Path identification number.
+ *        trgcls - Specifies target class.
+ *        srccls - Specifies the source message class.
+ *        msgtag - Specifies a tag to be associated with the message.
+ *        flags1 - Option for path.
+ *          IPPRTY- 0x20 Specifies if you want to send priority message.
+ *        buffer - Address of array of send buffers.
+ *        buflen - Total length of send buffers.
+ *        ansbuf - Address of array of buffers into which IUCV moves the reply
+ *                 of this message.
+ *        anslen - Total length of reply buffers.
+ * Output: msgid - Specifies the message ID.
+ * Return: Return code from CP IUCV call.
+ *         (-EINVAL) - Buffer address is NULL.
+*/
+int iucv_send2way_array (u16 pathid,
+			 u32 * msgid,
+			 u32 trgcls,
+			 u32 srccls,
+			 u32 msgtag,
+			 int flags1,
+			 iucv_array_t * buffer,
+			 ulong buflen, iucv_array_t * ansbuf, ulong anslen);
+
+/*
+ * Name: iucv_send2way_prmmsg
+ * Purpose: This function transmits data to another application.
+ *          Prmmsg specifies that the 8-bytes of data are to be moved
+ *          into the parameter list. This is a two-way message and the
+ *          receiver of the message is expected to reply. A buffer
+ *          is provided into which IUCV moves the reply to this
+ *          message.
+ * Input: pathid - Path identification number.
+ *        trgcls - Specifies target class.
+ *        srccls - Specifies the source message class.
+ *        msgtag - Specifies a tag to be associated with the message.
+ *        flags1 - Option for path.
+ *          IPPRTY- 0x20 Specifies if you want to send priority message.
+ *        prmmsg - 8-bytes of data to be placed in parameter list.
+ *        ansbuf - Address of buffer into which IUCV moves the reply of
+ *                 this message.
+ *        anslen - Length of the answer buffer.
+ * Output: msgid - Specifies the message ID.
+ * Return: Return code from CP IUCV call.
+ *         (-EINVAL) - Buffer address is NULL.
+*/
+int iucv_send2way_prmmsg (u16 pathid,
+			  u32 * msgid,
+			  u32 trgcls,
+			  u32 srccls,
+			  u32 msgtag,
+			  ulong flags1,
+			  uchar prmmsg[8], void *ansbuf, ulong anslen);
+
+/*
+ * Name: iucv_send2way_prmmsg_array
+ * Purpose: This function transmits data to another application.
+ *          Prmmsg specifies that the 8-bytes of data are to be moved
+ *          into the parameter list. This is a two-way message and the
+ *          receiver of the message is expected to reply. A buffer
+ *          is provided into which IUCV moves the reply to this
+ *          message. The ansbuf parameter holds the address of an
+ *          array of addresses and lengths of discontiguous buffers
+ *          that contain the reply.
+ * Input: pathid - Path identification number.
+ *        trgcls - Specifies target class.
+ *        srccls - Specifies the source message class.
+ *        msgtag - Specifies a tag to be associated with the message.
+ *        flags1 - Option for path.
+ *          IPPRTY- 0x20 specifies if you want to send priority message.
+ *        prmmsg - 8-bytes of data to be placed into the parameter list.
+ *        ansbuf - Address of array of buffers into which IUCV moves the reply
+ *                 of this message.
+ *        anslen - Total length of reply buffers.
+ * Output: msgid - Specifies the message ID.
+ * Return: Return code from CP IUCV call.
+ *         (-EINVAL) - Ansbuf address is NULL.
+*/
+int iucv_send2way_prmmsg_array (u16 pathid,
+				u32 * msgid,
+				u32 trgcls,
+				u32 srccls,
+				u32 msgtag,
+				int flags1,
+				uchar prmmsg[8],
+				iucv_array_t * ansbuf, ulong anslen);
+
+/*
+ * Name: iucv_setmask
+ * Purpose: This function enables or disables the following IUCV
+ *          external interruptions: Nonpriority and priority message
+ *          interrupts, nonpriority and priority reply interrupts.
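+ *          (Editor's note: AllInterrupts, 0xf8, defined earlier in this
+ *          header, combines all five flag bits listed below.)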
+ * Input: SetMaskFlag - options for interrupts
+ *        0x80 - Nonpriority_MessagePendingInterruptsFlag
+ *        0x40 - Priority_MessagePendingInterruptsFlag
+ *        0x20 - Nonpriority_MessageCompletionInterruptsFlag
+ *        0x10 - Priority_MessageCompletionInterruptsFlag
+ *        0x08 - IUCVControlInterruptsFlag
+ * Output: NA
+ * Return: Return code from CP IUCV call.
+*/
+int iucv_setmask (int SetMaskFlag);
+
+/*
+ * Name: iucv_sever
+ * Purpose: This function terminates an IUCV path.
+ * Input: pathid - Path identification number.
+ *        user_data - 16-bytes of user data.
+ * Output: NA
+ * Return: Return code from CP IUCV call.
+ *         (-EINVAL) - Internal error, wild pointer.
+*/
+int iucv_sever (u16 pathid, uchar user_data[16]);
diff --git a/drivers/s390/net/lcs.c b/drivers/s390/net/lcs.c
new file mode 100644
index 000000000000..0f76e945b984
--- /dev/null
+++ b/drivers/s390/net/lcs.c
@@ -0,0 +1,2347 @@
+/*
+ *  linux/drivers/s390/net/lcs.c
+ *
+ *  Linux for S/390 Lan Channel Station Network Driver
+ *
+ *  Copyright (C)  1999-2001 IBM Deutschland Entwicklung GmbH,
+ *                           IBM Corporation
+ *    Author(s): Original Code written by
+ *                        DJ Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com)
+ *               Rewritten by
+ *                        Frank Pavlic (pavlic@de.ibm.com) and
+ *                        Martin Schwidefsky <schwidefsky@de.ibm.com>
+ *
+ *    $Revision: 1.96 $	 $Date: 2004/11/11 13:42:33 $
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/if.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/trdevice.h>
+#include <linux/fddidevice.h>
+#include <linux/inetdevice.h>
+#include <linux/in.h>
+#include <linux/igmp.h>
+#include <linux/delay.h>
+#include <net/arp.h>
+#include <net/ip.h>
+
+#include <asm/debug.h>
+#include <asm/idals.h>
+#include <asm/timex.h>
+#include <linux/device.h>
+#include <asm/ccwgroup.h>
+
+#include "lcs.h"
+#include "cu3088.h"
+
+
+#if !defined(CONFIG_NET_ETHERNET) && \
+    !defined(CONFIG_TR) && !defined(CONFIG_FDDI)
+#error Cannot compile lcs.c without some net devices switched on.
+#endif
+
+/**
+ * initialization string for output
+ */
+#define VERSION_LCS_C  "$Revision: 1.96 $"
+
+static char version[] __initdata = "LCS driver ("VERSION_LCS_C "/" VERSION_LCS_H ")";
+static char debug_buffer[255];
+
+/**
+ * Some prototypes.
+ */
+static void lcs_tasklet(unsigned long);
+static void lcs_start_kernel_thread(struct lcs_card *card);
+static void lcs_get_frames_cb(struct lcs_channel *, struct lcs_buffer *);
+static int lcs_send_delipm(struct lcs_card *, struct lcs_ipm_list *);
+
+/**
+ * Debug Facility Stuff
+ */
+static debug_info_t *lcs_dbf_setup;
+static debug_info_t *lcs_dbf_trace;
+
+/**
+ *  LCS Debug Facility functions
+ */
+static void
+lcs_unregister_debug_facility(void)
+{
+	if (lcs_dbf_setup)
+		debug_unregister(lcs_dbf_setup);
+	if (lcs_dbf_trace)
+		debug_unregister(lcs_dbf_trace);
+}
+
+static int
+lcs_register_debug_facility(void)
+{
+	lcs_dbf_setup = debug_register("lcs_setup", 1, 1, 8);
+	lcs_dbf_trace = debug_register("lcs_trace", 1, 2, 8);
+	if (lcs_dbf_setup == NULL || lcs_dbf_trace == NULL) {
+		PRINT_ERR("Not enough memory for debug facility.\n");
+		lcs_unregister_debug_facility();
+		return -ENOMEM;
+	}
+	debug_register_view(lcs_dbf_setup, &debug_hex_ascii_view);
+	debug_set_level(lcs_dbf_setup, 4);
+	debug_register_view(lcs_dbf_trace, &debug_hex_ascii_view);
+	debug_set_level(lcs_dbf_trace, 4);
+	return 0;
+}
+
+/**
+ * Allocate io buffers.
+ */
+static int
+lcs_alloc_channel(struct lcs_channel *channel)
+{
+	int cnt;
+
+	LCS_DBF_TEXT(2, setup, "ichalloc");
+	for (cnt = 0; cnt < LCS_NUM_BUFFS; cnt++) {
+		/* alloc memory for io buffer */
+		channel->iob[cnt].data = (void *)
+			kmalloc(LCS_IOBUFFERSIZE, GFP_DMA | GFP_KERNEL);
+		if (channel->iob[cnt].data == NULL)
+			break;
+		memset(channel->iob[cnt].data, 0, LCS_IOBUFFERSIZE);
+		channel->iob[cnt].state = BUF_STATE_EMPTY;
+	}
+	if (cnt < LCS_NUM_BUFFS) {
+		/* Not all io buffers could be allocated. */
+		LCS_DBF_TEXT(2, setup, "echalloc");
+		while (cnt-- > 0)
+			kfree(channel->iob[cnt].data);
+		return -ENOMEM;
+	}
+	return 0;
+}
+
+/**
+ * Free io buffers.
+ */
+static void
+lcs_free_channel(struct lcs_channel *channel)
+{
+	int cnt;
+
+	LCS_DBF_TEXT(2, setup, "ichfree");
+	for (cnt = 0; cnt < LCS_NUM_BUFFS; cnt++) {
+		if (channel->iob[cnt].data != NULL)
+			kfree(channel->iob[cnt].data);
+		channel->iob[cnt].data = NULL;
+	}
+}
+
+/*
+ * Cleanup channel.
+ */
+static void
+lcs_cleanup_channel(struct lcs_channel *channel)
+{
+	LCS_DBF_TEXT(3, setup, "cleanch");
+	/* Kill write channel tasklets. */
+	tasklet_kill(&channel->irq_tasklet);
+	/* Free channel buffers. */
+	lcs_free_channel(channel);
+}
+
+/**
+ * LCS free memory for card and channels.
+ */
+static void
+lcs_free_card(struct lcs_card *card)
+{
+	LCS_DBF_TEXT(2, setup, "remcard");
+	LCS_DBF_HEX(2, setup, &card, sizeof(void*));
+	kfree(card);
+}
+
+/**
+ * LCS alloc memory for card and channels
+ */
+static struct lcs_card *
+lcs_alloc_card(void)
+{
+	struct lcs_card *card;
+	int rc;
+
+	LCS_DBF_TEXT(2, setup, "alloclcs");
+
+	card = kmalloc(sizeof(struct lcs_card), GFP_KERNEL | GFP_DMA);
+	if (card == NULL)
+		return NULL;
+	memset(card, 0, sizeof(struct lcs_card));
+	card->lan_type = LCS_FRAME_TYPE_AUTO;
+	card->pkt_seq = 0;
+	card->lancmd_timeout = LCS_LANCMD_TIMEOUT_DEFAULT;
+	/* Allocate io buffers for the read channel. */
+	rc = lcs_alloc_channel(&card->read);
+	if (rc){
+		LCS_DBF_TEXT(2, setup, "iccwerr");
+		lcs_free_card(card);
+		return NULL;
+	}
+	/* Allocate io buffers for the write channel.
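+	 * If this fails, the read channel buffers allocated above are
+	 * freed again through lcs_cleanup_channel().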
+	 */
+	rc = lcs_alloc_channel(&card->write);
+	if (rc) {
+		LCS_DBF_TEXT(2, setup, "iccwerr");
+		lcs_cleanup_channel(&card->read);
+		lcs_free_card(card);
+		return NULL;
+	}
+
+#ifdef CONFIG_IP_MULTICAST
+	INIT_LIST_HEAD(&card->ipm_list);
+#endif
+	LCS_DBF_HEX(2, setup, &card, sizeof(void*));
+	return card;
+}
+
+/*
+ * Setup read channel.
+ */
+static void
+lcs_setup_read_ccws(struct lcs_card *card)
+{
+	int cnt;
+
+	LCS_DBF_TEXT(2, setup, "ireadccw");
+	/* Setup read ccws. */
+	memset(card->read.ccws, 0, sizeof (struct ccw1) * (LCS_NUM_BUFFS + 1));
+	for (cnt = 0; cnt < LCS_NUM_BUFFS; cnt++) {
+		card->read.ccws[cnt].cmd_code = LCS_CCW_READ;
+		card->read.ccws[cnt].count = LCS_IOBUFFERSIZE;
+		card->read.ccws[cnt].flags =
+			CCW_FLAG_CC | CCW_FLAG_SLI | CCW_FLAG_PCI;
+		/*
+		 * Note: we have allocated the buffer with GFP_DMA, so
+		 * we do not need to do set_normalized_cda.
+		 */
+		card->read.ccws[cnt].cda =
+			(__u32) __pa(card->read.iob[cnt].data);
+		((struct lcs_header *)
+		 card->read.iob[cnt].data)->offset = LCS_ILLEGAL_OFFSET;
+		card->read.iob[cnt].callback = lcs_get_frames_cb;
+		card->read.iob[cnt].state = BUF_STATE_READY;
+		card->read.iob[cnt].count = LCS_IOBUFFERSIZE;
+	}
+	card->read.ccws[0].flags &= ~CCW_FLAG_PCI;
+	card->read.ccws[LCS_NUM_BUFFS - 1].flags &= ~CCW_FLAG_PCI;
+	card->read.ccws[LCS_NUM_BUFFS - 1].flags |= CCW_FLAG_SUSPEND;
+	/* Last ccw is a tic (transfer in channel). */
+	card->read.ccws[LCS_NUM_BUFFS].cmd_code = LCS_CCW_TRANSFER;
+	card->read.ccws[LCS_NUM_BUFFS].cda =
+		(__u32) __pa(card->read.ccws);
+	/* Set initial state of the read channel. */
+	card->read.state = CH_STATE_INIT;
+
+	card->read.io_idx = 0;
+	card->read.buf_idx = 0;
+}
+
+static void
+lcs_setup_read(struct lcs_card *card)
+{
+	LCS_DBF_TEXT(3, setup, "initread");
+
+	lcs_setup_read_ccws(card);
+	/* Initialize read channel tasklet. */
+	card->read.irq_tasklet.data = (unsigned long) &card->read;
+	card->read.irq_tasklet.func = lcs_tasklet;
+	/* Initialize waitqueue. */
+	init_waitqueue_head(&card->read.wait_q);
+}
+
+/*
+ * Setup write channel.
+ */
+static void
+lcs_setup_write_ccws(struct lcs_card *card)
+{
+	int cnt;
+
+	LCS_DBF_TEXT(3, setup, "iwritccw");
+	/* Setup write ccws. */
+	memset(card->write.ccws, 0, sizeof(struct ccw1) * (LCS_NUM_BUFFS + 1));
+	for (cnt = 0; cnt < LCS_NUM_BUFFS; cnt++) {
+		card->write.ccws[cnt].cmd_code = LCS_CCW_WRITE;
+		card->write.ccws[cnt].count = 0;
+		card->write.ccws[cnt].flags =
+			CCW_FLAG_SUSPEND | CCW_FLAG_CC | CCW_FLAG_SLI;
+		/*
+		 * Note: we have allocated the buffer with GFP_DMA, so
+		 * we do not need to do set_normalized_cda.
+		 */
+		card->write.ccws[cnt].cda =
+			(__u32) __pa(card->write.iob[cnt].data);
+	}
+	/* Last ccw is a tic (transfer in channel). */
+	card->write.ccws[LCS_NUM_BUFFS].cmd_code = LCS_CCW_TRANSFER;
+	card->write.ccws[LCS_NUM_BUFFS].cda =
+		(__u32) __pa(card->write.ccws);
+	/* Set initial state of the write channel. */
+	card->write.state = CH_STATE_INIT;
+
+	card->write.io_idx = 0;
+	card->write.buf_idx = 0;
+}
+
+static void
+lcs_setup_write(struct lcs_card *card)
+{
+	LCS_DBF_TEXT(3, setup, "initwrit");
+
+	lcs_setup_write_ccws(card);
+	/* Initialize write channel tasklet. */
+	card->write.irq_tasklet.data = (unsigned long) &card->write;
+	card->write.irq_tasklet.func = lcs_tasklet;
+	/* Initialize waitqueue.
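+	 * Writers that need a free buffer (see lcs_get_lancmd()) sleep
+	 * on this queue until one becomes available.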
*/ + init_waitqueue_head(&card->write.wait_q); +} + +static void +lcs_set_allowed_threads(struct lcs_card *card, unsigned long threads) +{ + unsigned long flags; + + spin_lock_irqsave(&card->mask_lock, flags); + card->thread_allowed_mask = threads; + spin_unlock_irqrestore(&card->mask_lock, flags); + wake_up(&card->wait_q); +} +static inline int +lcs_threads_running(struct lcs_card *card, unsigned long threads) +{ + unsigned long flags; + int rc = 0; + + spin_lock_irqsave(&card->mask_lock, flags); + rc = (card->thread_running_mask & threads); + spin_unlock_irqrestore(&card->mask_lock, flags); + return rc; +} + +static int +lcs_wait_for_threads(struct lcs_card *card, unsigned long threads) +{ + return wait_event_interruptible(card->wait_q, + lcs_threads_running(card, threads) == 0); +} + +static inline int +lcs_set_thread_start_bit(struct lcs_card *card, unsigned long thread) +{ + unsigned long flags; + + spin_lock_irqsave(&card->mask_lock, flags); + if ( !(card->thread_allowed_mask & thread) || + (card->thread_start_mask & thread) ) { + spin_unlock_irqrestore(&card->mask_lock, flags); + return -EPERM; + } + card->thread_start_mask |= thread; + spin_unlock_irqrestore(&card->mask_lock, flags); + return 0; +} + +static void +lcs_clear_thread_running_bit(struct lcs_card *card, unsigned long thread) +{ + unsigned long flags; + + spin_lock_irqsave(&card->mask_lock, flags); + card->thread_running_mask &= ~thread; + spin_unlock_irqrestore(&card->mask_lock, flags); + wake_up(&card->wait_q); +} + +static inline int +__lcs_do_run_thread(struct lcs_card *card, unsigned long thread) +{ + unsigned long flags; + int rc = 0; + + spin_lock_irqsave(&card->mask_lock, flags); + if (card->thread_start_mask & thread){ + if ((card->thread_allowed_mask & thread) && + !(card->thread_running_mask & thread)){ + rc = 1; + card->thread_start_mask &= ~thread; + card->thread_running_mask |= thread; + } else + rc = -EPERM; + } + spin_unlock_irqrestore(&card->mask_lock, flags); + return rc; +} + +static int +lcs_do_run_thread(struct lcs_card *card, unsigned long thread) +{ + int rc = 0; + wait_event(card->wait_q, + (rc = __lcs_do_run_thread(card, thread)) >= 0); + return rc; +} + +static int +lcs_do_start_thread(struct lcs_card *card, unsigned long thread) +{ + unsigned long flags; + int rc = 0; + + spin_lock_irqsave(&card->mask_lock, flags); + LCS_DBF_TEXT_(4, trace, " %02x%02x%02x", + (u8) card->thread_start_mask, + (u8) card->thread_allowed_mask, + (u8) card->thread_running_mask); + rc = (card->thread_start_mask & thread); + spin_unlock_irqrestore(&card->mask_lock, flags); + return rc; +} + +/** + * Initialize channels,card and state machines. + */ +static void +lcs_setup_card(struct lcs_card *card) +{ + LCS_DBF_TEXT(2, setup, "initcard"); + LCS_DBF_HEX(2, setup, &card, sizeof(void*)); + + lcs_setup_read(card); + lcs_setup_write(card); + /* Set cards initial state. */ + card->state = DEV_STATE_DOWN; + card->tx_buffer = NULL; + card->tx_emitted = 0; + + /* Initialize kernel thread task used for LGW commands. 
+	 */
+	INIT_WORK(&card->kernel_thread_starter,
+		  (void *)lcs_start_kernel_thread,card);
+	card->thread_start_mask = 0;
+	card->thread_allowed_mask = 0;
+	card->thread_running_mask = 0;
+	init_waitqueue_head(&card->wait_q);
+	spin_lock_init(&card->lock);
+	spin_lock_init(&card->ipm_lock);
+	spin_lock_init(&card->mask_lock);
+#ifdef CONFIG_IP_MULTICAST
+	INIT_LIST_HEAD(&card->ipm_list);
+#endif
+	INIT_LIST_HEAD(&card->lancmd_waiters);
+}
+
+static inline void
+lcs_clear_multicast_list(struct lcs_card *card)
+{
+#ifdef CONFIG_IP_MULTICAST
+	struct lcs_ipm_list *ipm;
+	unsigned long flags;
+
+	/* Free multicast list. */
+	LCS_DBF_TEXT(3, setup, "clmclist");
+	spin_lock_irqsave(&card->ipm_lock, flags);
+	while (!list_empty(&card->ipm_list)){
+		ipm = list_entry(card->ipm_list.next,
+				 struct lcs_ipm_list, list);
+		list_del(&ipm->list);
+		if (ipm->ipm_state != LCS_IPM_STATE_SET_REQUIRED){
+			spin_unlock_irqrestore(&card->ipm_lock, flags);
+			lcs_send_delipm(card, ipm);
+			spin_lock_irqsave(&card->ipm_lock, flags);
+		}
+		kfree(ipm);
+	}
+	spin_unlock_irqrestore(&card->ipm_lock, flags);
+#endif
+}
+/**
+ * Cleanup channels,card and state machines.
+ */
+static void
+lcs_cleanup_card(struct lcs_card *card)
+{
+
+	LCS_DBF_TEXT(3, setup, "cleancrd");
+	LCS_DBF_HEX(2,setup,&card,sizeof(void*));
+
+	if (card->dev != NULL)
+		free_netdev(card->dev);
+	/* Cleanup channels. */
+	lcs_cleanup_channel(&card->write);
+	lcs_cleanup_channel(&card->read);
+}
+
+/**
+ * Start channel.
+ */
+static int
+lcs_start_channel(struct lcs_channel *channel)
+{
+	unsigned long flags;
+	int rc;
+
+	LCS_DBF_TEXT_(4,trace,"ssch%s", channel->ccwdev->dev.bus_id);
+	spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
+	rc = ccw_device_start(channel->ccwdev,
+			      channel->ccws + channel->io_idx, 0, 0,
+			      DOIO_DENY_PREFETCH | DOIO_ALLOW_SUSPEND);
+	if (rc == 0)
+		channel->state = CH_STATE_RUNNING;
+	spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
+	if (rc) {
+		LCS_DBF_TEXT_(4,trace,"essh%s", channel->ccwdev->dev.bus_id);
+		PRINT_ERR("Error in starting channel, rc=%d!\n", rc);
+	}
+	return rc;
+}
+
+static int
+lcs_clear_channel(struct lcs_channel *channel)
+{
+	unsigned long flags;
+	int rc;
+
+	LCS_DBF_TEXT(4,trace,"clearch");
+	LCS_DBF_TEXT_(4,trace,"%s", channel->ccwdev->dev.bus_id);
+	spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
+	rc = ccw_device_clear(channel->ccwdev, (addr_t) channel);
+	spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
+	if (rc) {
+		LCS_DBF_TEXT_(4,trace,"ecsc%s", channel->ccwdev->dev.bus_id);
+		return rc;
+	}
+	wait_event(channel->wait_q, (channel->state == CH_STATE_CLEARED));
+	channel->state = CH_STATE_STOPPED;
+	return rc;
+}
+
+
+/**
+ * Stop channel.
+ */
+static int
+lcs_stop_channel(struct lcs_channel *channel)
+{
+	unsigned long flags;
+	int rc;
+
+	if (channel->state == CH_STATE_STOPPED)
+		return 0;
+	LCS_DBF_TEXT(4,trace,"haltsch");
+	LCS_DBF_TEXT_(4,trace,"%s", channel->ccwdev->dev.bus_id);
+	channel->state = CH_STATE_INIT;
+	spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
+	rc = ccw_device_halt(channel->ccwdev, (addr_t) channel);
+	spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
+	if (rc) {
+		LCS_DBF_TEXT_(4,trace,"ehsc%s", channel->ccwdev->dev.bus_id);
+		return rc;
+	}
+	/* Asynchronous halt initiated. Wait for its completion.
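+	 * The channel interrupt handler is expected to advance the state
+	 * to CH_STATE_HALTED, which satisfies the wait below.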
*/ + wait_event(channel->wait_q, (channel->state == CH_STATE_HALTED)); + lcs_clear_channel(channel); + return 0; +} + +/** + * start read and write channel + */ +static int +lcs_start_channels(struct lcs_card *card) +{ + int rc; + + LCS_DBF_TEXT(2, trace, "chstart"); + /* start read channel */ + rc = lcs_start_channel(&card->read); + if (rc) + return rc; + /* start write channel */ + rc = lcs_start_channel(&card->write); + if (rc) + lcs_stop_channel(&card->read); + return rc; +} + +/** + * stop read and write channel + */ +static int +lcs_stop_channels(struct lcs_card *card) +{ + LCS_DBF_TEXT(2, trace, "chhalt"); + lcs_stop_channel(&card->read); + lcs_stop_channel(&card->write); + return 0; +} + +/** + * Get empty buffer. + */ +static struct lcs_buffer * +__lcs_get_buffer(struct lcs_channel *channel) +{ + int index; + + LCS_DBF_TEXT(5, trace, "_getbuff"); + index = channel->io_idx; + do { + if (channel->iob[index].state == BUF_STATE_EMPTY) { + channel->iob[index].state = BUF_STATE_LOCKED; + return channel->iob + index; + } + index = (index + 1) & (LCS_NUM_BUFFS - 1); + } while (index != channel->io_idx); + return NULL; +} + +static struct lcs_buffer * +lcs_get_buffer(struct lcs_channel *channel) +{ + struct lcs_buffer *buffer; + unsigned long flags; + + LCS_DBF_TEXT(5, trace, "getbuff"); + spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags); + buffer = __lcs_get_buffer(channel); + spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags); + return buffer; +} + +/** + * Resume channel program if the channel is suspended. + */ +static int +__lcs_resume_channel(struct lcs_channel *channel) +{ + int rc; + + if (channel->state != CH_STATE_SUSPENDED) + return 0; + if (channel->ccws[channel->io_idx].flags & CCW_FLAG_SUSPEND) + return 0; + LCS_DBF_TEXT_(5, trace, "rsch%s", channel->ccwdev->dev.bus_id); + rc = ccw_device_resume(channel->ccwdev); + if (rc) { + LCS_DBF_TEXT_(4, trace, "ersc%s", channel->ccwdev->dev.bus_id); + PRINT_ERR("Error in lcs_resume_channel: rc=%d\n",rc); + } else + channel->state = CH_STATE_RUNNING; + return rc; + +} + +/** + * Make a buffer ready for processing. + */ +static inline void +__lcs_ready_buffer_bits(struct lcs_channel *channel, int index) +{ + int prev, next; + + LCS_DBF_TEXT(5, trace, "rdybits"); + prev = (index - 1) & (LCS_NUM_BUFFS - 1); + next = (index + 1) & (LCS_NUM_BUFFS - 1); + /* Check if we may clear the suspend bit of this buffer. */ + if (channel->ccws[next].flags & CCW_FLAG_SUSPEND) { + /* Check if we have to set the PCI bit. */ + if (!(channel->ccws[prev].flags & CCW_FLAG_SUSPEND)) + /* Suspend bit of the previous buffer is not set. */ + channel->ccws[index].flags |= CCW_FLAG_PCI; + /* Suspend bit of the next buffer is set. */ + channel->ccws[index].flags &= ~CCW_FLAG_SUSPEND; + } +} + +static int +lcs_ready_buffer(struct lcs_channel *channel, struct lcs_buffer *buffer) +{ + unsigned long flags; + int index, rc; + + LCS_DBF_TEXT(5, trace, "rdybuff"); + if (buffer->state != BUF_STATE_LOCKED && + buffer->state != BUF_STATE_PROCESSED) + BUG(); + spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags); + buffer->state = BUF_STATE_READY; + index = buffer - channel->iob; + /* Set length. */ + channel->ccws[index].count = buffer->count; + /* Check relevant PCI/suspend bits. */ + __lcs_ready_buffer_bits(channel, index); + rc = __lcs_resume_channel(channel); + spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags); + return rc; +} + +/** + * Mark the buffer as processed. Take care of the suspend bit + * of the previous buffer. 
This function is called from + * interrupt context, so the lock must not be taken. + */ +static int +__lcs_processed_buffer(struct lcs_channel *channel, struct lcs_buffer *buffer) +{ + int index, prev, next; + + LCS_DBF_TEXT(5, trace, "prcsbuff"); + if (buffer->state != BUF_STATE_READY) + BUG(); + buffer->state = BUF_STATE_PROCESSED; + index = buffer - channel->iob; + prev = (index - 1) & (LCS_NUM_BUFFS - 1); + next = (index + 1) & (LCS_NUM_BUFFS - 1); + /* Set the suspend bit and clear the PCI bit of this buffer. */ + channel->ccws[index].flags |= CCW_FLAG_SUSPEND; + channel->ccws[index].flags &= ~CCW_FLAG_PCI; + /* Check the suspend bit of the previous buffer. */ + if (channel->iob[prev].state == BUF_STATE_READY) { + /* + * Previous buffer is in state ready. It might have + * happened in lcs_ready_buffer that the suspend bit + * has not been cleared to avoid an endless loop. + * Do it now. + */ + __lcs_ready_buffer_bits(channel, prev); + } + /* Clear PCI bit of next buffer. */ + channel->ccws[next].flags &= ~CCW_FLAG_PCI; + return __lcs_resume_channel(channel); +} + +/** + * Put a processed buffer back to state empty. + */ +static void +lcs_release_buffer(struct lcs_channel *channel, struct lcs_buffer *buffer) +{ + unsigned long flags; + + LCS_DBF_TEXT(5, trace, "relbuff"); + if (buffer->state != BUF_STATE_LOCKED && + buffer->state != BUF_STATE_PROCESSED) + BUG(); + spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags); + buffer->state = BUF_STATE_EMPTY; + spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags); +} + +/** + * Get buffer for a lan command. + */ +static struct lcs_buffer * +lcs_get_lancmd(struct lcs_card *card, int count) +{ + struct lcs_buffer *buffer; + struct lcs_cmd *cmd; + + LCS_DBF_TEXT(4, trace, "getlncmd"); + /* Get buffer and wait if none is available. */ + wait_event(card->write.wait_q, + ((buffer = lcs_get_buffer(&card->write)) != NULL)); + count += sizeof(struct lcs_header); + *(__u16 *)(buffer->data + count) = 0; + buffer->count = count + sizeof(__u16); + buffer->callback = lcs_release_buffer; + cmd = (struct lcs_cmd *) buffer->data; + cmd->offset = count; + cmd->type = LCS_FRAME_TYPE_CONTROL; + cmd->slot = 0; + return buffer; +} + + +static void +lcs_get_reply(struct lcs_reply *reply) +{ + WARN_ON(atomic_read(&reply->refcnt) <= 0); + atomic_inc(&reply->refcnt); +} + +static void +lcs_put_reply(struct lcs_reply *reply) +{ + WARN_ON(atomic_read(&reply->refcnt) <= 0); + if (atomic_dec_and_test(&reply->refcnt)) { + kfree(reply); + } + +} + +static struct lcs_reply * +lcs_alloc_reply(struct lcs_cmd *cmd) +{ + struct lcs_reply *reply; + + LCS_DBF_TEXT(4, trace, "getreply"); + + reply = kmalloc(sizeof(struct lcs_reply), GFP_ATOMIC); + if (!reply) + return NULL; + memset(reply,0,sizeof(struct lcs_reply)); + atomic_set(&reply->refcnt,1); + reply->sequence_no = cmd->sequence_no; + reply->received = 0; + reply->rc = 0; + init_waitqueue_head(&reply->wait_q); + + return reply; +} + +/** + * Notifier function for lancmd replies. Called from read irq. 
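+ *
+ * (Editor's summary of the reply life cycle, taken from the surrounding
+ * code: lcs_alloc_reply() creates the reply with refcnt == 1, owned by
+ * lcs_send_lancmd(); this notifier and the timeout handler below each
+ * take a temporary reference with lcs_get_reply() before unlinking the
+ * entry, so whichever lcs_put_reply() runs last frees the reply exactly
+ * once.)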
+ */
+static void
+lcs_notify_lancmd_waiters(struct lcs_card *card, struct lcs_cmd *cmd)
+{
+	struct list_head *l, *n;
+	struct lcs_reply *reply;
+
+	LCS_DBF_TEXT(4, trace, "notiwait");
+	spin_lock(&card->lock);
+	list_for_each_safe(l, n, &card->lancmd_waiters) {
+		reply = list_entry(l, struct lcs_reply, list);
+		if (reply->sequence_no == cmd->sequence_no) {
+			lcs_get_reply(reply);
+			list_del_init(&reply->list);
+			if (reply->callback != NULL)
+				reply->callback(card, cmd);
+			reply->received = 1;
+			reply->rc = cmd->return_code;
+			wake_up(&reply->wait_q);
+			lcs_put_reply(reply);
+			break;
+		}
+	}
+	spin_unlock(&card->lock);
+}
+
+/**
+ * Timeout handler for lan commands.
+ */
+void
+lcs_lancmd_timeout(unsigned long data)
+{
+	struct lcs_reply *reply, *list_reply, *r;
+	unsigned long flags;
+
+	LCS_DBF_TEXT(4, trace, "timeout");
+	reply = (struct lcs_reply *) data;
+	spin_lock_irqsave(&reply->card->lock, flags);
+	list_for_each_entry_safe(list_reply, r,
+				 &reply->card->lancmd_waiters,list) {
+		if (reply == list_reply) {
+			lcs_get_reply(reply);
+			list_del_init(&reply->list);
+			spin_unlock_irqrestore(&reply->card->lock, flags);
+			reply->received = 1;
+			reply->rc = -ETIME;
+			wake_up(&reply->wait_q);
+			lcs_put_reply(reply);
+			return;
+		}
+	}
+	spin_unlock_irqrestore(&reply->card->lock, flags);
+}
+
+/**
+ * Emit buffer of a lan command.
+ */
+static int
+lcs_send_lancmd(struct lcs_card *card, struct lcs_buffer *buffer,
+		void (*reply_callback)(struct lcs_card *, struct lcs_cmd *))
+{
+	struct lcs_reply *reply;
+	struct lcs_cmd *cmd;
+	struct timer_list timer;
+	unsigned long flags;
+	int rc;
+
+	LCS_DBF_TEXT(4, trace, "sendcmd");
+	cmd = (struct lcs_cmd *) buffer->data;
+	cmd->return_code = 0;
+	cmd->sequence_no = card->sequence_no++;
+	reply = lcs_alloc_reply(cmd);
+	if (!reply)
+		return -ENOMEM;
+	reply->callback = reply_callback;
+	reply->card = card;
+	spin_lock_irqsave(&card->lock, flags);
+	list_add_tail(&reply->list, &card->lancmd_waiters);
+	spin_unlock_irqrestore(&card->lock, flags);
+
+	buffer->callback = lcs_release_buffer;
+	rc = lcs_ready_buffer(&card->write, buffer);
+	if (rc)
+		return rc;
+	init_timer(&timer);
+	timer.function = lcs_lancmd_timeout;
+	timer.data = (unsigned long) reply;
+	timer.expires = jiffies + HZ*card->lancmd_timeout;
+	add_timer(&timer);
+	wait_event(reply->wait_q, reply->received);
+	del_timer_sync(&timer);
+	LCS_DBF_TEXT_(4, trace, "rc:%d",reply->rc);
+	rc = reply->rc;
+	lcs_put_reply(reply);
+	return rc ?
-EIO : 0; +} + +/** + * LCS startup command + */ +static int +lcs_send_startup(struct lcs_card *card, __u8 initiator) +{ + struct lcs_buffer *buffer; + struct lcs_cmd *cmd; + + LCS_DBF_TEXT(2, trace, "startup"); + buffer = lcs_get_lancmd(card, LCS_STD_CMD_SIZE); + cmd = (struct lcs_cmd *) buffer->data; + cmd->cmd_code = LCS_CMD_STARTUP; + cmd->initiator = initiator; + cmd->cmd.lcs_startup.buff_size = LCS_IOBUFFERSIZE; + return lcs_send_lancmd(card, buffer, NULL); +} + +/** + * LCS shutdown command + */ +static int +lcs_send_shutdown(struct lcs_card *card) +{ + struct lcs_buffer *buffer; + struct lcs_cmd *cmd; + + LCS_DBF_TEXT(2, trace, "shutdown"); + buffer = lcs_get_lancmd(card, LCS_STD_CMD_SIZE); + cmd = (struct lcs_cmd *) buffer->data; + cmd->cmd_code = LCS_CMD_SHUTDOWN; + cmd->initiator = LCS_INITIATOR_TCPIP; + return lcs_send_lancmd(card, buffer, NULL); +} + +/** + * LCS lanstat command + */ +static void +__lcs_lanstat_cb(struct lcs_card *card, struct lcs_cmd *cmd) +{ + LCS_DBF_TEXT(2, trace, "statcb"); + memcpy(card->mac, cmd->cmd.lcs_lanstat_cmd.mac_addr, LCS_MAC_LENGTH); +} + +static int +lcs_send_lanstat(struct lcs_card *card) +{ + struct lcs_buffer *buffer; + struct lcs_cmd *cmd; + + LCS_DBF_TEXT(2,trace, "cmdstat"); + buffer = lcs_get_lancmd(card, LCS_STD_CMD_SIZE); + cmd = (struct lcs_cmd *) buffer->data; + /* Setup lanstat command. */ + cmd->cmd_code = LCS_CMD_LANSTAT; + cmd->initiator = LCS_INITIATOR_TCPIP; + cmd->cmd.lcs_std_cmd.lan_type = card->lan_type; + cmd->cmd.lcs_std_cmd.portno = card->portno; + return lcs_send_lancmd(card, buffer, __lcs_lanstat_cb); +} + +/** + * send stoplan command + */ +static int +lcs_send_stoplan(struct lcs_card *card, __u8 initiator) +{ + struct lcs_buffer *buffer; + struct lcs_cmd *cmd; + + LCS_DBF_TEXT(2, trace, "cmdstpln"); + buffer = lcs_get_lancmd(card, LCS_STD_CMD_SIZE); + cmd = (struct lcs_cmd *) buffer->data; + cmd->cmd_code = LCS_CMD_STOPLAN; + cmd->initiator = initiator; + cmd->cmd.lcs_std_cmd.lan_type = card->lan_type; + cmd->cmd.lcs_std_cmd.portno = card->portno; + return lcs_send_lancmd(card, buffer, NULL); +} + +/** + * send startlan command + */ +static void +__lcs_send_startlan_cb(struct lcs_card *card, struct lcs_cmd *cmd) +{ + LCS_DBF_TEXT(2, trace, "srtlancb"); + card->lan_type = cmd->cmd.lcs_std_cmd.lan_type; + card->portno = cmd->cmd.lcs_std_cmd.portno; +} + +static int +lcs_send_startlan(struct lcs_card *card, __u8 initiator) +{ + struct lcs_buffer *buffer; + struct lcs_cmd *cmd; + + LCS_DBF_TEXT(2, trace, "cmdstaln"); + buffer = lcs_get_lancmd(card, LCS_STD_CMD_SIZE); + cmd = (struct lcs_cmd *) buffer->data; + cmd->cmd_code = LCS_CMD_STARTLAN; + cmd->initiator = initiator; + cmd->cmd.lcs_std_cmd.lan_type = card->lan_type; + cmd->cmd.lcs_std_cmd.portno = card->portno; + return lcs_send_lancmd(card, buffer, __lcs_send_startlan_cb); +} + +#ifdef CONFIG_IP_MULTICAST +/** + * send setipm command (Multicast) + */ +static int +lcs_send_setipm(struct lcs_card *card,struct lcs_ipm_list *ipm_list) +{ + struct lcs_buffer *buffer; + struct lcs_cmd *cmd; + + LCS_DBF_TEXT(2, trace, "cmdsetim"); + buffer = lcs_get_lancmd(card, LCS_MULTICAST_CMD_SIZE); + cmd = (struct lcs_cmd *) buffer->data; + cmd->cmd_code = LCS_CMD_SETIPM; + cmd->initiator = LCS_INITIATOR_TCPIP; + cmd->cmd.lcs_qipassist.lan_type = card->lan_type; + cmd->cmd.lcs_qipassist.portno = card->portno; + cmd->cmd.lcs_qipassist.version = 4; + cmd->cmd.lcs_qipassist.num_ip_pairs = 1; + memcpy(cmd->cmd.lcs_qipassist.lcs_ipass_ctlmsg.ip_mac_pair, + &ipm_list->ipm, sizeof (struct 
lcs_ip_mac_pair)); + LCS_DBF_TEXT_(2, trace, "%x",ipm_list->ipm.ip_addr); + return lcs_send_lancmd(card, buffer, NULL); +} + +/** + * send delipm command (Multicast) + */ +static int +lcs_send_delipm(struct lcs_card *card,struct lcs_ipm_list *ipm_list) +{ + struct lcs_buffer *buffer; + struct lcs_cmd *cmd; + + LCS_DBF_TEXT(2, trace, "cmddelim"); + buffer = lcs_get_lancmd(card, LCS_MULTICAST_CMD_SIZE); + cmd = (struct lcs_cmd *) buffer->data; + cmd->cmd_code = LCS_CMD_DELIPM; + cmd->initiator = LCS_INITIATOR_TCPIP; + cmd->cmd.lcs_qipassist.lan_type = card->lan_type; + cmd->cmd.lcs_qipassist.portno = card->portno; + cmd->cmd.lcs_qipassist.version = 4; + cmd->cmd.lcs_qipassist.num_ip_pairs = 1; + memcpy(cmd->cmd.lcs_qipassist.lcs_ipass_ctlmsg.ip_mac_pair, + &ipm_list->ipm, sizeof (struct lcs_ip_mac_pair)); + LCS_DBF_TEXT_(2, trace, "%x",ipm_list->ipm.ip_addr); + return lcs_send_lancmd(card, buffer, NULL); +} + +/** + * check if multicast is supported by LCS + */ +static void +__lcs_check_multicast_cb(struct lcs_card *card, struct lcs_cmd *cmd) +{ + LCS_DBF_TEXT(2, trace, "chkmccb"); + card->ip_assists_supported = + cmd->cmd.lcs_qipassist.ip_assists_supported; + card->ip_assists_enabled = + cmd->cmd.lcs_qipassist.ip_assists_enabled; +} + +static int +lcs_check_multicast_support(struct lcs_card *card) +{ + struct lcs_buffer *buffer; + struct lcs_cmd *cmd; + int rc; + + LCS_DBF_TEXT(2, trace, "cmdqipa"); + /* Send query ipassist. */ + buffer = lcs_get_lancmd(card, LCS_STD_CMD_SIZE); + cmd = (struct lcs_cmd *) buffer->data; + cmd->cmd_code = LCS_CMD_QIPASSIST; + cmd->initiator = LCS_INITIATOR_TCPIP; + cmd->cmd.lcs_qipassist.lan_type = card->lan_type; + cmd->cmd.lcs_qipassist.portno = card->portno; + cmd->cmd.lcs_qipassist.version = 4; + cmd->cmd.lcs_qipassist.num_ip_pairs = 1; + rc = lcs_send_lancmd(card, buffer, __lcs_check_multicast_cb); + if (rc != 0) { + PRINT_ERR("Query IPAssist failed. Assuming unsupported!\n"); + return -EOPNOTSUPP; + } + /* Print out supported assists: IPv6 */ + PRINT_INFO("LCS device %s %s IPv6 support\n", card->dev->name, + (card->ip_assists_supported & LCS_IPASS_IPV6_SUPPORT) ? + "with" : "without"); + /* Print out supported assist: Multicast */ + PRINT_INFO("LCS device %s %s Multicast support\n", card->dev->name, + (card->ip_assists_supported & LCS_IPASS_MULTICAST_SUPPORT) ? + "with" : "without"); + if (card->ip_assists_supported & LCS_IPASS_MULTICAST_SUPPORT) + return 0; + return -EOPNOTSUPP; +} + +/** + * set or del multicast address on LCS card + */ +static void +lcs_fix_multicast_list(struct lcs_card *card) +{ + struct list_head failed_list; + struct lcs_ipm_list *ipm, *tmp; + unsigned long flags; + int rc; + + LCS_DBF_TEXT(4,trace, "fixipm"); + INIT_LIST_HEAD(&failed_list); + spin_lock_irqsave(&card->ipm_lock, flags); +list_modified: + list_for_each_entry_safe(ipm, tmp, &card->ipm_list, list){ + switch (ipm->ipm_state) { + case LCS_IPM_STATE_SET_REQUIRED: + /* del from ipm_list so noone else can tamper with + * this entry */ + list_del_init(&ipm->list); + spin_unlock_irqrestore(&card->ipm_lock, flags); + rc = lcs_send_setipm(card, ipm); + spin_lock_irqsave(&card->ipm_lock, flags); + if (rc) { + PRINT_INFO("Adding multicast address failed." 
+ "Table possibly full!\n"); + /* store ipm in failed list -> will be added + * to ipm_list again, so a retry will be done + * during the next call of this function */ + list_add_tail(&ipm->list, &failed_list); + } else { + ipm->ipm_state = LCS_IPM_STATE_ON_CARD; + /* re-insert into ipm_list */ + list_add_tail(&ipm->list, &card->ipm_list); + } + goto list_modified; + case LCS_IPM_STATE_DEL_REQUIRED: + list_del(&ipm->list); + spin_unlock_irqrestore(&card->ipm_lock, flags); + lcs_send_delipm(card, ipm); + spin_lock_irqsave(&card->ipm_lock, flags); + kfree(ipm); + goto list_modified; + case LCS_IPM_STATE_ON_CARD: + break; + } + } + /* re-insert all entries from the failed_list into ipm_list */ + list_for_each_entry(ipm, &failed_list, list) { + list_del_init(&ipm->list); + list_add_tail(&ipm->list, &card->ipm_list); + } + spin_unlock_irqrestore(&card->ipm_lock, flags); + if (card->state == DEV_STATE_UP) + netif_wake_queue(card->dev); +} + +/** + * get mac address for the relevant Multicast address + */ +static void +lcs_get_mac_for_ipm(__u32 ipm, char *mac, struct net_device *dev) +{ + LCS_DBF_TEXT(4,trace, "getmac"); + if (dev->type == ARPHRD_IEEE802_TR) + ip_tr_mc_map(ipm, mac); + else + ip_eth_mc_map(ipm, mac); +} + +/** + * function called by net device to handle multicast address relevant things + */ +static inline void +lcs_remove_mc_addresses(struct lcs_card *card, struct in_device *in4_dev) +{ + struct ip_mc_list *im4; + struct list_head *l; + struct lcs_ipm_list *ipm; + unsigned long flags; + char buf[MAX_ADDR_LEN]; + + LCS_DBF_TEXT(4, trace, "remmclst"); + spin_lock_irqsave(&card->ipm_lock, flags); + list_for_each(l, &card->ipm_list) { + ipm = list_entry(l, struct lcs_ipm_list, list); + for (im4 = in4_dev->mc_list; im4 != NULL; im4 = im4->next) { + lcs_get_mac_for_ipm(im4->multiaddr, buf, card->dev); + if ( (ipm->ipm.ip_addr == im4->multiaddr) && + (memcmp(buf, &ipm->ipm.mac_addr, + LCS_MAC_LENGTH) == 0) ) + break; + } + if (im4 == NULL) + ipm->ipm_state = LCS_IPM_STATE_DEL_REQUIRED; + } + spin_unlock_irqrestore(&card->ipm_lock, flags); +} + +static inline struct lcs_ipm_list * +lcs_check_addr_entry(struct lcs_card *card, struct ip_mc_list *im4, char *buf) +{ + struct lcs_ipm_list *tmp, *ipm = NULL; + struct list_head *l; + unsigned long flags; + + LCS_DBF_TEXT(4, trace, "chkmcent"); + spin_lock_irqsave(&card->ipm_lock, flags); + list_for_each(l, &card->ipm_list) { + tmp = list_entry(l, struct lcs_ipm_list, list); + if ( (tmp->ipm.ip_addr == im4->multiaddr) && + (memcmp(buf, &tmp->ipm.mac_addr, + LCS_MAC_LENGTH) == 0) ) { + ipm = tmp; + break; + } + } + spin_unlock_irqrestore(&card->ipm_lock, flags); + return ipm; +} + +static inline void +lcs_set_mc_addresses(struct lcs_card *card, struct in_device *in4_dev) +{ + + struct ip_mc_list *im4; + struct lcs_ipm_list *ipm; + char buf[MAX_ADDR_LEN]; + unsigned long flags; + + LCS_DBF_TEXT(4, trace, "setmclst"); + for (im4 = in4_dev->mc_list; im4; im4 = im4->next) { + lcs_get_mac_for_ipm(im4->multiaddr, buf, card->dev); + ipm = lcs_check_addr_entry(card, im4, buf); + if (ipm != NULL) + continue; /* Address already in list. 
*/ + ipm = (struct lcs_ipm_list *) + kmalloc(sizeof(struct lcs_ipm_list), GFP_ATOMIC); + if (ipm == NULL) { + PRINT_INFO("Not enough memory to add " + "new multicast entry!\n"); + break; + } + memset(ipm, 0, sizeof(struct lcs_ipm_list)); + memcpy(&ipm->ipm.mac_addr, buf, LCS_MAC_LENGTH); + ipm->ipm.ip_addr = im4->multiaddr; + ipm->ipm_state = LCS_IPM_STATE_SET_REQUIRED; + spin_lock_irqsave(&card->ipm_lock, flags); + list_add(&ipm->list, &card->ipm_list); + spin_unlock_irqrestore(&card->ipm_lock, flags); + } +} + +static int +lcs_register_mc_addresses(void *data) +{ + struct lcs_card *card; + struct in_device *in4_dev; + + card = (struct lcs_card *) data; + daemonize("regipm"); + + if (!lcs_do_run_thread(card, LCS_SET_MC_THREAD)) + return 0; + LCS_DBF_TEXT(4, trace, "regmulti"); + + in4_dev = in_dev_get(card->dev); + if (in4_dev == NULL) + goto out; + read_lock(&in4_dev->mc_list_lock); + lcs_remove_mc_addresses(card,in4_dev); + lcs_set_mc_addresses(card, in4_dev); + read_unlock(&in4_dev->mc_list_lock); + in_dev_put(in4_dev); + + lcs_fix_multicast_list(card); +out: + lcs_clear_thread_running_bit(card, LCS_SET_MC_THREAD); + return 0; +} +/** + * function called by net device to + * handle multicast address relevant things + */ +static void +lcs_set_multicast_list(struct net_device *dev) +{ + struct lcs_card *card; + + LCS_DBF_TEXT(4, trace, "setmulti"); + card = (struct lcs_card *) dev->priv; + + if (!lcs_set_thread_start_bit(card, LCS_SET_MC_THREAD)) { + schedule_work(&card->kernel_thread_starter); + } +} + +#endif /* CONFIG_IP_MULTICAST */ + +static long +lcs_check_irb_error(struct ccw_device *cdev, struct irb *irb) +{ + if (!IS_ERR(irb)) + return 0; + + switch (PTR_ERR(irb)) { + case -EIO: + PRINT_WARN("i/o-error on device %s\n", cdev->dev.bus_id); + LCS_DBF_TEXT(2, trace, "ckirberr"); + LCS_DBF_TEXT_(2, trace, " rc%d", -EIO); + break; + case -ETIMEDOUT: + PRINT_WARN("timeout on device %s\n", cdev->dev.bus_id); + LCS_DBF_TEXT(2, trace, "ckirberr"); + LCS_DBF_TEXT_(2, trace, " rc%d", -ETIMEDOUT); + break; + default: + PRINT_WARN("unknown error %ld on device %s\n", PTR_ERR(irb), + cdev->dev.bus_id); + LCS_DBF_TEXT(2, trace, "ckirberr"); + LCS_DBF_TEXT(2, trace, " rc???"); + } + return PTR_ERR(irb); +} + + +/** + * IRQ Handler for LCS channels + */ +static void +lcs_irq(struct ccw_device *cdev, unsigned long intparm, struct irb *irb) +{ + struct lcs_card *card; + struct lcs_channel *channel; + int index; + + if (lcs_check_irb_error(cdev, irb)) + return; + + card = CARD_FROM_DEV(cdev); + if (card->read.ccwdev == cdev) + channel = &card->read; + else + channel = &card->write; + + LCS_DBF_TEXT_(5, trace, "Rint%s",cdev->dev.bus_id); + LCS_DBF_TEXT_(5, trace, "%4x%4x",irb->scsw.cstat, irb->scsw.dstat); + LCS_DBF_TEXT_(5, trace, "%4x%4x",irb->scsw.fctl, irb->scsw.actl); + + /* How far in the ccw chain have we processed? */ + if ((channel->state != CH_STATE_INIT) && + (irb->scsw.fctl & SCSW_FCTL_START_FUNC)) { + index = (struct ccw1 *) __va((addr_t) irb->scsw.cpa) + - channel->ccws; + if ((irb->scsw.actl & SCSW_ACTL_SUSPENDED) || + (irb->scsw.cstat | SCHN_STAT_PCI)) + /* Bloody io subsystem tells us lies about cpa... 
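+			 * Editor's note: cpa names the CCW the subchannel
+			 * reports as current, which can already be the one
+			 * after the last completed buffer when the channel
+			 * suspended or raised a PCI; stepping the index back
+			 * one slot below compensates for that before retiring
+			 * buffers up to io_idx.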
*/ + index = (index - 1) & (LCS_NUM_BUFFS - 1); + while (channel->io_idx != index) { + __lcs_processed_buffer(channel, + channel->iob + channel->io_idx); + channel->io_idx = + (channel->io_idx + 1) & (LCS_NUM_BUFFS - 1); + } + } + + if ((irb->scsw.dstat & DEV_STAT_DEV_END) || + (irb->scsw.dstat & DEV_STAT_CHN_END) || + (irb->scsw.dstat & DEV_STAT_UNIT_CHECK)) + /* Mark channel as stopped. */ + channel->state = CH_STATE_STOPPED; + else if (irb->scsw.actl & SCSW_ACTL_SUSPENDED) + /* CCW execution stopped on a suspend bit. */ + channel->state = CH_STATE_SUSPENDED; + + if (irb->scsw.fctl & SCSW_FCTL_HALT_FUNC) { + if (irb->scsw.cc != 0) { + ccw_device_halt(channel->ccwdev, (addr_t) channel); + return; + } + /* The channel has been stopped by halt_IO. */ + channel->state = CH_STATE_HALTED; + } + + if (irb->scsw.fctl & SCSW_FCTL_CLEAR_FUNC) { + channel->state = CH_STATE_CLEARED; + } + /* Do the rest in the tasklet. */ + tasklet_schedule(&channel->irq_tasklet); +} + +/** + * Tasklet for IRQ handler + */ +static void +lcs_tasklet(unsigned long data) +{ + unsigned long flags; + struct lcs_channel *channel; + struct lcs_buffer *iob; + int buf_idx; + int rc; + + channel = (struct lcs_channel *) data; + LCS_DBF_TEXT_(5, trace, "tlet%s",channel->ccwdev->dev.bus_id); + + /* Check for processed buffers. */ + iob = channel->iob; + buf_idx = channel->buf_idx; + while (iob[buf_idx].state == BUF_STATE_PROCESSED) { + /* Do the callback thing. */ + if (iob[buf_idx].callback != NULL) + iob[buf_idx].callback(channel, iob + buf_idx); + buf_idx = (buf_idx + 1) & (LCS_NUM_BUFFS - 1); + } + channel->buf_idx = buf_idx; + + if (channel->state == CH_STATE_STOPPED) + // FIXME: what if rc != 0 ?? + rc = lcs_start_channel(channel); + spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags); + if (channel->state == CH_STATE_SUSPENDED && + channel->iob[channel->io_idx].state == BUF_STATE_READY) { + // FIXME: what if rc != 0 ?? + rc = __lcs_resume_channel(channel); + } + spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags); + + /* Something happened on the channel. Wake up waiters. */ + wake_up(&channel->wait_q); +} + +/** + * Finish current tx buffer and make it ready for transmit. + */ +static void +__lcs_emit_txbuffer(struct lcs_card *card) +{ + LCS_DBF_TEXT(5, trace, "emittx"); + *(__u16 *)(card->tx_buffer->data + card->tx_buffer->count) = 0; + card->tx_buffer->count += 2; + lcs_ready_buffer(&card->write, card->tx_buffer); + card->tx_buffer = NULL; + card->tx_emitted++; +} + +/** + * Callback for finished tx buffers. + */ +static void +lcs_txbuffer_cb(struct lcs_channel *channel, struct lcs_buffer *buffer) +{ + struct lcs_card *card; + + LCS_DBF_TEXT(5, trace, "txbuffcb"); + /* Put buffer back to pool. */ + lcs_release_buffer(channel, buffer); + card = (struct lcs_card *) + ((char *) channel - offsetof(struct lcs_card, write)); + spin_lock(&card->lock); + card->tx_emitted--; + if (card->tx_emitted <= 0 && card->tx_buffer != NULL) + /* + * Last running tx buffer has finished. Submit partially + * filled current buffer. 
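+		 *
+		 * Editor's note on the batching scheme: __lcs_start_xmit()
+		 * copies frames into card->tx_buffer and emits the buffer
+		 * only when the next frame would overflow it or when no
+		 * other buffer is in flight (tx_emitted <= 0); emitting the
+		 * partial buffer here keeps a lightly loaded interface from
+		 * stalling while it waits for the buffer to fill up.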
+ */ + __lcs_emit_txbuffer(card); + spin_unlock(&card->lock); +} + +/** + * Packet transmit function called by network stack + */ +static int +__lcs_start_xmit(struct lcs_card *card, struct sk_buff *skb, + struct net_device *dev) +{ + struct lcs_header *header; + + LCS_DBF_TEXT(5, trace, "hardxmit"); + if (skb == NULL) { + card->stats.tx_dropped++; + card->stats.tx_errors++; + return -EIO; + } + if (card->state != DEV_STATE_UP) { + dev_kfree_skb(skb); + card->stats.tx_dropped++; + card->stats.tx_errors++; + card->stats.tx_carrier_errors++; + return 0; + } + if (netif_queue_stopped(dev) ) { + card->stats.tx_dropped++; + return -EBUSY; + } + if (card->tx_buffer != NULL && + card->tx_buffer->count + sizeof(struct lcs_header) + + skb->len + sizeof(u16) > LCS_IOBUFFERSIZE) + /* skb too big for current tx buffer. */ + __lcs_emit_txbuffer(card); + if (card->tx_buffer == NULL) { + /* Get new tx buffer */ + card->tx_buffer = lcs_get_buffer(&card->write); + if (card->tx_buffer == NULL) { + card->stats.tx_dropped++; + return -EBUSY; + } + card->tx_buffer->callback = lcs_txbuffer_cb; + card->tx_buffer->count = 0; + } + header = (struct lcs_header *) + (card->tx_buffer->data + card->tx_buffer->count); + card->tx_buffer->count += skb->len + sizeof(struct lcs_header); + header->offset = card->tx_buffer->count; + header->type = card->lan_type; + header->slot = card->portno; + memcpy(header + 1, skb->data, skb->len); + card->stats.tx_bytes += skb->len; + card->stats.tx_packets++; + dev_kfree_skb(skb); + if (card->tx_emitted <= 0) + /* If this is the first tx buffer emit it immediately. */ + __lcs_emit_txbuffer(card); + return 0; +} + +static int +lcs_start_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct lcs_card *card; + int rc; + + LCS_DBF_TEXT(5, trace, "pktxmit"); + card = (struct lcs_card *) dev->priv; + spin_lock(&card->lock); + rc = __lcs_start_xmit(card, skb, dev); + spin_unlock(&card->lock); + return rc; +} + +/** + * send startlan and lanstat command to make LCS device ready + */ +static int +lcs_startlan_auto(struct lcs_card *card) +{ + int rc; + + LCS_DBF_TEXT(2, trace, "strtauto"); +#ifdef CONFIG_NET_ETHERNET + card->lan_type = LCS_FRAME_TYPE_ENET; + rc = lcs_send_startlan(card, LCS_INITIATOR_TCPIP); + if (rc == 0) + return 0; + +#endif +#ifdef CONFIG_TR + card->lan_type = LCS_FRAME_TYPE_TR; + rc = lcs_send_startlan(card, LCS_INITIATOR_TCPIP); + if (rc == 0) + return 0; +#endif +#ifdef CONFIG_FDDI + card->lan_type = LCS_FRAME_TYPE_FDDI; + rc = lcs_send_startlan(card, LCS_INITIATOR_TCPIP); + if (rc == 0) + return 0; +#endif + return -EIO; +} + +static int +lcs_startlan(struct lcs_card *card) +{ + int rc, i; + + LCS_DBF_TEXT(2, trace, "startlan"); + rc = 0; + if (card->portno != LCS_INVALID_PORT_NO) { + if (card->lan_type == LCS_FRAME_TYPE_AUTO) + rc = lcs_startlan_auto(card); + else + rc = lcs_send_startlan(card, LCS_INITIATOR_TCPIP); + } else { + for (i = 0; i <= 16; i++) { + card->portno = i; + if (card->lan_type != LCS_FRAME_TYPE_AUTO) + rc = lcs_send_startlan(card, + LCS_INITIATOR_TCPIP); + else + /* autodetecting lan type */ + rc = lcs_startlan_auto(card); + if (rc == 0) + break; + } + } + if (rc == 0) + return lcs_send_lanstat(card); + return rc; +} + +/** + * LCS detect function + * setup channels and make them I/O ready + */ +static int +lcs_detect(struct lcs_card *card) +{ + int rc = 0; + + LCS_DBF_TEXT(2, setup, "lcsdetct"); + /* start/reset card */ + if (card->dev) + netif_stop_queue(card->dev); + rc = lcs_stop_channels(card); + if (rc == 0) { + rc = 
lcs_start_channels(card); + if (rc == 0) { + rc = lcs_send_startup(card, LCS_INITIATOR_TCPIP); + if (rc == 0) + rc = lcs_startlan(card); + } + } + if (rc == 0) { + card->state = DEV_STATE_UP; + } else { + card->state = DEV_STATE_DOWN; + card->write.state = CH_STATE_INIT; + card->read.state = CH_STATE_INIT; + } + return rc; +} + +/** + * reset card + */ +static int +lcs_resetcard(struct lcs_card *card) +{ + int retries; + + LCS_DBF_TEXT(2, trace, "rescard"); + for (retries = 0; retries < 10; retries++) { + if (lcs_detect(card) == 0) { + netif_wake_queue(card->dev); + card->state = DEV_STATE_UP; + PRINT_INFO("LCS device %s successfully restarted!\n", + card->dev->name); + return 0; + } + msleep(3000); + } + PRINT_ERR("Error in Reseting LCS card!\n"); + return -EIO; +} + + +/** + * LCS Stop card + */ +static int +lcs_stopcard(struct lcs_card *card) +{ + int rc; + + LCS_DBF_TEXT(3, setup, "stopcard"); + + if (card->read.state != CH_STATE_STOPPED && + card->write.state != CH_STATE_STOPPED && + card->state == DEV_STATE_UP) { + lcs_clear_multicast_list(card); + rc = lcs_send_stoplan(card,LCS_INITIATOR_TCPIP); + rc = lcs_send_shutdown(card); + } + rc = lcs_stop_channels(card); + card->state = DEV_STATE_DOWN; + + return rc; +} + +/** + * LGW initiated commands + */ +static int +lcs_lgw_startlan_thread(void *data) +{ + struct lcs_card *card; + + card = (struct lcs_card *) data; + daemonize("lgwstpln"); + + if (!lcs_do_run_thread(card, LCS_STARTLAN_THREAD)) + return 0; + LCS_DBF_TEXT(4, trace, "lgwstpln"); + if (card->dev) + netif_stop_queue(card->dev); + if (lcs_startlan(card) == 0) { + netif_wake_queue(card->dev); + card->state = DEV_STATE_UP; + PRINT_INFO("LCS Startlan for device %s succeeded!\n", + card->dev->name); + + } else + PRINT_ERR("LCS Startlan for device %s failed!\n", + card->dev->name); + lcs_clear_thread_running_bit(card, LCS_STARTLAN_THREAD); + return 0; +} + +/** + * Send startup command initiated by Lan Gateway + */ +static int +lcs_lgw_startup_thread(void *data) +{ + int rc; + + struct lcs_card *card; + + card = (struct lcs_card *) data; + daemonize("lgwstaln"); + + if (!lcs_do_run_thread(card, LCS_STARTUP_THREAD)) + return 0; + LCS_DBF_TEXT(4, trace, "lgwstaln"); + if (card->dev) + netif_stop_queue(card->dev); + rc = lcs_send_startup(card, LCS_INITIATOR_LGW); + if (rc != 0) { + PRINT_ERR("Startup for LCS device %s initiated " \ + "by LGW failed!\nReseting card ...\n", + card->dev->name); + /* do a card reset */ + rc = lcs_resetcard(card); + if (rc == 0) + goto Done; + } + rc = lcs_startlan(card); + if (rc == 0) { + netif_wake_queue(card->dev); + card->state = DEV_STATE_UP; + } +Done: + if (rc == 0) + PRINT_INFO("LCS Startup for device %s succeeded!\n", + card->dev->name); + else + PRINT_ERR("LCS Startup for device %s failed!\n", + card->dev->name); + lcs_clear_thread_running_bit(card, LCS_STARTUP_THREAD); + return 0; +} + + +/** + * send stoplan command initiated by Lan Gateway + */ +static int +lcs_lgw_stoplan_thread(void *data) +{ + struct lcs_card *card; + int rc; + + card = (struct lcs_card *) data; + daemonize("lgwstop"); + + if (!lcs_do_run_thread(card, LCS_STOPLAN_THREAD)) + return 0; + LCS_DBF_TEXT(4, trace, "lgwstop"); + if (card->dev) + netif_stop_queue(card->dev); + if (lcs_send_stoplan(card, LCS_INITIATOR_LGW) == 0) + PRINT_INFO("Stoplan for %s initiated by LGW succeeded!\n", + card->dev->name); + else + PRINT_ERR("Stoplan %s initiated by LGW failed!\n", + card->dev->name); + /*Try to reset the card, stop it on failure */ + rc = lcs_resetcard(card); + if (rc != 0) + 
rc = lcs_stopcard(card); + lcs_clear_thread_running_bit(card, LCS_STOPLAN_THREAD); + return rc; +} + +/** + * Kernel Thread helper functions for LGW initiated commands + */ +static void +lcs_start_kernel_thread(struct lcs_card *card) +{ + LCS_DBF_TEXT(5, trace, "krnthrd"); + if (lcs_do_start_thread(card, LCS_STARTUP_THREAD)) + kernel_thread(lcs_lgw_startup_thread, (void *) card, SIGCHLD); + if (lcs_do_start_thread(card, LCS_STARTLAN_THREAD)) + kernel_thread(lcs_lgw_startlan_thread, (void *) card, SIGCHLD); + if (lcs_do_start_thread(card, LCS_STOPLAN_THREAD)) + kernel_thread(lcs_lgw_stoplan_thread, (void *) card, SIGCHLD); +#ifdef CONFIG_IP_MULTICAST + if (lcs_do_start_thread(card, LCS_SET_MC_THREAD)) + kernel_thread(lcs_register_mc_addresses, (void *) card, SIGCHLD); +#endif +} + +/** + * Process control frames. + */ +static void +lcs_get_control(struct lcs_card *card, struct lcs_cmd *cmd) +{ + LCS_DBF_TEXT(5, trace, "getctrl"); + if (cmd->initiator == LCS_INITIATOR_LGW) { + switch(cmd->cmd_code) { + case LCS_CMD_STARTUP: + if (!lcs_set_thread_start_bit(card, + LCS_STARTUP_THREAD)) + schedule_work(&card->kernel_thread_starter); + break; + case LCS_CMD_STARTLAN: + if (!lcs_set_thread_start_bit(card, + LCS_STARTLAN_THREAD)) + schedule_work(&card->kernel_thread_starter); + break; + case LCS_CMD_STOPLAN: + if (!lcs_set_thread_start_bit(card, + LCS_STOPLAN_THREAD)) + schedule_work(&card->kernel_thread_starter); + break; + default: + PRINT_INFO("UNRECOGNIZED LGW COMMAND\n"); + break; + } + } else + lcs_notify_lancmd_waiters(card, cmd); +} + +/** + * Unpack network packet. + */ +static void +lcs_get_skb(struct lcs_card *card, char *skb_data, unsigned int skb_len) +{ + struct sk_buff *skb; + + LCS_DBF_TEXT(5, trace, "getskb"); + if (card->dev == NULL || + card->state != DEV_STATE_UP) + /* The card isn't up. Ignore the packet. */ + return; + + skb = dev_alloc_skb(skb_len); + if (skb == NULL) { + PRINT_ERR("LCS: alloc_skb failed for device=%s\n", + card->dev->name); + card->stats.rx_dropped++; + return; + } + skb->dev = card->dev; + memcpy(skb_put(skb, skb_len), skb_data, skb_len); + skb->protocol = card->lan_type_trans(skb, card->dev); + card->stats.rx_bytes += skb_len; + card->stats.rx_packets++; + *((__u32 *)skb->cb) = ++card->pkt_seq; + netif_rx(skb); +} + +/** + * LCS main routine to get packets and lancmd replies from the buffers + */ +static void +lcs_get_frames_cb(struct lcs_channel *channel, struct lcs_buffer *buffer) +{ + struct lcs_card *card; + struct lcs_header *lcs_hdr; + __u16 offset; + + LCS_DBF_TEXT(5, trace, "lcsgtpkt"); + lcs_hdr = (struct lcs_header *) buffer->data; + if (lcs_hdr->offset == LCS_ILLEGAL_OFFSET) { + LCS_DBF_TEXT(4, trace, "-eiogpkt"); + return; + } + card = (struct lcs_card *) + ((char *) channel - offsetof(struct lcs_card, read)); + offset = 0; + while (lcs_hdr->offset != 0) { + if (lcs_hdr->offset <= 0 || + lcs_hdr->offset > LCS_IOBUFFERSIZE || + lcs_hdr->offset < offset) { + /* Offset invalid. */ + card->stats.rx_length_errors++; + card->stats.rx_errors++; + return; + } + /* What kind of frame is it? */ + if (lcs_hdr->type == LCS_FRAME_TYPE_CONTROL) + /* Control frame. */ + lcs_get_control(card, (struct lcs_cmd *) lcs_hdr); + else if (lcs_hdr->type == LCS_FRAME_TYPE_ENET || + lcs_hdr->type == LCS_FRAME_TYPE_TR || + lcs_hdr->type == LCS_FRAME_TYPE_FDDI) + /* Normal network packet. */ + lcs_get_skb(card, (char *)(lcs_hdr + 1), + lcs_hdr->offset - offset - + sizeof(struct lcs_header)); + else + /* Unknown frame type. */ + ; // FIXME: error message ? 
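+		/*
+		 * Editor's sketch of the buffer layout walked by this loop,
+		 * derived from the offset handling above: each lcs_header
+		 * carries the absolute offset of the *next* frame, and a
+		 * header with offset 0 terminates the chain:
+		 *
+		 *   | hdr | payload | hdr | payload | hdr(offset == 0) |
+		 *   0                ^
+		 *   '----------------' first hdr->offset
+		 */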
+ /* Proceed to next frame. */ + offset = lcs_hdr->offset; + lcs_hdr->offset = LCS_ILLEGAL_OFFSET; + lcs_hdr = (struct lcs_header *) (buffer->data + offset); + } + /* The buffer is now empty. Make it ready again. */ + lcs_ready_buffer(&card->read, buffer); +} + +/** + * get network statistics for ifconfig and other user programs + */ +static struct net_device_stats * +lcs_getstats(struct net_device *dev) +{ + struct lcs_card *card; + + LCS_DBF_TEXT(4, trace, "netstats"); + card = (struct lcs_card *) dev->priv; + return &card->stats; +} + +/** + * stop lcs device + * This function will be called by user doing ifconfig xxx down + */ +static int +lcs_stop_device(struct net_device *dev) +{ + struct lcs_card *card; + int rc; + + LCS_DBF_TEXT(2, trace, "stopdev"); + card = (struct lcs_card *) dev->priv; + netif_stop_queue(dev); + dev->flags &= ~IFF_UP; + rc = lcs_stopcard(card); + if (rc) + PRINT_ERR("Try it again!\n "); + return rc; +} + +/** + * start lcs device and make it runnable + * This function will be called by user doing ifconfig xxx up + */ +static int +lcs_open_device(struct net_device *dev) +{ + struct lcs_card *card; + int rc; + + LCS_DBF_TEXT(2, trace, "opendev"); + card = (struct lcs_card *) dev->priv; + /* initialize statistics */ + rc = lcs_detect(card); + if (rc) { + PRINT_ERR("LCS:Error in opening device!\n"); + + } else { + dev->flags |= IFF_UP; + netif_wake_queue(dev); + card->state = DEV_STATE_UP; + } + return rc; +} + +/** + * show function for portno called by cat or similar things + */ +static ssize_t +lcs_portno_show (struct device *dev, char *buf) +{ + struct lcs_card *card; + + card = (struct lcs_card *)dev->driver_data; + + if (!card) + return 0; + + return sprintf(buf, "%d\n", card->portno); +} + +/** + * store the value which is piped to file portno + */ +static ssize_t +lcs_portno_store (struct device *dev, const char *buf, size_t count) +{ + struct lcs_card *card; + int value; + + card = (struct lcs_card *)dev->driver_data; + + if (!card) + return 0; + + sscanf(buf, "%u", &value); + /* TODO: sanity checks */ + card->portno = value; + + return count; + +} + +static DEVICE_ATTR(portno, 0644, lcs_portno_show, lcs_portno_store); + +static ssize_t +lcs_type_show(struct device *dev, char *buf) +{ + struct ccwgroup_device *cgdev; + + cgdev = to_ccwgroupdev(dev); + if (!cgdev) + return -ENODEV; + + return sprintf(buf, "%s\n", cu3088_type[cgdev->cdev[0]->id.driver_info]); +} + +static DEVICE_ATTR(type, 0444, lcs_type_show, NULL); + +static ssize_t +lcs_timeout_show(struct device *dev, char *buf) +{ + struct lcs_card *card; + + card = (struct lcs_card *)dev->driver_data; + + return card ? sprintf(buf, "%u\n", card->lancmd_timeout) : 0; +} + +static ssize_t +lcs_timeout_store (struct device *dev, const char *buf, size_t count) +{ + struct lcs_card *card; + int value; + + card = (struct lcs_card *)dev->driver_data; + + if (!card) + return 0; + + sscanf(buf, "%u", &value); + /* TODO: sanity checks */ + card->lancmd_timeout = value; + + return count; + +} + +DEVICE_ATTR(lancmd_timeout, 0644, lcs_timeout_show, lcs_timeout_store); + +static struct attribute * lcs_attrs[] = { + &dev_attr_portno.attr, + &dev_attr_type.attr, + &dev_attr_lancmd_timeout.attr, + NULL, +}; + +static struct attribute_group lcs_attr_group = { + .attrs = lcs_attrs, +}; + +/** + * lcs_probe_device is called on establishing a new ccwgroup_device. 
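+ *
+ * (Editor's usage note for the sysfs attributes defined above; the bus id
+ * 0.0.0800 and the abbreviated sysfs path are illustrative only:
+ *
+ *	echo 1  > .../lcs/0.0.0800/portno
+ *	cat       .../lcs/0.0.0800/type
+ *	echo 10 > .../lcs/0.0.0800/lancmd_timeout
+ *
+ * portno and lancmd_timeout are parsed with sscanf("%u") and stored
+ * unchecked, as the TODO comments above note.)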
+ */ +static int +lcs_probe_device(struct ccwgroup_device *ccwgdev) +{ + struct lcs_card *card; + int ret; + + if (!get_device(&ccwgdev->dev)) + return -ENODEV; + + LCS_DBF_TEXT(2, setup, "add_dev"); + card = lcs_alloc_card(); + if (!card) { + PRINT_ERR("Allocation of lcs card failed\n"); + put_device(&ccwgdev->dev); + return -ENOMEM; + } + ret = sysfs_create_group(&ccwgdev->dev.kobj, &lcs_attr_group); + if (ret) { + PRINT_ERR("Creating attributes failed"); + lcs_free_card(card); + put_device(&ccwgdev->dev); + return ret; + } + ccwgdev->dev.driver_data = card; + ccwgdev->cdev[0]->handler = lcs_irq; + ccwgdev->cdev[1]->handler = lcs_irq; + return 0; +} + +static int +lcs_register_netdev(struct ccwgroup_device *ccwgdev) +{ + struct lcs_card *card; + + LCS_DBF_TEXT(2, setup, "regnetdv"); + card = (struct lcs_card *)ccwgdev->dev.driver_data; + if (card->dev->reg_state != NETREG_UNINITIALIZED) + return 0; + SET_NETDEV_DEV(card->dev, &ccwgdev->dev); + return register_netdev(card->dev); +} + +/** + * lcs_new_device will be called by setting the group device online. + */ + +static int +lcs_new_device(struct ccwgroup_device *ccwgdev) +{ + struct lcs_card *card; + struct net_device *dev=NULL; + enum lcs_dev_states recover_state; + int rc; + + card = (struct lcs_card *)ccwgdev->dev.driver_data; + if (!card) + return -ENODEV; + + LCS_DBF_TEXT(2, setup, "newdev"); + LCS_DBF_HEX(3, setup, &card, sizeof(void*)); + card->read.ccwdev = ccwgdev->cdev[0]; + card->write.ccwdev = ccwgdev->cdev[1]; + + recover_state = card->state; + ccw_device_set_online(card->read.ccwdev); + ccw_device_set_online(card->write.ccwdev); + + LCS_DBF_TEXT(3, setup, "lcsnewdv"); + + lcs_setup_card(card); + rc = lcs_detect(card); + if (rc) { + LCS_DBF_TEXT(2, setup, "dtctfail"); + PRINT_WARN("Detection of LCS card failed with return code " + "%d (0x%x)\n", rc, rc); + lcs_stopcard(card); + goto out; + } + if (card->dev) { + LCS_DBF_TEXT(2, setup, "samedev"); + LCS_DBF_HEX(3, setup, &card, sizeof(void*)); + goto netdev_out; + } + switch (card->lan_type) { +#ifdef CONFIG_NET_ETHERNET + case LCS_FRAME_TYPE_ENET: + card->lan_type_trans = eth_type_trans; + dev = alloc_etherdev(0); + break; +#endif +#ifdef CONFIG_TR + case LCS_FRAME_TYPE_TR: + card->lan_type_trans = tr_type_trans; + dev = alloc_trdev(0); + break; +#endif +#ifdef CONFIG_FDDI + case LCS_FRAME_TYPE_FDDI: + card->lan_type_trans = fddi_type_trans; + dev = alloc_fddidev(0); + break; +#endif + default: + LCS_DBF_TEXT(3, setup, "errinit"); + PRINT_ERR("LCS: Initialization failed\n"); + PRINT_ERR("LCS: No device found!\n"); + goto out; + } + if (!dev) + goto out; + card->dev = dev; +netdev_out: + card->dev->priv = card; + card->dev->open = lcs_open_device; + card->dev->stop = lcs_stop_device; + card->dev->hard_start_xmit = lcs_start_xmit; + card->dev->get_stats = lcs_getstats; + SET_MODULE_OWNER(dev); + if (lcs_register_netdev(ccwgdev) != 0) + goto out; + memcpy(card->dev->dev_addr, card->mac, LCS_MAC_LENGTH); +#ifdef CONFIG_IP_MULTICAST + if (!lcs_check_multicast_support(card)) + card->dev->set_multicast_list = lcs_set_multicast_list; +#endif + netif_stop_queue(card->dev); + lcs_set_allowed_threads(card,0xffffffff); + if (recover_state == DEV_STATE_RECOVER) { + lcs_set_multicast_list(card->dev); + card->dev->flags |= IFF_UP; + netif_wake_queue(card->dev); + card->state = DEV_STATE_UP; + } else + lcs_stopcard(card); + + return 0; +out: + + ccw_device_set_offline(card->read.ccwdev); + ccw_device_set_offline(card->write.ccwdev); + return -ENODEV; +} + +/** + * lcs_shutdown_device, 
called when setting the group device offline. + */ +static int +lcs_shutdown_device(struct ccwgroup_device *ccwgdev) +{ + struct lcs_card *card; + enum lcs_dev_states recover_state; + int ret; + + LCS_DBF_TEXT(3, setup, "shtdndev"); + card = (struct lcs_card *)ccwgdev->dev.driver_data; + if (!card) + return -ENODEV; + lcs_set_allowed_threads(card, 0); + if (lcs_wait_for_threads(card, LCS_SET_MC_THREAD)) + return -ERESTARTSYS; + LCS_DBF_HEX(3, setup, &card, sizeof(void*)); + recover_state = card->state; + + ret = lcs_stop_device(card->dev); + ret = ccw_device_set_offline(card->read.ccwdev); + ret = ccw_device_set_offline(card->write.ccwdev); + if (recover_state == DEV_STATE_UP) { + card->state = DEV_STATE_RECOVER; + } + if (ret) + return ret; + return 0; +} + +/** + * lcs_remove_device, free buffers and card + */ +static void +lcs_remove_device(struct ccwgroup_device *ccwgdev) +{ + struct lcs_card *card; + + card = (struct lcs_card *)ccwgdev->dev.driver_data; + if (!card) + return; + + PRINT_INFO("Removing lcs group device ....\n"); + LCS_DBF_TEXT(3, setup, "remdev"); + LCS_DBF_HEX(3, setup, &card, sizeof(void*)); + if (ccwgdev->state == CCWGROUP_ONLINE) { + lcs_shutdown_device(ccwgdev); + } + if (card->dev) + unregister_netdev(card->dev); + sysfs_remove_group(&ccwgdev->dev.kobj, &lcs_attr_group); + lcs_cleanup_card(card); + lcs_free_card(card); + put_device(&ccwgdev->dev); +} + +/** + * LCS ccwgroup driver registration + */ +static struct ccwgroup_driver lcs_group_driver = { + .owner = THIS_MODULE, + .name = "lcs", + .max_slaves = 2, + .driver_id = 0xD3C3E2, + .probe = lcs_probe_device, + .remove = lcs_remove_device, + .set_online = lcs_new_device, + .set_offline = lcs_shutdown_device, +}; + +/** + * LCS Module/Kernel initialization function + */ +static int +__init lcs_init_module(void) +{ + int rc; + + PRINT_INFO("Loading %s\n",version); + rc = lcs_register_debug_facility(); + LCS_DBF_TEXT(0, setup, "lcsinit"); + if (rc) { + PRINT_ERR("Initialization failed\n"); + return rc; + } + + rc = register_cu3088_discipline(&lcs_group_driver); + if (rc) { + PRINT_ERR("Initialization failed\n"); + return rc; + } + + return 0; +} + + +/** + * LCS module cleanup function + */ +static void +__exit lcs_cleanup_module(void) +{ + PRINT_INFO("Terminating lcs module.\n"); + LCS_DBF_TEXT(0, trace, "cleanup"); + unregister_cu3088_discipline(&lcs_group_driver); + lcs_unregister_debug_facility(); +} + +module_init(lcs_init_module); +module_exit(lcs_cleanup_module); + +MODULE_AUTHOR("Frank Pavlic <pavlic@de.ibm.com>"); +MODULE_LICENSE("GPL"); + diff --git a/drivers/s390/net/lcs.h b/drivers/s390/net/lcs.h new file mode 100644 index 000000000000..a7f348ef1b08 --- /dev/null +++ b/drivers/s390/net/lcs.h @@ -0,0 +1,321 @@ +/*lcs.h*/ + +#include <linux/interrupt.h> +#include <linux/netdevice.h> +#include <linux/skbuff.h> +#include <linux/workqueue.h> +#include <asm/ccwdev.h> + +#define VERSION_LCS_H "$Revision: 1.19 $" + +#define LCS_DBF_TEXT(level, name, text) \ + do { \ + debug_text_event(lcs_dbf_##name, level, text); \ + } while (0) + +#define LCS_DBF_HEX(level,name,addr,len) \ +do { \ + debug_event(lcs_dbf_##name,level,(void*)(addr),len); \ +} while (0) + +#define LCS_DBF_TEXT_(level,name,text...) 
\ +do { \ + sprintf(debug_buffer, text); \ + debug_text_event(lcs_dbf_##name,level, debug_buffer);\ +} while (0) + +/** + * some more definitions for debug or output stuff + */ +#define PRINTK_HEADER " lcs: " + +/** + * sysfs related stuff + */ +#define CARD_FROM_DEV(cdev) \ + (struct lcs_card *) \ + ((struct ccwgroup_device *)cdev->dev.driver_data)->dev.driver_data; +/** + * CCW commands used in this driver + */ +#define LCS_CCW_WRITE 0x01 +#define LCS_CCW_READ 0x02 +#define LCS_CCW_TRANSFER 0x08 + +/** + * LCS device status primitives + */ +#define LCS_CMD_STARTLAN 0x01 +#define LCS_CMD_STOPLAN 0x02 +#define LCS_CMD_LANSTAT 0x04 +#define LCS_CMD_STARTUP 0x07 +#define LCS_CMD_SHUTDOWN 0x08 +#define LCS_CMD_QIPASSIST 0xb2 +#define LCS_CMD_SETIPM 0xb4 +#define LCS_CMD_DELIPM 0xb5 + +#define LCS_INITIATOR_TCPIP 0x00 +#define LCS_INITIATOR_LGW 0x01 +#define LCS_STD_CMD_SIZE 16 +#define LCS_MULTICAST_CMD_SIZE 404 + +/** + * LCS IPASSIST MASKS,only used when multicast is switched on + */ +/* Not supported by LCS */ +#define LCS_IPASS_ARP_PROCESSING 0x0001 +#define LCS_IPASS_IN_CHECKSUM_SUPPORT 0x0002 +#define LCS_IPASS_OUT_CHECKSUM_SUPPORT 0x0004 +#define LCS_IPASS_IP_FRAG_REASSEMBLY 0x0008 +#define LCS_IPASS_IP_FILTERING 0x0010 +/* Supported by lcs 3172 */ +#define LCS_IPASS_IPV6_SUPPORT 0x0020 +#define LCS_IPASS_MULTICAST_SUPPORT 0x0040 + +/** + * LCS sense byte definitions + */ +#define LCS_SENSE_INTERFACE_DISCONNECT 0x01 +#define LCS_SENSE_EQUIPMENT_CHECK 0x10 +#define LCS_SENSE_BUS_OUT_CHECK 0x20 +#define LCS_SENSE_INTERVENTION_REQUIRED 0x40 +#define LCS_SENSE_CMD_REJECT 0x80 +#define LCS_SENSE_RESETTING_EVENT 0x0080 +#define LCS_SENSE_DEVICE_ONLINE 0x0020 + +/** + * LCS packet type definitions + */ +#define LCS_FRAME_TYPE_CONTROL 0 +#define LCS_FRAME_TYPE_ENET 1 +#define LCS_FRAME_TYPE_TR 2 +#define LCS_FRAME_TYPE_FDDI 7 +#define LCS_FRAME_TYPE_AUTO -1 + +/** + * some more definitions,we will sort them later + */ +#define LCS_ILLEGAL_OFFSET 0xffff +#define LCS_IOBUFFERSIZE 0x5000 +#define LCS_NUM_BUFFS 8 /* needs to be power of 2 */ +#define LCS_MAC_LENGTH 6 +#define LCS_INVALID_PORT_NO -1 +#define LCS_LANCMD_TIMEOUT_DEFAULT 5 + +/** + * Multicast state + */ +#define LCS_IPM_STATE_SET_REQUIRED 0 +#define LCS_IPM_STATE_DEL_REQUIRED 1 +#define LCS_IPM_STATE_ON_CARD 2 + +/** + * LCS IP Assist declarations + * seems to be only used for multicast + */ +#define LCS_IPASS_ARP_PROCESSING 0x0001 +#define LCS_IPASS_INBOUND_CSUM_SUPP 0x0002 +#define LCS_IPASS_OUTBOUND_CSUM_SUPP 0x0004 +#define LCS_IPASS_IP_FRAG_REASSEMBLY 0x0008 +#define LCS_IPASS_IP_FILTERING 0x0010 +#define LCS_IPASS_IPV6_SUPPORT 0x0020 +#define LCS_IPASS_MULTICAST_SUPPORT 0x0040 + +/** + * LCS Buffer states + */ +enum lcs_buffer_states { + BUF_STATE_EMPTY, /* buffer is empty */ + BUF_STATE_LOCKED, /* buffer is locked, don't touch */ + BUF_STATE_READY, /* buffer is ready for read/write */ + BUF_STATE_PROCESSED, +}; + +/** + * LCS Channel State Machine declarations + */ +enum lcs_channel_states { + CH_STATE_INIT, + CH_STATE_HALTED, + CH_STATE_STOPPED, + CH_STATE_RUNNING, + CH_STATE_SUSPENDED, + CH_STATE_CLEARED, +}; + +/** + * LCS device state machine + */ +enum lcs_dev_states { + DEV_STATE_DOWN, + DEV_STATE_UP, + DEV_STATE_RECOVER, +}; + +enum lcs_threads { + LCS_SET_MC_THREAD = 1, + LCS_STARTLAN_THREAD = 2, + LCS_STOPLAN_THREAD = 4, + LCS_STARTUP_THREAD = 8, +}; +/** + * LCS struct declarations + */ +struct lcs_header { + __u16 offset; + __u8 type; + __u8 slot; +} __attribute__ ((packed)); + +struct lcs_ip_mac_pair { + __u32 
ip_addr; + __u8 mac_addr[LCS_MAC_LENGTH]; + __u8 reserved[2]; +} __attribute__ ((packed)); + +struct lcs_ipm_list { + struct list_head list; + struct lcs_ip_mac_pair ipm; + __u8 ipm_state; +}; + +struct lcs_cmd { + __u16 offset; + __u8 type; + __u8 slot; + __u8 cmd_code; + __u8 initiator; + __u16 sequence_no; + __u16 return_code; + union { + struct { + __u8 lan_type; + __u8 portno; + __u16 parameter_count; + __u8 operator_flags[3]; + __u8 reserved[3]; + } lcs_std_cmd; + struct { + __u16 unused1; + __u16 buff_size; + __u8 unused2[6]; + } lcs_startup; + struct { + __u8 lan_type; + __u8 portno; + __u8 unused[10]; + __u8 mac_addr[LCS_MAC_LENGTH]; + __u32 num_packets_deblocked; + __u32 num_packets_blocked; + __u32 num_packets_tx_on_lan; + __u32 num_tx_errors_detected; + __u32 num_tx_packets_disgarded; + __u32 num_packets_rx_from_lan; + __u32 num_rx_errors_detected; + __u32 num_rx_discarded_nobuffs_avail; + __u32 num_rx_packets_too_large; + } lcs_lanstat_cmd; +#ifdef CONFIG_IP_MULTICAST + struct { + __u8 lan_type; + __u8 portno; + __u16 num_ip_pairs; + __u16 ip_assists_supported; + __u16 ip_assists_enabled; + __u16 version; + struct { + struct lcs_ip_mac_pair + ip_mac_pair[32]; + __u32 response_data; + } lcs_ipass_ctlmsg __attribute ((packed)); + } lcs_qipassist __attribute__ ((packed)); +#endif /*CONFIG_IP_MULTICAST */ + } cmd __attribute__ ((packed)); +} __attribute__ ((packed)); + +/** + * Forward declarations. + */ +struct lcs_card; +struct lcs_channel; + +/** + * Definition of an lcs buffer. + */ +struct lcs_buffer { + enum lcs_buffer_states state; + void *data; + int count; + /* Callback for completion notification. */ + void (*callback)(struct lcs_channel *, struct lcs_buffer *); +}; + +struct lcs_reply { + struct list_head list; + __u16 sequence_no; + atomic_t refcnt; + /* Callback for completion notification. 
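+	 * Editor's note: set from the reply_callback argument of
+	 * lcs_send_lancmd() and invoked by lcs_notify_lancmd_waiters()
+	 * with the matching reply frame; __lcs_lanstat_cb(), which copies
+	 * the adapter MAC address, is one example.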
*/ + void (*callback)(struct lcs_card *, struct lcs_cmd *); + wait_queue_head_t wait_q; + struct lcs_card *card; + int received; + int rc; +}; + +/** + * Definition of an lcs channel + */ +struct lcs_channel { + enum lcs_channel_states state; + struct ccw_device *ccwdev; + struct ccw1 ccws[LCS_NUM_BUFFS + 1]; + wait_queue_head_t wait_q; + struct tasklet_struct irq_tasklet; + struct lcs_buffer iob[LCS_NUM_BUFFS]; + int io_idx; + int buf_idx; +}; + + +/** + * definition of the lcs card + */ +struct lcs_card { + spinlock_t lock; + spinlock_t ipm_lock; + enum lcs_dev_states state; + struct net_device *dev; + struct net_device_stats stats; + unsigned short (*lan_type_trans)(struct sk_buff *skb, + struct net_device *dev); + struct lcs_channel read; + struct lcs_channel write; + struct lcs_buffer *tx_buffer; + int tx_emitted; + struct list_head lancmd_waiters; + int lancmd_timeout; + + struct work_struct kernel_thread_starter; + spinlock_t mask_lock; + unsigned long thread_start_mask; + unsigned long thread_running_mask; + unsigned long thread_allowed_mask; + wait_queue_head_t wait_q; + +#ifdef CONFIG_IP_MULTICAST + struct list_head ipm_list; +#endif + __u8 mac[LCS_MAC_LENGTH]; + __u16 ip_assists_supported; + __u16 ip_assists_enabled; + __s8 lan_type; + __u32 pkt_seq; + __u16 sequence_no; + __s16 portno; + /* Some info copied from probeinfo */ + u8 device_forced; + u8 max_port_no; + u8 hint_port_no; + s16 port_protocol_no; +} __attribute__ ((aligned(8))); + diff --git a/drivers/s390/net/netiucv.c b/drivers/s390/net/netiucv.c new file mode 100644 index 000000000000..16e8e69afb10 --- /dev/null +++ b/drivers/s390/net/netiucv.c @@ -0,0 +1,2149 @@ +/* + * $Id: netiucv.c,v 1.63 2004/07/27 13:36:05 mschwide Exp $ + * + * IUCV network driver + * + * Copyright (C) 2001 IBM Deutschland Entwicklung GmbH, IBM Corporation + * Author(s): Fritz Elfert (elfert@de.ibm.com, felfert@millenux.com) + * + * Driverfs integration and all bugs therein by Cornelia Huck(cohuck@de.ibm.com) + * + * Documentation used: + * the source of the original IUCV driver by: + * Stefan Hegewald <hegewald@de.ibm.com> + * Hartmut Penner <hpenner@de.ibm.com> + * Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com) + * Martin Schwidefsky (schwidefsky@de.ibm.com) + * Alan Altmark (Alan_Altmark@us.ibm.com) Sept. 2000 + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
+ * + * RELEASE-TAG: IUCV network driver $Revision: 1.63 $ + * + */ + +#undef DEBUG + +#include <linux/module.h> +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/slab.h> +#include <linux/errno.h> +#include <linux/types.h> +#include <linux/interrupt.h> +#include <linux/timer.h> +#include <linux/sched.h> +#include <linux/bitops.h> + +#include <linux/signal.h> +#include <linux/string.h> +#include <linux/device.h> + +#include <linux/ip.h> +#include <linux/if_arp.h> +#include <linux/tcp.h> +#include <linux/skbuff.h> +#include <linux/ctype.h> +#include <net/dst.h> + +#include <asm/io.h> +#include <asm/uaccess.h> + +#include "iucv.h" +#include "fsm.h" + +MODULE_AUTHOR + ("(C) 2001 IBM Corporation by Fritz Elfert (felfert@millenux.com)"); +MODULE_DESCRIPTION ("Linux for S/390 IUCV network driver"); + + +#define PRINTK_HEADER " iucv: " /* for debugging */ + +static struct device_driver netiucv_driver = { + .name = "netiucv", + .bus = &iucv_bus, +}; + +/** + * Per connection profiling data + */ +struct connection_profile { + unsigned long maxmulti; + unsigned long maxcqueue; + unsigned long doios_single; + unsigned long doios_multi; + unsigned long txlen; + unsigned long tx_time; + struct timespec send_stamp; + unsigned long tx_pending; + unsigned long tx_max_pending; +}; + +/** + * Representation of one iucv connection + */ +struct iucv_connection { + struct iucv_connection *next; + iucv_handle_t handle; + __u16 pathid; + struct sk_buff *rx_buff; + struct sk_buff *tx_buff; + struct sk_buff_head collect_queue; + struct sk_buff_head commit_queue; + spinlock_t collect_lock; + int collect_len; + int max_buffsize; + fsm_timer timer; + fsm_instance *fsm; + struct net_device *netdev; + struct connection_profile prof; + char userid[9]; +}; + +/** + * Linked list of all connection structs. + */ +static struct iucv_connection *iucv_connections; + +/** + * Representation of event-data for the + * connection state machine. + */ +struct iucv_event { + struct iucv_connection *conn; + void *data; +}; + +/** + * Private part of the network device structure + */ +struct netiucv_priv { + struct net_device_stats stats; + unsigned long tbusy; + fsm_instance *fsm; + struct iucv_connection *conn; + struct device *dev; +}; + +/** + * Link level header for a packet. + */ +typedef struct ll_header_t { + __u16 next; +} ll_header; + +#define NETIUCV_HDRLEN (sizeof(ll_header)) +#define NETIUCV_BUFSIZE_MAX 32768 +#define NETIUCV_BUFSIZE_DEFAULT NETIUCV_BUFSIZE_MAX +#define NETIUCV_MTU_MAX (NETIUCV_BUFSIZE_MAX - NETIUCV_HDRLEN) +#define NETIUCV_MTU_DEFAULT 9216 +#define NETIUCV_QUEUELEN_DEFAULT 50 +#define NETIUCV_TIMEOUT_5SEC 5000 + +/** + * Compatibility macros for busy handling + * of network devices. + */ +static __inline__ void netiucv_clear_busy(struct net_device *dev) +{ + clear_bit(0, &(((struct netiucv_priv *)dev->priv)->tbusy)); + netif_wake_queue(dev); +} + +static __inline__ int netiucv_test_and_set_busy(struct net_device *dev) +{ + netif_stop_queue(dev); + return test_and_set_bit(0, &((struct netiucv_priv *)dev->priv)->tbusy); +} + +static __u8 iucv_host[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }; +static __u8 iucvMagic[16] = { + 0xF0, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, + 0xF0, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40 +}; + +/** + * This mask means the 16-byte IUCV "magic" and the origin userid must + * match exactly as specified in order to give connection_pending() + * control. 
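+ *
+ * (Editor's note: the 24 bytes of 0xff below cover the 16-byte iucvMagic
+ * plus the 8-byte origin userid, i.e. every byte of both must match.)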
+ */ +static __u8 netiucv_mask[] = { + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff +}; + +/** + * Convert an iucv userId to its printable + * form (strip whitespace at end). + * + * @param An iucv userId + * + * @returns The printable string (static data!!) + */ +static __inline__ char * +netiucv_printname(char *name) +{ + static char tmp[9]; + char *p = tmp; + memcpy(tmp, name, 8); + tmp[8] = '\0'; + while (*p && (!isspace(*p))) + p++; + *p = '\0'; + return tmp; +} + +/** + * States of the interface statemachine. + */ +enum dev_states { + DEV_STATE_STOPPED, + DEV_STATE_STARTWAIT, + DEV_STATE_STOPWAIT, + DEV_STATE_RUNNING, + /** + * MUST be always the last element!! + */ + NR_DEV_STATES +}; + +static const char *dev_state_names[] = { + "Stopped", + "StartWait", + "StopWait", + "Running", +}; + +/** + * Events of the interface statemachine. + */ +enum dev_events { + DEV_EVENT_START, + DEV_EVENT_STOP, + DEV_EVENT_CONUP, + DEV_EVENT_CONDOWN, + /** + * MUST be always the last element!! + */ + NR_DEV_EVENTS +}; + +static const char *dev_event_names[] = { + "Start", + "Stop", + "Connection up", + "Connection down", +}; + +/** + * Events of the connection statemachine + */ +enum conn_events { + /** + * Events, representing callbacks from + * lowlevel iucv layer) + */ + CONN_EVENT_CONN_REQ, + CONN_EVENT_CONN_ACK, + CONN_EVENT_CONN_REJ, + CONN_EVENT_CONN_SUS, + CONN_EVENT_CONN_RES, + CONN_EVENT_RX, + CONN_EVENT_TXDONE, + + /** + * Events, representing errors return codes from + * calls to lowlevel iucv layer + */ + + /** + * Event, representing timer expiry. + */ + CONN_EVENT_TIMER, + + /** + * Events, representing commands from upper levels. + */ + CONN_EVENT_START, + CONN_EVENT_STOP, + + /** + * MUST be always the last element!! + */ + NR_CONN_EVENTS, +}; + +static const char *conn_event_names[] = { + "Remote connection request", + "Remote connection acknowledge", + "Remote connection reject", + "Connection suspended", + "Connection resumed", + "Data received", + "Data sent", + + "Timer", + + "Start", + "Stop", +}; + +/** + * States of the connection statemachine. + */ +enum conn_states { + /** + * Connection not assigned to any device, + * initial state, invalid + */ + CONN_STATE_INVALID, + + /** + * Userid assigned but not operating + */ + CONN_STATE_STOPPED, + + /** + * Connection registered, + * no connection request sent yet, + * no connection request received + */ + CONN_STATE_STARTWAIT, + + /** + * Connection registered and connection request sent, + * no acknowledge and no connection request received yet. + */ + CONN_STATE_SETUPWAIT, + + /** + * Connection up and running idle + */ + CONN_STATE_IDLE, + + /** + * Data sent, awaiting CONN_EVENT_TXDONE + */ + CONN_STATE_TX, + + /** + * Error during registration. + */ + CONN_STATE_REGERR, + + /** + * Error during registration. + */ + CONN_STATE_CONNERR, + + /** + * MUST be always the last element!! 
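+	 * Editor's note: keeping this entry last lets it double as the
+	 * element count, mirroring conn_state_names[] below and the
+	 * NR_DEV_STATES / NR_DEV_EVENTS pattern above.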
+static const char *conn_state_names[] = {
+        "Invalid",
+        "Stopped",
+        "StartWait",
+        "SetupWait",
+        "Idle",
+        "TX",
+        "Registration error",
+        "Connect error",
+};
+
+
+/**
+ * Debug Facility Stuff
+ */
+static debug_info_t *iucv_dbf_setup = NULL;
+static debug_info_t *iucv_dbf_data = NULL;
+static debug_info_t *iucv_dbf_trace = NULL;
+
+DEFINE_PER_CPU(char[256], iucv_dbf_txt_buf);
+
+static void
+iucv_unregister_dbf_views(void)
+{
+        if (iucv_dbf_setup)
+                debug_unregister(iucv_dbf_setup);
+        if (iucv_dbf_data)
+                debug_unregister(iucv_dbf_data);
+        if (iucv_dbf_trace)
+                debug_unregister(iucv_dbf_trace);
+}
+
+static int
+iucv_register_dbf_views(void)
+{
+        iucv_dbf_setup = debug_register(IUCV_DBF_SETUP_NAME,
+                                        IUCV_DBF_SETUP_INDEX,
+                                        IUCV_DBF_SETUP_NR_AREAS,
+                                        IUCV_DBF_SETUP_LEN);
+        iucv_dbf_data = debug_register(IUCV_DBF_DATA_NAME,
+                                       IUCV_DBF_DATA_INDEX,
+                                       IUCV_DBF_DATA_NR_AREAS,
+                                       IUCV_DBF_DATA_LEN);
+        iucv_dbf_trace = debug_register(IUCV_DBF_TRACE_NAME,
+                                        IUCV_DBF_TRACE_INDEX,
+                                        IUCV_DBF_TRACE_NR_AREAS,
+                                        IUCV_DBF_TRACE_LEN);
+
+        if ((iucv_dbf_setup == NULL) || (iucv_dbf_data == NULL) ||
+            (iucv_dbf_trace == NULL)) {
+                iucv_unregister_dbf_views();
+                return -ENOMEM;
+        }
+        debug_register_view(iucv_dbf_setup, &debug_hex_ascii_view);
+        debug_set_level(iucv_dbf_setup, IUCV_DBF_SETUP_LEVEL);
+
+        debug_register_view(iucv_dbf_data, &debug_hex_ascii_view);
+        debug_set_level(iucv_dbf_data, IUCV_DBF_DATA_LEVEL);
+
+        debug_register_view(iucv_dbf_trace, &debug_hex_ascii_view);
+        debug_set_level(iucv_dbf_trace, IUCV_DBF_TRACE_LEVEL);
+
+        return 0;
+}
+
+/**
+ * Callback-wrappers, called from the lowlevel iucv layer.
+ *****************************************************************************/
+
+static void
+netiucv_callback_rx(iucv_MessagePending *eib, void *pgm_data)
+{
+        struct iucv_connection *conn = (struct iucv_connection *)pgm_data;
+        struct iucv_event ev;
+
+        ev.conn = conn;
+        ev.data = (void *)eib;
+
+        fsm_event(conn->fsm, CONN_EVENT_RX, &ev);
+}
+
+static void
+netiucv_callback_txdone(iucv_MessageComplete *eib, void *pgm_data)
+{
+        struct iucv_connection *conn = (struct iucv_connection *)pgm_data;
+        struct iucv_event ev;
+
+        ev.conn = conn;
+        ev.data = (void *)eib;
+        fsm_event(conn->fsm, CONN_EVENT_TXDONE, &ev);
+}
+
+static void
+netiucv_callback_connack(iucv_ConnectionComplete *eib, void *pgm_data)
+{
+        struct iucv_connection *conn = (struct iucv_connection *)pgm_data;
+        struct iucv_event ev;
+
+        ev.conn = conn;
+        ev.data = (void *)eib;
+        fsm_event(conn->fsm, CONN_EVENT_CONN_ACK, &ev);
+}
+
+static void
+netiucv_callback_connreq(iucv_ConnectionPending *eib, void *pgm_data)
+{
+        struct iucv_connection *conn = (struct iucv_connection *)pgm_data;
+        struct iucv_event ev;
+
+        ev.conn = conn;
+        ev.data = (void *)eib;
+        fsm_event(conn->fsm, CONN_EVENT_CONN_REQ, &ev);
+}
+
+static void
+netiucv_callback_connrej(iucv_ConnectionSevered *eib, void *pgm_data)
+{
+        struct iucv_connection *conn = (struct iucv_connection *)pgm_data;
+        struct iucv_event ev;
+
+        ev.conn = conn;
+        ev.data = (void *)eib;
+        fsm_event(conn->fsm, CONN_EVENT_CONN_REJ, &ev);
+}
+
+static void
+netiucv_callback_connsusp(iucv_ConnectionQuiesced *eib, void *pgm_data)
+{
+        struct iucv_connection *conn = (struct iucv_connection *)pgm_data;
+        struct iucv_event ev;
+
+        ev.conn = conn;
+        ev.data = (void *)eib;
+        fsm_event(conn->fsm, CONN_EVENT_CONN_SUS, &ev);
+}
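
[Editor's note] Since fsm_getstate_str() (used by the sysfs attributes further down) presumably indexes these name arrays by the numeric state, each array must contain exactly NR_*_STATES or NR_*_EVENTS entries. A compile-time guard in the spirit of the kernel's BUILD_BUG_ON would catch any drift between an enum and its name table; a minimal sketch with illustrative macro names:

/* The typedef'd array gets a negative size, and therefore fails to
 * compile, whenever a name table and its enum disagree in length. */
#define N_ENTRIES(a) (sizeof(a) / sizeof((a)[0]))
#define CHECK_NAME_TABLE(tbl, nr) \
        typedef char check_##tbl[(N_ENTRIES(tbl) == (nr)) ? 1 : -1]

CHECK_NAME_TABLE(dev_state_names,  NR_DEV_STATES);
CHECK_NAME_TABLE(dev_event_names,  NR_DEV_EVENTS);
CHECK_NAME_TABLE(conn_event_names, NR_CONN_EVENTS);
CHECK_NAME_TABLE(conn_state_names, NR_CONN_STATES);
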
+static void
+netiucv_callback_connres(iucv_ConnectionResumed *eib, void *pgm_data)
+{
+        struct iucv_connection *conn = (struct iucv_connection *)pgm_data;
+        struct iucv_event ev;
+
+        ev.conn = conn;
+        ev.data = (void *)eib;
+        fsm_event(conn->fsm, CONN_EVENT_CONN_RES, &ev);
+}
+
+static iucv_interrupt_ops_t netiucv_ops = {
+        .ConnectionPending  = netiucv_callback_connreq,
+        .ConnectionComplete = netiucv_callback_connack,
+        .ConnectionSevered  = netiucv_callback_connrej,
+        .ConnectionQuiesced = netiucv_callback_connsusp,
+        .ConnectionResumed  = netiucv_callback_connres,
+        .MessagePending     = netiucv_callback_rx,
+        .MessageComplete    = netiucv_callback_txdone
+};
+
+/**
+ * Dummy NOP action for all statemachines
+ */
+static void
+fsm_action_nop(fsm_instance *fi, int event, void *arg)
+{
+}
+
+/**
+ * Actions of the connection statemachine
+ *****************************************************************************/
+
+/**
+ * Helper function for conn_action_rx()
+ * Unpack a just-received skb and hand it over to
+ * upper layers.
+ *
+ * @param conn The connection where this skb has been received.
+ * @param pskb The received skb.
+ */
+static void
+netiucv_unpack_skb(struct iucv_connection *conn, struct sk_buff *pskb)
+{
+        struct net_device *dev = conn->netdev;
+        struct netiucv_priv *privptr = dev->priv;
+        __u16 offset = 0;
+
+        skb_put(pskb, NETIUCV_HDRLEN);
+        pskb->dev = dev;
+        pskb->ip_summed = CHECKSUM_NONE;
+        pskb->protocol = htons(ETH_P_IP);
+
+        while (1) {
+                struct sk_buff *skb;
+                ll_header *header = (ll_header *)pskb->data;
+
+                if (!header->next)
+                        break;
+
+                skb_pull(pskb, NETIUCV_HDRLEN);
+                header->next -= offset;
+                offset += header->next;
+                header->next -= NETIUCV_HDRLEN;
+                if (skb_tailroom(pskb) < header->next) {
+                        PRINT_WARN("%s: Illegal next field in iucv header: "
+                               "%d > %d\n",
+                               dev->name, header->next, skb_tailroom(pskb));
+                        IUCV_DBF_TEXT_(data, 2, "Illegal next field: %d > %d\n",
+                                header->next, skb_tailroom(pskb));
+                        return;
+                }
+                skb_put(pskb, header->next);
+                pskb->mac.raw = pskb->data;
+                skb = dev_alloc_skb(pskb->len);
+                if (!skb) {
+                        PRINT_WARN("%s Out of memory in netiucv_unpack_skb\n",
+                               dev->name);
+                        IUCV_DBF_TEXT(data, 2,
+                                "Out of memory in netiucv_unpack_skb\n");
+                        privptr->stats.rx_dropped++;
+                        return;
+                }
+                memcpy(skb_put(skb, pskb->len), pskb->data, pskb->len);
+                skb->mac.raw = skb->data;
+                skb->dev = pskb->dev;
+                skb->protocol = pskb->protocol;
+                skb->ip_summed = CHECKSUM_UNNECESSARY;
+                /*
+                 * Since receiving is always initiated from a tasklet (in iucv.c),
+                 * we must use netif_rx_ni() instead of netif_rx()
+                 */
+                netif_rx_ni(skb);
+                dev->last_rx = jiffies;
+                privptr->stats.rx_packets++;
+                privptr->stats.rx_bytes += skb->len;
+                skb_pull(pskb, header->next);
+                skb_put(pskb, NETIUCV_HDRLEN);
+        }
+}
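
[Editor's note] netiucv_unpack_skb() above walks a receive buffer in which every packet is preceded by a two-byte ll_header whose next field holds the absolute offset of the following header; a header with next == 0 terminates the chain. The send side builds exactly this layout in conn_action_txdone() and netiucv_transmit_skb() below. A small user-space sketch of the framing, with hypothetical helper names (the real driver stores the 16-bit field in host byte order, which is safe because both VM guests are big-endian s390):

#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define HDRLEN 2  /* sizeof(ll_header): a single 16-bit "next" field */

/* Append one payload; "next" is the absolute offset of the following
 * header, exactly as conn_action_txdone() computes it. */
static size_t pack(uint8_t *buf, size_t used, const void *data, uint16_t len)
{
        uint16_t next = (uint16_t)(used + HDRLEN + len);

        memcpy(buf + used, &next, HDRLEN);
        memcpy(buf + used + HDRLEN, data, len);
        return used + HDRLEN + len;
}

/* Terminate the chain with a header whose "next" field is zero. */
static size_t finish(uint8_t *buf, size_t used)
{
        uint16_t end = 0;

        memcpy(buf + used, &end, HDRLEN);
        return used + HDRLEN;
}

int main(void)
{
        uint8_t buf[128];
        size_t used = 0;
        uint16_t offset = 0, next, paylen;

        used = pack(buf, used, "hello", 5);
        used = pack(buf, used, "world!", 6);
        used = finish(buf, used);

        /* Unpack, mirroring the offset arithmetic in netiucv_unpack_skb(). */
        for (;;) {
                memcpy(&next, buf + offset, HDRLEN);
                if (!next)
                        break;
                paylen = next - offset - HDRLEN;
                printf("packet of %u bytes: %.*s\n", (unsigned)paylen,
                       (int)paylen, buf + offset + HDRLEN);
                offset = next;
        }
        return 0;
}
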
+static void
+conn_action_rx(fsm_instance *fi, int event, void *arg)
+{
+        struct iucv_event *ev = (struct iucv_event *)arg;
+        struct iucv_connection *conn = ev->conn;
+        iucv_MessagePending *eib = (iucv_MessagePending *)ev->data;
+        struct netiucv_priv *privptr = (struct netiucv_priv *)conn->netdev->priv;
+
+        __u32 msglen = eib->ln1msg2.ipbfln1f;
+        int rc;
+
+        IUCV_DBF_TEXT(trace, 4, __FUNCTION__);
+
+        if (!conn->netdev) {
+                /* FRITZ: How to tell iucv LL to drop the msg? */
+                PRINT_WARN("Received data for unlinked connection\n");
+                IUCV_DBF_TEXT(data, 2,
+                        "Received data for unlinked connection\n");
+                return;
+        }
+        if (msglen > conn->max_buffsize) {
+                /* FRITZ: How to tell iucv LL to drop the msg? */
+                privptr->stats.rx_dropped++;
+                PRINT_WARN("msglen %d > max_buffsize %d\n",
+                        msglen, conn->max_buffsize);
+                IUCV_DBF_TEXT_(data, 2, "msglen %d > max_buffsize %d\n",
+                        msglen, conn->max_buffsize);
+                return;
+        }
+        conn->rx_buff->data = conn->rx_buff->tail = conn->rx_buff->head;
+        conn->rx_buff->len = 0;
+        rc = iucv_receive(conn->pathid, eib->ipmsgid, eib->iptrgcls,
+                          conn->rx_buff->data, msglen, NULL, NULL, NULL);
+        if (rc || msglen < 5) {
+                privptr->stats.rx_errors++;
+                PRINT_WARN("iucv_receive returned %08x\n", rc);
+                IUCV_DBF_TEXT_(data, 2, "rc %d from iucv_receive\n", rc);
+                return;
+        }
+        netiucv_unpack_skb(conn, conn->rx_buff);
+}
+
+static void
+conn_action_txdone(fsm_instance *fi, int event, void *arg)
+{
+        struct iucv_event *ev = (struct iucv_event *)arg;
+        struct iucv_connection *conn = ev->conn;
+        iucv_MessageComplete *eib = (iucv_MessageComplete *)ev->data;
+        struct netiucv_priv *privptr = NULL;
+        /* The msgtag is 1 for single sends, 0 for multi sends;
+         * see the iucv_send() calls below and in netiucv_transmit_skb(). */
+        __u32 single_flag = eib->ipmsgtag;
+        __u32 txbytes = 0;
+        __u32 txpackets = 0;
+        __u32 stat_maxcq = 0;
+        struct sk_buff *skb;
+        unsigned long saveflags;
+        ll_header header;
+
+        IUCV_DBF_TEXT(trace, 4, __FUNCTION__);
+
+        if (conn && conn->netdev && conn->netdev->priv)
+                privptr = (struct netiucv_priv *)conn->netdev->priv;
+        conn->prof.tx_pending--;
+        if (single_flag) {
+                if ((skb = skb_dequeue(&conn->commit_queue))) {
+                        if (privptr) {
+                                privptr->stats.tx_packets++;
+                                /* Account before dropping the last
+                                 * references, which frees the skb. */
+                                privptr->stats.tx_bytes +=
+                                        skb->len - NETIUCV_HDRLEN
+                                                 - NETIUCV_HDRLEN;
+                        }
+                        atomic_dec(&skb->users);
+                        dev_kfree_skb_any(skb);
+                }
+        }
+        conn->tx_buff->data = conn->tx_buff->tail = conn->tx_buff->head;
+        conn->tx_buff->len = 0;
+        spin_lock_irqsave(&conn->collect_lock, saveflags);
+        while ((skb = skb_dequeue(&conn->collect_queue))) {
+                header.next = conn->tx_buff->len + skb->len + NETIUCV_HDRLEN;
+                memcpy(skb_put(conn->tx_buff, NETIUCV_HDRLEN), &header,
+                       NETIUCV_HDRLEN);
+                memcpy(skb_put(conn->tx_buff, skb->len), skb->data, skb->len);
+                txbytes += skb->len;
+                txpackets++;
+                stat_maxcq++;
+                atomic_dec(&skb->users);
+                dev_kfree_skb_any(skb);
+        }
+        if (conn->collect_len > conn->prof.maxmulti)
+                conn->prof.maxmulti = conn->collect_len;
+        conn->collect_len = 0;
+        spin_unlock_irqrestore(&conn->collect_lock, saveflags);
+        if (conn->tx_buff->len) {
+                int rc;
+
+                header.next = 0;
+                memcpy(skb_put(conn->tx_buff, NETIUCV_HDRLEN), &header,
+                       NETIUCV_HDRLEN);
+
+                conn->prof.send_stamp = xtime;
+                rc = iucv_send(conn->pathid, NULL, 0, 0, 0, 0,
+                               conn->tx_buff->data, conn->tx_buff->len);
+                conn->prof.doios_multi++;
+                conn->prof.txlen += conn->tx_buff->len;
+                conn->prof.tx_pending++;
+                if (conn->prof.tx_pending > conn->prof.tx_max_pending)
+                        conn->prof.tx_max_pending = conn->prof.tx_pending;
+                if (rc) {
+                        conn->prof.tx_pending--;
+                        fsm_newstate(fi, CONN_STATE_IDLE);
+                        if (privptr)
+                                privptr->stats.tx_errors += txpackets;
+                        PRINT_WARN("iucv_send returned %08x\n", rc);
+                        IUCV_DBF_TEXT_(data, 2, "rc %d from iucv_send\n", rc);
+                } else {
+                        if (privptr) {
+                                privptr->stats.tx_packets += txpackets;
+                                privptr->stats.tx_bytes += txbytes;
+                        }
+                        if (stat_maxcq > conn->prof.maxcqueue)
+                                conn->prof.maxcqueue = stat_maxcq;
+                }
+        } else
+                fsm_newstate(fi, CONN_STATE_IDLE);
+}
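
[Editor's note] conn_action_txdone() also shows the buffer-lifetime rule this driver follows: the transmit path takes an extra skb->users reference before queueing a buffer on the commit or collect queue, and the completion handler drops it again with atomic_dec() plus dev_kfree_skb_any(), so any statistics derived from the skb have to be captured before the final free. A user-space analogue of that reference-counted handoff (all names hypothetical):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* A stand-in for struct sk_buff with its "users" reference count. */
struct buf {
        int users;
        size_t len;
        char data[64];
};

static struct buf *buf_get(struct buf *b) { b->users++; return b; }

static void buf_put(struct buf *b)
{
        if (--b->users == 0)
                free(b);  /* last reference: the buffer is gone */
}

int main(void)
{
        struct buf *b = calloc(1, sizeof(*b));
        size_t acct;

        if (!b)
                return 1;
        b->users = 1;
        b->len = strlen(strcpy(b->data, "payload"));

        buf_get(b);        /* queued for transmission */

        /* ... transmission completes ... */
        acct = b->len;     /* read stats *before* dropping the refs */
        buf_put(b);        /* the queue's reference */
        buf_put(b);        /* the owner's reference: frees the buffer */
        printf("accounted %zu bytes\n", acct);
        return 0;
}
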
+static void
+conn_action_connaccept(fsm_instance *fi, int event, void *arg)
+{
+        struct iucv_event *ev = (struct iucv_event *)arg;
+        struct iucv_connection *conn = ev->conn;
+        iucv_ConnectionPending *eib = (iucv_ConnectionPending *)ev->data;
+        struct net_device *netdev = conn->netdev;
+        struct netiucv_priv *privptr = (struct netiucv_priv *)netdev->priv;
+        int rc;
+        __u16 msglimit;
+        __u8 udata[16];
+
+        IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
+
+        rc = iucv_accept(eib->ippathid, NETIUCV_QUEUELEN_DEFAULT, udata, 0,
+                         conn->handle, conn, NULL, &msglimit);
+        if (rc) {
+                PRINT_WARN("%s: IUCV accept failed with error %d\n",
+                       netdev->name, rc);
+                IUCV_DBF_TEXT_(setup, 2, "rc %d from iucv_accept", rc);
+                return;
+        }
+        fsm_newstate(fi, CONN_STATE_IDLE);
+        conn->pathid = eib->ippathid;
+        netdev->tx_queue_len = msglimit;
+        fsm_event(privptr->fsm, DEV_EVENT_CONUP, netdev);
+}
+
+static void
+conn_action_connreject(fsm_instance *fi, int event, void *arg)
+{
+        struct iucv_event *ev = (struct iucv_event *)arg;
+        struct iucv_connection *conn = ev->conn;
+        struct net_device *netdev = conn->netdev;
+        iucv_ConnectionPending *eib = (iucv_ConnectionPending *)ev->data;
+        __u8 udata[16];
+
+        IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
+
+        iucv_sever(eib->ippathid, udata);
+        if (eib->ippathid != conn->pathid) {
+                PRINT_INFO("%s: IR Connection Pending; "
+                        "pathid %d does not match original pathid %d\n",
+                        netdev->name, eib->ippathid, conn->pathid);
+                IUCV_DBF_TEXT_(data, 2,
+                        "connreject: IR pathid %d, conn. pathid %d\n",
+                        eib->ippathid, conn->pathid);
+                iucv_sever(conn->pathid, udata);
+        }
+}
+
+static void
+conn_action_connack(fsm_instance *fi, int event, void *arg)
+{
+        struct iucv_event *ev = (struct iucv_event *)arg;
+        struct iucv_connection *conn = ev->conn;
+        iucv_ConnectionComplete *eib = (iucv_ConnectionComplete *)ev->data;
+        struct net_device *netdev = conn->netdev;
+        struct netiucv_priv *privptr = (struct netiucv_priv *)netdev->priv;
+
+        IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
+
+        fsm_deltimer(&conn->timer);
+        fsm_newstate(fi, CONN_STATE_IDLE);
+        if (eib->ippathid != conn->pathid) {
+                PRINT_INFO("%s: IR Connection Complete; "
+                        "pathid %d does not match original pathid %d\n",
+                        netdev->name, eib->ippathid, conn->pathid);
+                IUCV_DBF_TEXT_(data, 2,
+                        "connack: IR pathid %d, conn. 
pathid %d\n", + eib->ippathid, conn->pathid); + conn->pathid = eib->ippathid; + } + netdev->tx_queue_len = eib->ipmsglim; + fsm_event(privptr->fsm, DEV_EVENT_CONUP, netdev); +} + +static void +conn_action_conntimsev(fsm_instance *fi, int event, void *arg) +{ + struct iucv_connection *conn = (struct iucv_connection *)arg; + __u8 udata[16]; + + IUCV_DBF_TEXT(trace, 3, __FUNCTION__); + + fsm_deltimer(&conn->timer); + iucv_sever(conn->pathid, udata); + fsm_newstate(fi, CONN_STATE_STARTWAIT); +} + +static void +conn_action_connsever(fsm_instance *fi, int event, void *arg) +{ + struct iucv_event *ev = (struct iucv_event *)arg; + struct iucv_connection *conn = ev->conn; + struct net_device *netdev = conn->netdev; + struct netiucv_priv *privptr = (struct netiucv_priv *)netdev->priv; + __u8 udata[16]; + + IUCV_DBF_TEXT(trace, 3, __FUNCTION__); + + fsm_deltimer(&conn->timer); + iucv_sever(conn->pathid, udata); + PRINT_INFO("%s: Remote dropped connection\n", netdev->name); + IUCV_DBF_TEXT(data, 2, + "conn_action_connsever: Remote dropped connection\n"); + fsm_newstate(fi, CONN_STATE_STARTWAIT); + fsm_event(privptr->fsm, DEV_EVENT_CONDOWN, netdev); +} + +static void +conn_action_start(fsm_instance *fi, int event, void *arg) +{ + struct iucv_event *ev = (struct iucv_event *)arg; + struct iucv_connection *conn = ev->conn; + __u16 msglimit; + int rc; + + IUCV_DBF_TEXT(trace, 3, __FUNCTION__); + + if (!conn->handle) { + IUCV_DBF_TEXT(trace, 5, "calling iucv_register_program\n"); + conn->handle = + iucv_register_program(iucvMagic, conn->userid, + netiucv_mask, + &netiucv_ops, conn); + fsm_newstate(fi, CONN_STATE_STARTWAIT); + if (!conn->handle) { + fsm_newstate(fi, CONN_STATE_REGERR); + conn->handle = NULL; + IUCV_DBF_TEXT(setup, 2, + "NULL from iucv_register_program\n"); + return; + } + + PRINT_DEBUG("%s('%s'): registered successfully\n", + conn->netdev->name, conn->userid); + } + + PRINT_DEBUG("%s('%s'): connecting ...\n", + conn->netdev->name, conn->userid); + + /* We must set the state before calling iucv_connect because the callback + * handler could be called at any point after the connection request is + * sent */ + + fsm_newstate(fi, CONN_STATE_SETUPWAIT); + rc = iucv_connect(&(conn->pathid), NETIUCV_QUEUELEN_DEFAULT, iucvMagic, + conn->userid, iucv_host, 0, NULL, &msglimit, + conn->handle, conn); + switch (rc) { + case 0: + conn->netdev->tx_queue_len = msglimit; + fsm_addtimer(&conn->timer, NETIUCV_TIMEOUT_5SEC, + CONN_EVENT_TIMER, conn); + return; + case 11: + PRINT_INFO("%s: User %s is currently not available.\n", + conn->netdev->name, + netiucv_printname(conn->userid)); + fsm_newstate(fi, CONN_STATE_STARTWAIT); + return; + case 12: + PRINT_INFO("%s: User %s is currently not ready.\n", + conn->netdev->name, + netiucv_printname(conn->userid)); + fsm_newstate(fi, CONN_STATE_STARTWAIT); + return; + case 13: + PRINT_WARN("%s: Too many IUCV connections.\n", + conn->netdev->name); + fsm_newstate(fi, CONN_STATE_CONNERR); + break; + case 14: + PRINT_WARN( + "%s: User %s has too many IUCV connections.\n", + conn->netdev->name, + netiucv_printname(conn->userid)); + fsm_newstate(fi, CONN_STATE_CONNERR); + break; + case 15: + PRINT_WARN( + "%s: No IUCV authorization in CP directory.\n", + conn->netdev->name); + fsm_newstate(fi, CONN_STATE_CONNERR); + break; + default: + PRINT_WARN("%s: iucv_connect returned error %d\n", + conn->netdev->name, rc); + fsm_newstate(fi, CONN_STATE_CONNERR); + break; + } + IUCV_DBF_TEXT_(setup, 5, "iucv_connect rc is %d\n", rc); + IUCV_DBF_TEXT(trace, 5, "calling 
iucv_unregister_program\n");
+        iucv_unregister_program(conn->handle);
+        conn->handle = NULL;
+}
+
+static void
+netiucv_purge_skb_queue(struct sk_buff_head *q)
+{
+        struct sk_buff *skb;
+
+        while ((skb = skb_dequeue(q))) {
+                atomic_dec(&skb->users);
+                dev_kfree_skb_any(skb);
+        }
+}
+
+static void
+conn_action_stop(fsm_instance *fi, int event, void *arg)
+{
+        struct iucv_event *ev = (struct iucv_event *)arg;
+        struct iucv_connection *conn = ev->conn;
+        struct net_device *netdev = conn->netdev;
+        struct netiucv_priv *privptr = (struct netiucv_priv *)netdev->priv;
+
+        IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
+
+        fsm_deltimer(&conn->timer);
+        fsm_newstate(fi, CONN_STATE_STOPPED);
+        netiucv_purge_skb_queue(&conn->collect_queue);
+        if (conn->handle) {
+                IUCV_DBF_TEXT(trace, 5, "calling iucv_unregister_program\n");
+                iucv_unregister_program(conn->handle);
+                conn->handle = NULL;
+        }
+        netiucv_purge_skb_queue(&conn->commit_queue);
+        fsm_event(privptr->fsm, DEV_EVENT_CONDOWN, netdev);
+}
+
+static void
+conn_action_inval(fsm_instance *fi, int event, void *arg)
+{
+        struct iucv_event *ev = (struct iucv_event *)arg;
+        struct iucv_connection *conn = ev->conn;
+        struct net_device *netdev = conn->netdev;
+
+        PRINT_WARN("%s: Cannot connect without username\n",
+               netdev->name);
+        IUCV_DBF_TEXT(data, 2, "conn_action_inval called\n");
+}
+
+static const fsm_node conn_fsm[] = {
+        { CONN_STATE_INVALID,   CONN_EVENT_START,    conn_action_inval      },
+        { CONN_STATE_STOPPED,   CONN_EVENT_START,    conn_action_start      },
+
+        { CONN_STATE_STOPPED,   CONN_EVENT_STOP,     conn_action_stop       },
+        { CONN_STATE_STARTWAIT, CONN_EVENT_STOP,     conn_action_stop       },
+        { CONN_STATE_SETUPWAIT, CONN_EVENT_STOP,     conn_action_stop       },
+        { CONN_STATE_IDLE,      CONN_EVENT_STOP,     conn_action_stop       },
+        { CONN_STATE_TX,        CONN_EVENT_STOP,     conn_action_stop       },
+        { CONN_STATE_REGERR,    CONN_EVENT_STOP,     conn_action_stop       },
+        { CONN_STATE_CONNERR,   CONN_EVENT_STOP,     conn_action_stop       },
+
+        { CONN_STATE_STOPPED,   CONN_EVENT_CONN_REQ, conn_action_connreject },
+        { CONN_STATE_STARTWAIT, CONN_EVENT_CONN_REQ, conn_action_connaccept },
+        { CONN_STATE_SETUPWAIT, CONN_EVENT_CONN_REQ, conn_action_connaccept },
+        { CONN_STATE_IDLE,      CONN_EVENT_CONN_REQ, conn_action_connreject },
+        { CONN_STATE_TX,        CONN_EVENT_CONN_REQ, conn_action_connreject },
+
+        { CONN_STATE_SETUPWAIT, CONN_EVENT_CONN_ACK, conn_action_connack    },
+        { CONN_STATE_SETUPWAIT, CONN_EVENT_TIMER,    conn_action_conntimsev },
+
+        { CONN_STATE_SETUPWAIT, CONN_EVENT_CONN_REJ, conn_action_connsever  },
+        { CONN_STATE_IDLE,      CONN_EVENT_CONN_REJ, conn_action_connsever  },
+        { CONN_STATE_TX,        CONN_EVENT_CONN_REJ, conn_action_connsever  },
+
+        { CONN_STATE_IDLE,      CONN_EVENT_RX,       conn_action_rx         },
+        { CONN_STATE_TX,        CONN_EVENT_RX,       conn_action_rx         },
+
+        { CONN_STATE_TX,        CONN_EVENT_TXDONE,   conn_action_txdone     },
+        { CONN_STATE_IDLE,      CONN_EVENT_TXDONE,   conn_action_txdone     },
+};
+
+static const int CONN_FSM_LEN = sizeof(conn_fsm) / sizeof(fsm_node);
+
+
+/**
+ * Actions for the interface statemachine.
+ *****************************************************************************/
+
+/**
+ * Start up the connection by sending CONN_EVENT_START to it.
+ *
+ * @param fi    An instance of an interface statemachine.
+ * @param event The event that just happened.
+ * @param arg   Generic pointer, cast from struct net_device * upon call.
+ */ +static void +dev_action_start(fsm_instance *fi, int event, void *arg) +{ + struct net_device *dev = (struct net_device *)arg; + struct netiucv_priv *privptr = dev->priv; + struct iucv_event ev; + + IUCV_DBF_TEXT(trace, 3, __FUNCTION__); + + ev.conn = privptr->conn; + fsm_newstate(fi, DEV_STATE_STARTWAIT); + fsm_event(privptr->conn->fsm, CONN_EVENT_START, &ev); +} + +/** + * Shutdown connection by sending CONN_EVENT_STOP to it. + * + * @param fi An instance of an interface statemachine. + * @param event The event, just happened. + * @param arg Generic pointer, casted from struct net_device * upon call. + */ +static void +dev_action_stop(fsm_instance *fi, int event, void *arg) +{ + struct net_device *dev = (struct net_device *)arg; + struct netiucv_priv *privptr = dev->priv; + struct iucv_event ev; + + IUCV_DBF_TEXT(trace, 3, __FUNCTION__); + + ev.conn = privptr->conn; + + fsm_newstate(fi, DEV_STATE_STOPWAIT); + fsm_event(privptr->conn->fsm, CONN_EVENT_STOP, &ev); +} + +/** + * Called from connection statemachine + * when a connection is up and running. + * + * @param fi An instance of an interface statemachine. + * @param event The event, just happened. + * @param arg Generic pointer, casted from struct net_device * upon call. + */ +static void +dev_action_connup(fsm_instance *fi, int event, void *arg) +{ + struct net_device *dev = (struct net_device *)arg; + struct netiucv_priv *privptr = dev->priv; + + IUCV_DBF_TEXT(trace, 3, __FUNCTION__); + + switch (fsm_getstate(fi)) { + case DEV_STATE_STARTWAIT: + fsm_newstate(fi, DEV_STATE_RUNNING); + PRINT_INFO("%s: connected with remote side %s\n", + dev->name, privptr->conn->userid); + IUCV_DBF_TEXT(setup, 3, + "connection is up and running\n"); + break; + case DEV_STATE_STOPWAIT: + PRINT_INFO( + "%s: got connection UP event during shutdown!\n", + dev->name); + IUCV_DBF_TEXT(data, 2, + "dev_action_connup: in DEV_STATE_STOPWAIT\n"); + break; + } +} + +/** + * Called from connection statemachine + * when a connection has been shutdown. + * + * @param fi An instance of an interface statemachine. + * @param event The event, just happened. + * @param arg Generic pointer, casted from struct net_device * upon call. + */ +static void +dev_action_conndown(fsm_instance *fi, int event, void *arg) +{ + IUCV_DBF_TEXT(trace, 3, __FUNCTION__); + + switch (fsm_getstate(fi)) { + case DEV_STATE_RUNNING: + fsm_newstate(fi, DEV_STATE_STARTWAIT); + break; + case DEV_STATE_STOPWAIT: + fsm_newstate(fi, DEV_STATE_STOPPED); + IUCV_DBF_TEXT(setup, 3, "connection is down\n"); + break; + } +} + +static const fsm_node dev_fsm[] = { + { DEV_STATE_STOPPED, DEV_EVENT_START, dev_action_start }, + + { DEV_STATE_STOPWAIT, DEV_EVENT_START, dev_action_start }, + { DEV_STATE_STOPWAIT, DEV_EVENT_CONDOWN, dev_action_conndown }, + + { DEV_STATE_STARTWAIT, DEV_EVENT_STOP, dev_action_stop }, + { DEV_STATE_STARTWAIT, DEV_EVENT_CONUP, dev_action_connup }, + + { DEV_STATE_RUNNING, DEV_EVENT_STOP, dev_action_stop }, + { DEV_STATE_RUNNING, DEV_EVENT_CONDOWN, dev_action_conndown }, + { DEV_STATE_RUNNING, DEV_EVENT_CONUP, fsm_action_nop }, +}; + +static const int DEV_FSM_LEN = sizeof(dev_fsm) / sizeof(fsm_node); + +/** + * Transmit a packet. + * This is a helper function for netiucv_tx(). + * + * @param conn Connection to be used for sending. + * @param skb Pointer to struct sk_buff of packet to send. + * The linklevel header has already been set up + * by netiucv_tx(). + * + * @return 0 on success, -ERRNO on failure. (Never fails.) 
+ */ +static int +netiucv_transmit_skb(struct iucv_connection *conn, struct sk_buff *skb) { + unsigned long saveflags; + ll_header header; + int rc = 0; + + if (fsm_getstate(conn->fsm) != CONN_STATE_IDLE) { + int l = skb->len + NETIUCV_HDRLEN; + + spin_lock_irqsave(&conn->collect_lock, saveflags); + if (conn->collect_len + l > + (conn->max_buffsize - NETIUCV_HDRLEN)) { + rc = -EBUSY; + IUCV_DBF_TEXT(data, 2, + "EBUSY from netiucv_transmit_skb\n"); + } else { + atomic_inc(&skb->users); + skb_queue_tail(&conn->collect_queue, skb); + conn->collect_len += l; + } + spin_unlock_irqrestore(&conn->collect_lock, saveflags); + } else { + struct sk_buff *nskb = skb; + /** + * Copy the skb to a new allocated skb in lowmem only if the + * data is located above 2G in memory or tailroom is < 2. + */ + unsigned long hi = + ((unsigned long)(skb->tail + NETIUCV_HDRLEN)) >> 31; + int copied = 0; + if (hi || (skb_tailroom(skb) < 2)) { + nskb = alloc_skb(skb->len + NETIUCV_HDRLEN + + NETIUCV_HDRLEN, GFP_ATOMIC | GFP_DMA); + if (!nskb) { + PRINT_WARN("%s: Could not allocate tx_skb\n", + conn->netdev->name); + IUCV_DBF_TEXT(data, 2, "alloc_skb failed\n"); + rc = -ENOMEM; + return rc; + } else { + skb_reserve(nskb, NETIUCV_HDRLEN); + memcpy(skb_put(nskb, skb->len), + skb->data, skb->len); + } + copied = 1; + } + /** + * skb now is below 2G and has enough room. Add headers. + */ + header.next = nskb->len + NETIUCV_HDRLEN; + memcpy(skb_push(nskb, NETIUCV_HDRLEN), &header, NETIUCV_HDRLEN); + header.next = 0; + memcpy(skb_put(nskb, NETIUCV_HDRLEN), &header, NETIUCV_HDRLEN); + + fsm_newstate(conn->fsm, CONN_STATE_TX); + conn->prof.send_stamp = xtime; + + rc = iucv_send(conn->pathid, NULL, 0, 0, 1 /* single_flag */, + 0, nskb->data, nskb->len); + /* Shut up, gcc! nskb is always below 2G. */ + conn->prof.doios_single++; + conn->prof.txlen += skb->len; + conn->prof.tx_pending++; + if (conn->prof.tx_pending > conn->prof.tx_max_pending) + conn->prof.tx_max_pending = conn->prof.tx_pending; + if (rc) { + struct netiucv_priv *privptr; + fsm_newstate(conn->fsm, CONN_STATE_IDLE); + conn->prof.tx_pending--; + privptr = (struct netiucv_priv *)conn->netdev->priv; + if (privptr) + privptr->stats.tx_errors++; + if (copied) + dev_kfree_skb(nskb); + else { + /** + * Remove our headers. They get added + * again on retransmit. + */ + skb_pull(skb, NETIUCV_HDRLEN); + skb_trim(skb, skb->len - NETIUCV_HDRLEN); + } + PRINT_WARN("iucv_send returned %08x\n", rc); + IUCV_DBF_TEXT_(data, 2, "rc %d from iucv_send\n", rc); + } else { + if (copied) + dev_kfree_skb(skb); + atomic_inc(&nskb->users); + skb_queue_tail(&conn->commit_queue, nskb); + } + } + + return rc; +} + +/** + * Interface API for upper network layers + *****************************************************************************/ + +/** + * Open an interface. + * Called from generic network layer when ifconfig up is run. + * + * @param dev Pointer to interface struct. + * + * @return 0 on success, -ERRNO on failure. (Never fails.) + */ +static int +netiucv_open(struct net_device *dev) { + fsm_event(((struct netiucv_priv *)dev->priv)->fsm, DEV_EVENT_START,dev); + return 0; +} + +/** + * Close an interface. + * Called from generic network layer when ifconfig down is run. + * + * @param dev Pointer to interface struct. + * + * @return 0 on success, -ERRNO on failure. (Never fails.) + */ +static int +netiucv_close(struct net_device *dev) { + fsm_event(((struct netiucv_priv *)dev->priv)->fsm, DEV_EVENT_STOP, dev); + return 0; +} + +/** + * Start transmission of a packet. 
+ * Called from generic network device layer. + * + * @param skb Pointer to buffer containing the packet. + * @param dev Pointer to interface struct. + * + * @return 0 if packet consumed, !0 if packet rejected. + * Note: If we return !0, then the packet is free'd by + * the generic network layer. + */ +static int netiucv_tx(struct sk_buff *skb, struct net_device *dev) +{ + int rc = 0; + struct netiucv_priv *privptr = dev->priv; + + IUCV_DBF_TEXT(trace, 4, __FUNCTION__); + /** + * Some sanity checks ... + */ + if (skb == NULL) { + PRINT_WARN("%s: NULL sk_buff passed\n", dev->name); + IUCV_DBF_TEXT(data, 2, "netiucv_tx: skb is NULL\n"); + privptr->stats.tx_dropped++; + return 0; + } + if (skb_headroom(skb) < NETIUCV_HDRLEN) { + PRINT_WARN("%s: Got sk_buff with head room < %ld bytes\n", + dev->name, NETIUCV_HDRLEN); + IUCV_DBF_TEXT(data, 2, + "netiucv_tx: skb_headroom < NETIUCV_HDRLEN\n"); + dev_kfree_skb(skb); + privptr->stats.tx_dropped++; + return 0; + } + + /** + * If connection is not running, try to restart it + * and throw away packet. + */ + if (fsm_getstate(privptr->fsm) != DEV_STATE_RUNNING) { + fsm_event(privptr->fsm, DEV_EVENT_START, dev); + dev_kfree_skb(skb); + privptr->stats.tx_dropped++; + privptr->stats.tx_errors++; + privptr->stats.tx_carrier_errors++; + return 0; + } + + if (netiucv_test_and_set_busy(dev)) { + IUCV_DBF_TEXT(data, 2, "EBUSY from netiucv_tx\n"); + return -EBUSY; + } + dev->trans_start = jiffies; + if (netiucv_transmit_skb(privptr->conn, skb)) + rc = 1; + netiucv_clear_busy(dev); + return rc; +} + +/** + * Returns interface statistics of a device. + * + * @param dev Pointer to interface struct. + * + * @return Pointer to stats struct of this interface. + */ +static struct net_device_stats * +netiucv_stats (struct net_device * dev) +{ + IUCV_DBF_TEXT(trace, 5, __FUNCTION__); + return &((struct netiucv_priv *)dev->priv)->stats; +} + +/** + * Sets MTU of an interface. + * + * @param dev Pointer to interface struct. + * @param new_mtu The new MTU to use for this interface. + * + * @return 0 on success, -EINVAL if MTU is out of valid range. + * (valid range is 576 .. NETIUCV_MTU_MAX). 
+ */ +static int +netiucv_change_mtu (struct net_device * dev, int new_mtu) +{ + IUCV_DBF_TEXT(trace, 3, __FUNCTION__); + if ((new_mtu < 576) || (new_mtu > NETIUCV_MTU_MAX)) { + IUCV_DBF_TEXT(setup, 2, "given MTU out of valid range\n"); + return -EINVAL; + } + dev->mtu = new_mtu; + return 0; +} + +/** + * attributes in sysfs + *****************************************************************************/ + +static ssize_t +user_show (struct device *dev, char *buf) +{ + struct netiucv_priv *priv = dev->driver_data; + + IUCV_DBF_TEXT(trace, 5, __FUNCTION__); + return sprintf(buf, "%s\n", netiucv_printname(priv->conn->userid)); +} + +static ssize_t +user_write (struct device *dev, const char *buf, size_t count) +{ + struct netiucv_priv *priv = dev->driver_data; + struct net_device *ndev = priv->conn->netdev; + char *p; + char *tmp; + char username[10]; + int i; + + IUCV_DBF_TEXT(trace, 3, __FUNCTION__); + if (count>9) { + PRINT_WARN("netiucv: username too long (%d)!\n", (int)count); + IUCV_DBF_TEXT_(setup, 2, + "%d is length of username\n", (int)count); + return -EINVAL; + } + + tmp = strsep((char **) &buf, "\n"); + for (i=0, p=tmp; i<8 && *p; i++, p++) { + if (isalnum(*p) || (*p == '$')) + username[i]= *p; + else if (*p == '\n') { + /* trailing lf, grr */ + break; + } else { + PRINT_WARN("netiucv: Invalid char %c in username!\n", + *p); + IUCV_DBF_TEXT_(setup, 2, + "username: invalid character %c\n", + *p); + return -EINVAL; + } + } + while (i<9) + username[i++] = ' '; + username[9] = '\0'; + + if (memcmp(username, priv->conn->userid, 8)) { + /* username changed */ + if (ndev->flags & (IFF_UP | IFF_RUNNING)) { + PRINT_WARN( + "netiucv: device %s active, connected to %s\n", + dev->bus_id, priv->conn->userid); + PRINT_WARN("netiucv: user cannot be updated\n"); + IUCV_DBF_TEXT(setup, 2, "user_write: device active\n"); + return -EBUSY; + } + } + memcpy(priv->conn->userid, username, 9); + + return count; + +} + +static DEVICE_ATTR(user, 0644, user_show, user_write); + +static ssize_t +buffer_show (struct device *dev, char *buf) +{ + struct netiucv_priv *priv = dev->driver_data; + + IUCV_DBF_TEXT(trace, 5, __FUNCTION__); + return sprintf(buf, "%d\n", priv->conn->max_buffsize); +} + +static ssize_t +buffer_write (struct device *dev, const char *buf, size_t count) +{ + struct netiucv_priv *priv = dev->driver_data; + struct net_device *ndev = priv->conn->netdev; + char *e; + int bs1; + + IUCV_DBF_TEXT(trace, 3, __FUNCTION__); + if (count >= 39) + return -EINVAL; + + bs1 = simple_strtoul(buf, &e, 0); + + if (e && (!isspace(*e))) { + PRINT_WARN("netiucv: Invalid character in buffer!\n"); + IUCV_DBF_TEXT_(setup, 2, "buffer_write: invalid char %c\n", *e); + return -EINVAL; + } + if (bs1 > NETIUCV_BUFSIZE_MAX) { + PRINT_WARN("netiucv: Given buffer size %d too large.\n", + bs1); + IUCV_DBF_TEXT_(setup, 2, + "buffer_write: buffer size %d too large\n", + bs1); + return -EINVAL; + } + if ((ndev->flags & IFF_RUNNING) && + (bs1 < (ndev->mtu + NETIUCV_HDRLEN + 2))) { + PRINT_WARN("netiucv: Given buffer size %d too small.\n", + bs1); + IUCV_DBF_TEXT_(setup, 2, + "buffer_write: buffer size %d too small\n", + bs1); + return -EINVAL; + } + if (bs1 < (576 + NETIUCV_HDRLEN + NETIUCV_HDRLEN)) { + PRINT_WARN("netiucv: Given buffer size %d too small.\n", + bs1); + IUCV_DBF_TEXT_(setup, 2, + "buffer_write: buffer size %d too small\n", + bs1); + return -EINVAL; + } + + priv->conn->max_buffsize = bs1; + if (!(ndev->flags & IFF_RUNNING)) + ndev->mtu = bs1 - NETIUCV_HDRLEN - NETIUCV_HDRLEN; + + return count; + +} + +static 
DEVICE_ATTR(buffer, 0644, buffer_show, buffer_write); + +static ssize_t +dev_fsm_show (struct device *dev, char *buf) +{ + struct netiucv_priv *priv = dev->driver_data; + + IUCV_DBF_TEXT(trace, 5, __FUNCTION__); + return sprintf(buf, "%s\n", fsm_getstate_str(priv->fsm)); +} + +static DEVICE_ATTR(device_fsm_state, 0444, dev_fsm_show, NULL); + +static ssize_t +conn_fsm_show (struct device *dev, char *buf) +{ + struct netiucv_priv *priv = dev->driver_data; + + IUCV_DBF_TEXT(trace, 5, __FUNCTION__); + return sprintf(buf, "%s\n", fsm_getstate_str(priv->conn->fsm)); +} + +static DEVICE_ATTR(connection_fsm_state, 0444, conn_fsm_show, NULL); + +static ssize_t +maxmulti_show (struct device *dev, char *buf) +{ + struct netiucv_priv *priv = dev->driver_data; + + IUCV_DBF_TEXT(trace, 5, __FUNCTION__); + return sprintf(buf, "%ld\n", priv->conn->prof.maxmulti); +} + +static ssize_t +maxmulti_write (struct device *dev, const char *buf, size_t count) +{ + struct netiucv_priv *priv = dev->driver_data; + + IUCV_DBF_TEXT(trace, 4, __FUNCTION__); + priv->conn->prof.maxmulti = 0; + return count; +} + +static DEVICE_ATTR(max_tx_buffer_used, 0644, maxmulti_show, maxmulti_write); + +static ssize_t +maxcq_show (struct device *dev, char *buf) +{ + struct netiucv_priv *priv = dev->driver_data; + + IUCV_DBF_TEXT(trace, 5, __FUNCTION__); + return sprintf(buf, "%ld\n", priv->conn->prof.maxcqueue); +} + +static ssize_t +maxcq_write (struct device *dev, const char *buf, size_t count) +{ + struct netiucv_priv *priv = dev->driver_data; + + IUCV_DBF_TEXT(trace, 4, __FUNCTION__); + priv->conn->prof.maxcqueue = 0; + return count; +} + +static DEVICE_ATTR(max_chained_skbs, 0644, maxcq_show, maxcq_write); + +static ssize_t +sdoio_show (struct device *dev, char *buf) +{ + struct netiucv_priv *priv = dev->driver_data; + + IUCV_DBF_TEXT(trace, 5, __FUNCTION__); + return sprintf(buf, "%ld\n", priv->conn->prof.doios_single); +} + +static ssize_t +sdoio_write (struct device *dev, const char *buf, size_t count) +{ + struct netiucv_priv *priv = dev->driver_data; + + IUCV_DBF_TEXT(trace, 4, __FUNCTION__); + priv->conn->prof.doios_single = 0; + return count; +} + +static DEVICE_ATTR(tx_single_write_ops, 0644, sdoio_show, sdoio_write); + +static ssize_t +mdoio_show (struct device *dev, char *buf) +{ + struct netiucv_priv *priv = dev->driver_data; + + IUCV_DBF_TEXT(trace, 5, __FUNCTION__); + return sprintf(buf, "%ld\n", priv->conn->prof.doios_multi); +} + +static ssize_t +mdoio_write (struct device *dev, const char *buf, size_t count) +{ + struct netiucv_priv *priv = dev->driver_data; + + IUCV_DBF_TEXT(trace, 5, __FUNCTION__); + priv->conn->prof.doios_multi = 0; + return count; +} + +static DEVICE_ATTR(tx_multi_write_ops, 0644, mdoio_show, mdoio_write); + +static ssize_t +txlen_show (struct device *dev, char *buf) +{ + struct netiucv_priv *priv = dev->driver_data; + + IUCV_DBF_TEXT(trace, 5, __FUNCTION__); + return sprintf(buf, "%ld\n", priv->conn->prof.txlen); +} + +static ssize_t +txlen_write (struct device *dev, const char *buf, size_t count) +{ + struct netiucv_priv *priv = dev->driver_data; + + IUCV_DBF_TEXT(trace, 4, __FUNCTION__); + priv->conn->prof.txlen = 0; + return count; +} + +static DEVICE_ATTR(netto_bytes, 0644, txlen_show, txlen_write); + +static ssize_t +txtime_show (struct device *dev, char *buf) +{ + struct netiucv_priv *priv = dev->driver_data; + + IUCV_DBF_TEXT(trace, 5, __FUNCTION__); + return sprintf(buf, "%ld\n", priv->conn->prof.tx_time); +} + +static ssize_t +txtime_write (struct device *dev, const char *buf, 
size_t count) +{ + struct netiucv_priv *priv = dev->driver_data; + + IUCV_DBF_TEXT(trace, 4, __FUNCTION__); + priv->conn->prof.tx_time = 0; + return count; +} + +static DEVICE_ATTR(max_tx_io_time, 0644, txtime_show, txtime_write); + +static ssize_t +txpend_show (struct device *dev, char *buf) +{ + struct netiucv_priv *priv = dev->driver_data; + + IUCV_DBF_TEXT(trace, 5, __FUNCTION__); + return sprintf(buf, "%ld\n", priv->conn->prof.tx_pending); +} + +static ssize_t +txpend_write (struct device *dev, const char *buf, size_t count) +{ + struct netiucv_priv *priv = dev->driver_data; + + IUCV_DBF_TEXT(trace, 4, __FUNCTION__); + priv->conn->prof.tx_pending = 0; + return count; +} + +static DEVICE_ATTR(tx_pending, 0644, txpend_show, txpend_write); + +static ssize_t +txmpnd_show (struct device *dev, char *buf) +{ + struct netiucv_priv *priv = dev->driver_data; + + IUCV_DBF_TEXT(trace, 5, __FUNCTION__); + return sprintf(buf, "%ld\n", priv->conn->prof.tx_max_pending); +} + +static ssize_t +txmpnd_write (struct device *dev, const char *buf, size_t count) +{ + struct netiucv_priv *priv = dev->driver_data; + + IUCV_DBF_TEXT(trace, 4, __FUNCTION__); + priv->conn->prof.tx_max_pending = 0; + return count; +} + +static DEVICE_ATTR(tx_max_pending, 0644, txmpnd_show, txmpnd_write); + +static struct attribute *netiucv_attrs[] = { + &dev_attr_buffer.attr, + &dev_attr_user.attr, + NULL, +}; + +static struct attribute_group netiucv_attr_group = { + .attrs = netiucv_attrs, +}; + +static struct attribute *netiucv_stat_attrs[] = { + &dev_attr_device_fsm_state.attr, + &dev_attr_connection_fsm_state.attr, + &dev_attr_max_tx_buffer_used.attr, + &dev_attr_max_chained_skbs.attr, + &dev_attr_tx_single_write_ops.attr, + &dev_attr_tx_multi_write_ops.attr, + &dev_attr_netto_bytes.attr, + &dev_attr_max_tx_io_time.attr, + &dev_attr_tx_pending.attr, + &dev_attr_tx_max_pending.attr, + NULL, +}; + +static struct attribute_group netiucv_stat_attr_group = { + .name = "stats", + .attrs = netiucv_stat_attrs, +}; + +static inline int +netiucv_add_files(struct device *dev) +{ + int ret; + + IUCV_DBF_TEXT(trace, 3, __FUNCTION__); + ret = sysfs_create_group(&dev->kobj, &netiucv_attr_group); + if (ret) + return ret; + ret = sysfs_create_group(&dev->kobj, &netiucv_stat_attr_group); + if (ret) + sysfs_remove_group(&dev->kobj, &netiucv_attr_group); + return ret; +} + +static inline void +netiucv_remove_files(struct device *dev) +{ + IUCV_DBF_TEXT(trace, 3, __FUNCTION__); + sysfs_remove_group(&dev->kobj, &netiucv_stat_attr_group); + sysfs_remove_group(&dev->kobj, &netiucv_attr_group); +} + +static int +netiucv_register_device(struct net_device *ndev) +{ + struct netiucv_priv *priv = ndev->priv; + struct device *dev = kmalloc(sizeof(struct device), GFP_KERNEL); + int ret; + + + IUCV_DBF_TEXT(trace, 3, __FUNCTION__); + + if (dev) { + memset(dev, 0, sizeof(struct device)); + snprintf(dev->bus_id, BUS_ID_SIZE, "net%s", ndev->name); + dev->bus = &iucv_bus; + dev->parent = iucv_root; + /* + * The release function could be called after the + * module has been unloaded. It's _only_ task is to + * free the struct. Therefore, we specify kfree() + * directly here. (Probably a little bit obfuscating + * but legitime ...). 
+ */ + dev->release = (void (*)(struct device *))kfree; + dev->driver = &netiucv_driver; + } else + return -ENOMEM; + + ret = device_register(dev); + + if (ret) + return ret; + ret = netiucv_add_files(dev); + if (ret) + goto out_unreg; + priv->dev = dev; + dev->driver_data = priv; + return 0; + +out_unreg: + device_unregister(dev); + return ret; +} + +static void +netiucv_unregister_device(struct device *dev) +{ + IUCV_DBF_TEXT(trace, 3, __FUNCTION__); + netiucv_remove_files(dev); + device_unregister(dev); +} + +/** + * Allocate and initialize a new connection structure. + * Add it to the list of netiucv connections; + */ +static struct iucv_connection * +netiucv_new_connection(struct net_device *dev, char *username) +{ + struct iucv_connection **clist = &iucv_connections; + struct iucv_connection *conn = + (struct iucv_connection *) + kmalloc(sizeof(struct iucv_connection), GFP_KERNEL); + + if (conn) { + memset(conn, 0, sizeof(struct iucv_connection)); + skb_queue_head_init(&conn->collect_queue); + skb_queue_head_init(&conn->commit_queue); + conn->max_buffsize = NETIUCV_BUFSIZE_DEFAULT; + conn->netdev = dev; + + conn->rx_buff = alloc_skb(NETIUCV_BUFSIZE_DEFAULT, + GFP_KERNEL | GFP_DMA); + if (!conn->rx_buff) { + kfree(conn); + return NULL; + } + conn->tx_buff = alloc_skb(NETIUCV_BUFSIZE_DEFAULT, + GFP_KERNEL | GFP_DMA); + if (!conn->tx_buff) { + kfree_skb(conn->rx_buff); + kfree(conn); + return NULL; + } + conn->fsm = init_fsm("netiucvconn", conn_state_names, + conn_event_names, NR_CONN_STATES, + NR_CONN_EVENTS, conn_fsm, CONN_FSM_LEN, + GFP_KERNEL); + if (!conn->fsm) { + kfree_skb(conn->tx_buff); + kfree_skb(conn->rx_buff); + kfree(conn); + return NULL; + } + fsm_settimer(conn->fsm, &conn->timer); + fsm_newstate(conn->fsm, CONN_STATE_INVALID); + + if (username) { + memcpy(conn->userid, username, 9); + fsm_newstate(conn->fsm, CONN_STATE_STOPPED); + } + + conn->next = *clist; + *clist = conn; + } + return conn; +} + +/** + * Release a connection structure and remove it from the + * list of netiucv connections. + */ +static void +netiucv_remove_connection(struct iucv_connection *conn) +{ + struct iucv_connection **clist = &iucv_connections; + + IUCV_DBF_TEXT(trace, 3, __FUNCTION__); + if (conn == NULL) + return; + while (*clist) { + if (*clist == conn) { + *clist = conn->next; + if (conn->handle) { + iucv_unregister_program(conn->handle); + conn->handle = NULL; + } + fsm_deltimer(&conn->timer); + kfree_fsm(conn->fsm); + kfree_skb(conn->rx_buff); + kfree_skb(conn->tx_buff); + return; + } + clist = &((*clist)->next); + } +} + +/** + * Release everything of a net device. + */ +static void +netiucv_free_netdevice(struct net_device *dev) +{ + struct netiucv_priv *privptr; + + IUCV_DBF_TEXT(trace, 3, __FUNCTION__); + + if (!dev) + return; + + privptr = (struct netiucv_priv *)dev->priv; + if (privptr) { + if (privptr->conn) + netiucv_remove_connection(privptr->conn); + if (privptr->fsm) + kfree_fsm(privptr->fsm); + privptr->conn = NULL; privptr->fsm = NULL; + /* privptr gets freed by free_netdev() */ + } + free_netdev(dev); +} + +/** + * Initialize a net device. 
(Called from kernel in alloc_netdev()) + */ +static void +netiucv_setup_netdevice(struct net_device *dev) +{ + memset(dev->priv, 0, sizeof(struct netiucv_priv)); + + dev->mtu = NETIUCV_MTU_DEFAULT; + dev->hard_start_xmit = netiucv_tx; + dev->open = netiucv_open; + dev->stop = netiucv_close; + dev->get_stats = netiucv_stats; + dev->change_mtu = netiucv_change_mtu; + dev->destructor = netiucv_free_netdevice; + dev->hard_header_len = NETIUCV_HDRLEN; + dev->addr_len = 0; + dev->type = ARPHRD_SLIP; + dev->tx_queue_len = NETIUCV_QUEUELEN_DEFAULT; + dev->flags = IFF_POINTOPOINT | IFF_NOARP; + SET_MODULE_OWNER(dev); +} + +/** + * Allocate and initialize everything of a net device. + */ +static struct net_device * +netiucv_init_netdevice(char *username) +{ + struct netiucv_priv *privptr; + struct net_device *dev; + + dev = alloc_netdev(sizeof(struct netiucv_priv), "iucv%d", + netiucv_setup_netdevice); + if (!dev) + return NULL; + if (dev_alloc_name(dev, dev->name) < 0) { + free_netdev(dev); + return NULL; + } + + privptr = (struct netiucv_priv *)dev->priv; + privptr->fsm = init_fsm("netiucvdev", dev_state_names, + dev_event_names, NR_DEV_STATES, NR_DEV_EVENTS, + dev_fsm, DEV_FSM_LEN, GFP_KERNEL); + if (!privptr->fsm) { + free_netdev(dev); + return NULL; + } + privptr->conn = netiucv_new_connection(dev, username); + if (!privptr->conn) { + kfree_fsm(privptr->fsm); + free_netdev(dev); + IUCV_DBF_TEXT(setup, 2, "NULL from netiucv_new_connection\n"); + return NULL; + } + fsm_newstate(privptr->fsm, DEV_STATE_STOPPED); + + return dev; +} + +static ssize_t +conn_write(struct device_driver *drv, const char *buf, size_t count) +{ + char *p; + char username[10]; + int i, ret; + struct net_device *dev; + + IUCV_DBF_TEXT(trace, 3, __FUNCTION__); + if (count>9) { + PRINT_WARN("netiucv: username too long (%d)!\n", (int)count); + IUCV_DBF_TEXT(setup, 2, "conn_write: too long\n"); + return -EINVAL; + } + + for (i=0, p=(char *)buf; i<8 && *p; i++, p++) { + if (isalnum(*p) || (*p == '$')) + username[i]= *p; + else if (*p == '\n') { + /* trailing lf, grr */ + break; + } else { + PRINT_WARN("netiucv: Invalid character in username!\n"); + IUCV_DBF_TEXT_(setup, 2, + "conn_write: invalid character %c\n", *p); + return -EINVAL; + } + } + while (i<9) + username[i++] = ' '; + username[9] = '\0'; + dev = netiucv_init_netdevice(username); + if (!dev) { + PRINT_WARN( + "netiucv: Could not allocate network device structure " + "for user '%s'\n", netiucv_printname(username)); + IUCV_DBF_TEXT(setup, 2, "NULL from netiucv_init_netdevice\n"); + return -ENODEV; + } + + if ((ret = netiucv_register_device(dev))) { + IUCV_DBF_TEXT_(setup, 2, + "ret %d from netiucv_register_device\n", ret); + goto out_free_ndev; + } + + /* sysfs magic */ + SET_NETDEV_DEV(dev, + (struct device*)((struct netiucv_priv*)dev->priv)->dev); + + if ((ret = register_netdev(dev))) { + netiucv_unregister_device((struct device*) + ((struct netiucv_priv*)dev->priv)->dev); + goto out_free_ndev; + } + + PRINT_INFO("%s: '%s'\n", dev->name, netiucv_printname(username)); + + return count; + +out_free_ndev: + PRINT_WARN("netiucv: Could not register '%s'\n", dev->name); + IUCV_DBF_TEXT(setup, 2, "conn_write: could not register\n"); + netiucv_free_netdevice(dev); + return ret; +} + +DRIVER_ATTR(connection, 0200, NULL, conn_write); + +static ssize_t +remove_write (struct device_driver *drv, const char *buf, size_t count) +{ + struct iucv_connection **clist = &iucv_connections; + struct net_device *ndev; + struct netiucv_priv *priv; + struct device *dev; + char 
name[IFNAMSIZ]; + char *p; + int i; + + IUCV_DBF_TEXT(trace, 3, __FUNCTION__); + + if (count >= IFNAMSIZ) + count = IFNAMSIZ-1; + + for (i=0, p=(char *)buf; i<count && *p; i++, p++) { + if ((*p == '\n') | (*p == ' ')) { + /* trailing lf, grr */ + break; + } else { + name[i]=*p; + } + } + name[i] = '\0'; + + while (*clist) { + ndev = (*clist)->netdev; + priv = (struct netiucv_priv*)ndev->priv; + dev = priv->dev; + + if (strncmp(name, ndev->name, count)) { + clist = &((*clist)->next); + continue; + } + if (ndev->flags & (IFF_UP | IFF_RUNNING)) { + PRINT_WARN( + "netiucv: net device %s active with peer %s\n", + ndev->name, priv->conn->userid); + PRINT_WARN("netiucv: %s cannot be removed\n", + ndev->name); + IUCV_DBF_TEXT(data, 2, "remove_write: still active\n"); + return -EBUSY; + } + unregister_netdev(ndev); + netiucv_unregister_device(dev); + return count; + } + PRINT_WARN("netiucv: net device %s unknown\n", name); + IUCV_DBF_TEXT(data, 2, "remove_write: unknown device\n"); + return -EINVAL; +} + +DRIVER_ATTR(remove, 0200, NULL, remove_write); + +static void +netiucv_banner(void) +{ + char vbuf[] = "$Revision: 1.63 $"; + char *version = vbuf; + + if ((version = strchr(version, ':'))) { + char *p = strchr(version + 1, '$'); + if (p) + *p = '\0'; + } else + version = " ??? "; + PRINT_INFO("NETIUCV driver Version%s initialized\n", version); +} + +static void __exit +netiucv_exit(void) +{ + IUCV_DBF_TEXT(trace, 3, __FUNCTION__); + while (iucv_connections) { + struct net_device *ndev = iucv_connections->netdev; + struct netiucv_priv *priv = (struct netiucv_priv*)ndev->priv; + struct device *dev = priv->dev; + + unregister_netdev(ndev); + netiucv_unregister_device(dev); + } + + driver_remove_file(&netiucv_driver, &driver_attr_connection); + driver_remove_file(&netiucv_driver, &driver_attr_remove); + driver_unregister(&netiucv_driver); + iucv_unregister_dbf_views(); + + PRINT_INFO("NETIUCV driver unloaded\n"); + return; +} + +static int __init +netiucv_init(void) +{ + int ret; + + ret = iucv_register_dbf_views(); + if (ret) { + PRINT_WARN("netiucv_init failed, " + "iucv_register_dbf_views rc = %d\n", ret); + return ret; + } + IUCV_DBF_TEXT(trace, 3, __FUNCTION__); + ret = driver_register(&netiucv_driver); + if (ret) { + PRINT_ERR("NETIUCV: failed to register driver.\n"); + IUCV_DBF_TEXT_(setup, 2, "ret %d from driver_register\n", ret); + iucv_unregister_dbf_views(); + return ret; + } + + /* Add entry for specifying connections. 
*/ + ret = driver_create_file(&netiucv_driver, &driver_attr_connection); + if (!ret) { + ret = driver_create_file(&netiucv_driver, &driver_attr_remove); + netiucv_banner(); + } else { + PRINT_ERR("NETIUCV: failed to add driver attribute.\n"); + IUCV_DBF_TEXT_(setup, 2, "ret %d from driver_create_file\n", ret); + driver_unregister(&netiucv_driver); + iucv_unregister_dbf_views(); + } + return ret; +} + +module_init(netiucv_init); +module_exit(netiucv_exit); +MODULE_LICENSE("GPL"); diff --git a/drivers/s390/net/qeth.h b/drivers/s390/net/qeth.h new file mode 100644 index 000000000000..a341041a6cf7 --- /dev/null +++ b/drivers/s390/net/qeth.h @@ -0,0 +1,1162 @@ +#ifndef __QETH_H__ +#define __QETH_H__ + +#include <linux/if.h> +#include <linux/if_arp.h> + +#include <linux/if_tr.h> +#include <linux/trdevice.h> +#include <linux/etherdevice.h> +#include <linux/if_vlan.h> + +#include <net/ipv6.h> +#include <linux/in6.h> +#include <net/if_inet6.h> +#include <net/addrconf.h> + + +#include <linux/bitops.h> + +#include <asm/debug.h> +#include <asm/qdio.h> +#include <asm/ccwdev.h> +#include <asm/ccwgroup.h> + +#include "qeth_mpc.h" + +#define VERSION_QETH_H "$Revision: 1.135 $" + +#ifdef CONFIG_QETH_IPV6 +#define QETH_VERSION_IPV6 ":IPv6" +#else +#define QETH_VERSION_IPV6 "" +#endif +#ifdef CONFIG_QETH_VLAN +#define QETH_VERSION_VLAN ":VLAN" +#else +#define QETH_VERSION_VLAN "" +#endif + +/** + * Debug Facility stuff + */ +#define QETH_DBF_SETUP_NAME "qeth_setup" +#define QETH_DBF_SETUP_LEN 8 +#define QETH_DBF_SETUP_INDEX 3 +#define QETH_DBF_SETUP_NR_AREAS 1 +#define QETH_DBF_SETUP_LEVEL 5 + +#define QETH_DBF_MISC_NAME "qeth_misc" +#define QETH_DBF_MISC_LEN 128 +#define QETH_DBF_MISC_INDEX 1 +#define QETH_DBF_MISC_NR_AREAS 1 +#define QETH_DBF_MISC_LEVEL 2 + +#define QETH_DBF_DATA_NAME "qeth_data" +#define QETH_DBF_DATA_LEN 96 +#define QETH_DBF_DATA_INDEX 3 +#define QETH_DBF_DATA_NR_AREAS 1 +#define QETH_DBF_DATA_LEVEL 2 + +#define QETH_DBF_CONTROL_NAME "qeth_control" +#define QETH_DBF_CONTROL_LEN 256 +#define QETH_DBF_CONTROL_INDEX 3 +#define QETH_DBF_CONTROL_NR_AREAS 2 +#define QETH_DBF_CONTROL_LEVEL 5 + +#define QETH_DBF_TRACE_NAME "qeth_trace" +#define QETH_DBF_TRACE_LEN 8 +#define QETH_DBF_TRACE_INDEX 2 +#define QETH_DBF_TRACE_NR_AREAS 2 +#define QETH_DBF_TRACE_LEVEL 3 +extern debug_info_t *qeth_dbf_trace; + +#define QETH_DBF_SENSE_NAME "qeth_sense" +#define QETH_DBF_SENSE_LEN 64 +#define QETH_DBF_SENSE_INDEX 1 +#define QETH_DBF_SENSE_NR_AREAS 1 +#define QETH_DBF_SENSE_LEVEL 2 + +#define QETH_DBF_QERR_NAME "qeth_qerr" +#define QETH_DBF_QERR_LEN 8 +#define QETH_DBF_QERR_INDEX 1 +#define QETH_DBF_QERR_NR_AREAS 2 +#define QETH_DBF_QERR_LEVEL 2 + +#define QETH_DBF_TEXT(name,level,text) \ + do { \ + debug_text_event(qeth_dbf_##name,level,text); \ + } while (0) + +#define QETH_DBF_HEX(name,level,addr,len) \ + do { \ + debug_event(qeth_dbf_##name,level,(void*)(addr),len); \ + } while (0) + +DECLARE_PER_CPU(char[256], qeth_dbf_txt_buf); + +#define QETH_DBF_TEXT_(name,level,text...) \ + do { \ + char* dbf_txt_buf = get_cpu_var(qeth_dbf_txt_buf); \ + sprintf(dbf_txt_buf, text); \ + debug_text_event(qeth_dbf_##name,level,dbf_txt_buf); \ + put_cpu_var(qeth_dbf_txt_buf); \ + } while (0) + +#define QETH_DBF_SPRINTF(name,level,text...) 
\ + do { \ + debug_sprintf_event(qeth_dbf_trace, level, ##text ); \ + debug_sprintf_event(qeth_dbf_trace, level, text ); \ + } while (0) + +/** + * some more debug stuff + */ +#define PRINTK_HEADER "qeth: " + +#define HEXDUMP16(importance,header,ptr) \ +PRINT_##importance(header "%02x %02x %02x %02x %02x %02x %02x %02x " \ + "%02x %02x %02x %02x %02x %02x %02x %02x\n", \ + *(((char*)ptr)),*(((char*)ptr)+1),*(((char*)ptr)+2), \ + *(((char*)ptr)+3),*(((char*)ptr)+4),*(((char*)ptr)+5), \ + *(((char*)ptr)+6),*(((char*)ptr)+7),*(((char*)ptr)+8), \ + *(((char*)ptr)+9),*(((char*)ptr)+10),*(((char*)ptr)+11), \ + *(((char*)ptr)+12),*(((char*)ptr)+13), \ + *(((char*)ptr)+14),*(((char*)ptr)+15)); \ +PRINT_##importance(header "%02x %02x %02x %02x %02x %02x %02x %02x " \ + "%02x %02x %02x %02x %02x %02x %02x %02x\n", \ + *(((char*)ptr)+16),*(((char*)ptr)+17), \ + *(((char*)ptr)+18),*(((char*)ptr)+19), \ + *(((char*)ptr)+20),*(((char*)ptr)+21), \ + *(((char*)ptr)+22),*(((char*)ptr)+23), \ + *(((char*)ptr)+24),*(((char*)ptr)+25), \ + *(((char*)ptr)+26),*(((char*)ptr)+27), \ + *(((char*)ptr)+28),*(((char*)ptr)+29), \ + *(((char*)ptr)+30),*(((char*)ptr)+31)); + +static inline void +qeth_hex_dump(unsigned char *buf, size_t len) +{ + size_t i; + + for (i = 0; i < len; i++) { + if (i && !(i % 16)) + printk("\n"); + printk("%02x ", *(buf + i)); + } + printk("\n"); +} + +#define SENSE_COMMAND_REJECT_BYTE 0 +#define SENSE_COMMAND_REJECT_FLAG 0x80 +#define SENSE_RESETTING_EVENT_BYTE 1 +#define SENSE_RESETTING_EVENT_FLAG 0x80 + +#define atomic_swap(a,b) xchg((int *)a.counter, b) + +/* + * Common IO related definitions + */ +extern struct device *qeth_root_dev; +extern struct ccw_driver qeth_ccw_driver; +extern struct ccwgroup_driver qeth_ccwgroup_driver; + +#define CARD_RDEV(card) card->read.ccwdev +#define CARD_WDEV(card) card->write.ccwdev +#define CARD_DDEV(card) card->data.ccwdev +#define CARD_BUS_ID(card) card->gdev->dev.bus_id +#define CARD_RDEV_ID(card) card->read.ccwdev->dev.bus_id +#define CARD_WDEV_ID(card) card->write.ccwdev->dev.bus_id +#define CARD_DDEV_ID(card) card->data.ccwdev->dev.bus_id +#define CHANNEL_ID(channel) channel->ccwdev->dev.bus_id + +#define CARD_FROM_CDEV(cdev) (struct qeth_card *) \ + ((struct ccwgroup_device *)cdev->dev.driver_data)\ + ->dev.driver_data; + +/** + * card stuff + */ +#ifdef CONFIG_QETH_PERF_STATS +struct qeth_perf_stats { + unsigned int bufs_rec; + unsigned int bufs_sent; + + unsigned int skbs_sent_pack; + unsigned int bufs_sent_pack; + + unsigned int sc_dp_p; + unsigned int sc_p_dp; + /* qdio_input_handler: number of times called, time spent in */ + __u64 inbound_start_time; + unsigned int inbound_cnt; + unsigned int inbound_time; + /* qeth_send_packet: number of times called, time spent in */ + __u64 outbound_start_time; + unsigned int outbound_cnt; + unsigned int outbound_time; + /* qdio_output_handler: number of times called, time spent in */ + __u64 outbound_handler_start_time; + unsigned int outbound_handler_cnt; + unsigned int outbound_handler_time; + /* number of calls to and time spent in do_QDIO for inbound queue */ + __u64 inbound_do_qdio_start_time; + unsigned int inbound_do_qdio_cnt; + unsigned int inbound_do_qdio_time; + /* number of calls to and time spent in do_QDIO for outbound queues */ + __u64 outbound_do_qdio_start_time; + unsigned int outbound_do_qdio_cnt; + unsigned int outbound_do_qdio_time; + /* eddp data */ + unsigned int large_send_bytes; + unsigned int large_send_cnt; + unsigned int sg_skbs_sent; + unsigned int sg_frags_sent; +}; +#endif /* 
CONFIG_QETH_PERF_STATS */ + +/* Routing stuff */ +struct qeth_routing_info { + enum qeth_routing_types type; +}; + +/* IPA stuff */ +struct qeth_ipa_info { + __u32 supported_funcs; + __u32 enabled_funcs; +}; + +static inline int +qeth_is_ipa_supported(struct qeth_ipa_info *ipa, enum qeth_ipa_funcs func) +{ + return (ipa->supported_funcs & func); +} + +static inline int +qeth_is_ipa_enabled(struct qeth_ipa_info *ipa, enum qeth_ipa_funcs func) +{ + return (ipa->supported_funcs & ipa->enabled_funcs & func); +} + +#define qeth_adp_supported(c,f) \ + qeth_is_ipa_supported(&c->options.adp, f) +#define qeth_adp_enabled(c,f) \ + qeth_is_ipa_enabled(&c->options.adp, f) +#define qeth_is_supported(c,f) \ + qeth_is_ipa_supported(&c->options.ipa4, f) +#define qeth_is_enabled(c,f) \ + qeth_is_ipa_enabled(&c->options.ipa4, f) +#ifdef CONFIG_QETH_IPV6 +#define qeth_is_supported6(c,f) \ + qeth_is_ipa_supported(&c->options.ipa6, f) +#define qeth_is_enabled6(c,f) \ + qeth_is_ipa_enabled(&c->options.ipa6, f) +#else /* CONFIG_QETH_IPV6 */ +#define qeth_is_supported6(c,f) 0 +#define qeth_is_enabled6(c,f) 0 +#endif /* CONFIG_QETH_IPV6 */ +#define qeth_is_ipafunc_supported(c,prot,f) \ + (prot==QETH_PROT_IPV6)? qeth_is_supported6(c,f):qeth_is_supported(c,f) +#define qeth_is_ipafunc_enabled(c,prot,f) \ + (prot==QETH_PROT_IPV6)? qeth_is_enabled6(c,f):qeth_is_enabled(c,f) + + +#define QETH_IDX_FUNC_LEVEL_OSAE_ENA_IPAT 0x0101 +#define QETH_IDX_FUNC_LEVEL_OSAE_DIS_IPAT 0x0101 +#define QETH_IDX_FUNC_LEVEL_IQD_ENA_IPAT 0x4108 +#define QETH_IDX_FUNC_LEVEL_IQD_DIS_IPAT 0x5108 + +#define QETH_MODELLIST_ARRAY \ + {{0x1731,0x01,0x1732,0x01,QETH_CARD_TYPE_OSAE,1, \ + QETH_IDX_FUNC_LEVEL_OSAE_ENA_IPAT, \ + QETH_IDX_FUNC_LEVEL_OSAE_DIS_IPAT, \ + QETH_MAX_QUEUES,0}, \ + {0x1731,0x05,0x1732,0x05,QETH_CARD_TYPE_IQD,0, \ + QETH_IDX_FUNC_LEVEL_IQD_ENA_IPAT, \ + QETH_IDX_FUNC_LEVEL_IQD_DIS_IPAT, \ + QETH_MAX_QUEUES,0x103}, \ + {0,0,0,0,0,0,0,0,0}} + +#define QETH_REAL_CARD 1 +#define QETH_VLAN_CARD 2 +#define QETH_BUFSIZE 4096 + +/** + * some more defs + */ +#define IF_NAME_LEN 16 +#define QETH_TX_TIMEOUT 100 * HZ +#define QETH_HEADER_SIZE 32 +#define MAX_PORTNO 15 +#define QETH_FAKE_LL_LEN ETH_HLEN +#define QETH_FAKE_LL_V6_ADDR_POS 24 + +/*IPv6 address autoconfiguration stuff*/ +#define UNIQUE_ID_IF_CREATE_ADDR_FAILED 0xfffe +#define UNIQUE_ID_NOT_BY_CARD 0x10000 + +/*****************************************************************************/ +/* QDIO queue and buffer handling */ +/*****************************************************************************/ +#define QETH_MAX_QUEUES 4 +#define QETH_IN_BUF_SIZE_DEFAULT 65536 +#define QETH_IN_BUF_COUNT_DEFAULT 16 +#define QETH_IN_BUF_COUNT_MIN 8 +#define QETH_IN_BUF_COUNT_MAX 128 +#define QETH_MAX_BUFFER_ELEMENTS(card) ((card)->qdio.in_buf_size >> 12) +#define QETH_IN_BUF_REQUEUE_THRESHOLD(card) \ + ((card)->qdio.in_buf_pool.buf_count / 2) + +/* buffers we have to be behind before we get a PCI */ +#define QETH_PCI_THRESHOLD_A(card) ((card)->qdio.in_buf_pool.buf_count+1) +/*enqueued free buffers left before we get a PCI*/ +#define QETH_PCI_THRESHOLD_B(card) 0 +/*not used unless the microcode gets patched*/ +#define QETH_PCI_TIMER_VALUE(card) 3 + +#define QETH_MIN_INPUT_THRESHOLD 1 +#define QETH_MAX_INPUT_THRESHOLD 500 +#define QETH_MIN_OUTPUT_THRESHOLD 1 +#define QETH_MAX_OUTPUT_THRESHOLD 300 + +/* priority queing */ +#define QETH_PRIOQ_DEFAULT QETH_NO_PRIO_QUEUEING +#define QETH_DEFAULT_QUEUE 2 +#define QETH_NO_PRIO_QUEUEING 0 +#define QETH_PRIO_Q_ING_PREC 1 +#define 
QETH_PRIO_Q_ING_TOS 2 +#define IP_TOS_LOWDELAY 0x10 +#define IP_TOS_HIGHTHROUGHPUT 0x08 +#define IP_TOS_HIGHRELIABILITY 0x04 +#define IP_TOS_NOTIMPORTANT 0x02 + +/* Packing */ +#define QETH_LOW_WATERMARK_PACK 2 +#define QETH_HIGH_WATERMARK_PACK 5 +#define QETH_WATERMARK_PACK_FUZZ 1 + +#define QETH_IP_HEADER_SIZE 40 + +struct qeth_hdr_layer3 { + __u8 id; + __u8 flags; + __u16 inbound_checksum; /*TSO:__u16 seqno */ + __u32 token; /*TSO: __u32 reserved */ + __u16 length; + __u8 vlan_prio; + __u8 ext_flags; + __u16 vlan_id; + __u16 frame_offset; + __u8 dest_addr[16]; +} __attribute__ ((packed)); + +struct qeth_hdr_layer2 { + __u8 id; + __u8 flags[3]; + __u8 port_no; + __u8 hdr_length; + __u16 pkt_length; + __u16 seq_no; + __u16 vlan_id; + __u32 reserved; + __u8 reserved2[16]; +} __attribute__ ((packed)); + +struct qeth_hdr { + union { + struct qeth_hdr_layer2 l2; + struct qeth_hdr_layer3 l3; + } hdr; +} __attribute__ ((packed)); + + +/* flags for qeth_hdr.flags */ +#define QETH_HDR_PASSTHRU 0x10 +#define QETH_HDR_IPV6 0x80 +#define QETH_HDR_CAST_MASK 0x07 +enum qeth_cast_flags { + QETH_CAST_UNICAST = 0x06, + QETH_CAST_MULTICAST = 0x04, + QETH_CAST_BROADCAST = 0x05, + QETH_CAST_ANYCAST = 0x07, + QETH_CAST_NOCAST = 0x00, +}; + +enum qeth_layer2_frame_flags { + QETH_LAYER2_FLAG_MULTICAST = 0x01, + QETH_LAYER2_FLAG_BROADCAST = 0x02, + QETH_LAYER2_FLAG_UNICAST = 0x04, + QETH_LAYER2_FLAG_VLAN = 0x10, +}; + +enum qeth_header_ids { + QETH_HEADER_TYPE_LAYER3 = 0x01, + QETH_HEADER_TYPE_LAYER2 = 0x02, + QETH_HEADER_TYPE_TSO = 0x03, +}; +/* flags for qeth_hdr.ext_flags */ +#define QETH_HDR_EXT_VLAN_FRAME 0x01 +#define QETH_HDR_EXT_TOKEN_ID 0x02 +#define QETH_HDR_EXT_INCLUDE_VLAN_TAG 0x04 +#define QETH_HDR_EXT_SRC_MAC_ADDR 0x08 +#define QETH_HDR_EXT_CSUM_HDR_REQ 0x10 +#define QETH_HDR_EXT_CSUM_TRANSP_REQ 0x20 +#define QETH_HDR_EXT_UDP_TSO 0x40 /*bit off for TCP*/ + +static inline int +qeth_is_last_sbale(struct qdio_buffer_element *sbale) +{ + return (sbale->flags & SBAL_FLAGS_LAST_ENTRY); +} + +enum qeth_qdio_buffer_states { + /* + * inbound: read out by driver; owned by hardware in order to be filled + * outbound: owned by driver in order to be filled + */ + QETH_QDIO_BUF_EMPTY, + /* + * inbound: filled by hardware; owned by driver in order to be read out + * outbound: filled by driver; owned by hardware in order to be sent + */ + QETH_QDIO_BUF_PRIMED, +}; + +enum qeth_qdio_info_states { + QETH_QDIO_UNINITIALIZED, + QETH_QDIO_ALLOCATED, + QETH_QDIO_ESTABLISHED, +}; + +struct qeth_buffer_pool_entry { + struct list_head list; + struct list_head init_list; + void *elements[QDIO_MAX_ELEMENTS_PER_BUFFER]; +}; + +struct qeth_qdio_buffer_pool { + struct list_head entry_list; + int buf_count; +}; + +struct qeth_qdio_buffer { + struct qdio_buffer *buffer; + volatile enum qeth_qdio_buffer_states state; + /* the buffer pool entry currently associated to this buffer */ + struct qeth_buffer_pool_entry *pool_entry; +}; + +struct qeth_qdio_q { + struct qdio_buffer qdio_bufs[QDIO_MAX_BUFFERS_PER_Q]; + struct qeth_qdio_buffer bufs[QDIO_MAX_BUFFERS_PER_Q]; + /* + * buf_to_init means "buffer must be initialized by driver and must + * be made available for hardware" -> state is set to EMPTY + */ + volatile int next_buf_to_init; +} __attribute__ ((aligned(256))); + +/* possible types of qeth large_send support */ +enum qeth_large_send_types { + QETH_LARGE_SEND_NO, + QETH_LARGE_SEND_EDDP, + QETH_LARGE_SEND_TSO, +}; + +struct qeth_qdio_out_buffer { + struct qdio_buffer *buffer; + atomic_t state; + volatile int 
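+	/*
+	 * Illustrative sketch, not from the original source: each output
+	 * buffer wraps a qdio_buffer whose element[] array (the SBALEs) is
+	 * filled front to back; the index right below tracks the next free
+	 * slot, so filling one fragment amounts to
+	 *
+	 *	buffer->element[n].addr   = data;
+	 *	buffer->element[n].length = len;
+	 *	buffer->element[n].flags  = SBAL_FLAGS_LAST_ENTRY;  (if final)
+	 *
+	 * as qeth_eddp_fill_buffer() in qeth_eddp.c does further below.
+	 */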
next_element_to_fill; + struct sk_buff_head skb_list; + struct list_head ctx_list; +}; + +struct qeth_card; + +enum qeth_out_q_states { + QETH_OUT_Q_UNLOCKED, + QETH_OUT_Q_LOCKED, + QETH_OUT_Q_LOCKED_FLUSH, +}; + +struct qeth_qdio_out_q { + struct qdio_buffer qdio_bufs[QDIO_MAX_BUFFERS_PER_Q]; + struct qeth_qdio_out_buffer bufs[QDIO_MAX_BUFFERS_PER_Q]; + int queue_no; + struct qeth_card *card; + atomic_t state; + volatile int do_pack; + /* + * index of buffer to be filled by driver; state EMPTY or PACKING + */ + volatile int next_buf_to_fill; + /* + * number of buffers that are currently filled (PRIMED) + * -> these buffers are hardware-owned + */ + atomic_t used_buffers; + /* indicates whether PCI flag must be set (or if one is outstanding) */ + atomic_t set_pci_flags_count; +} __attribute__ ((aligned(256))); + +struct qeth_qdio_info { + volatile enum qeth_qdio_info_states state; + /* input */ + struct qeth_qdio_q *in_q; + struct qeth_qdio_buffer_pool in_buf_pool; + struct qeth_qdio_buffer_pool init_pool; + int in_buf_size; + + /* output */ + int no_out_queues; + struct qeth_qdio_out_q **out_qs; + + /* priority queueing */ + int do_prio_queueing; + int default_out_queue; +}; + +enum qeth_send_errors { + QETH_SEND_ERROR_NONE, + QETH_SEND_ERROR_LINK_FAILURE, + QETH_SEND_ERROR_RETRY, + QETH_SEND_ERROR_KICK_IT, +}; + +#define QETH_ETH_MAC_V4 0x0100 /* like v4 */ +#define QETH_ETH_MAC_V6 0x3333 /* like v6 */ +/* tr mc mac is longer, but that will be enough to detect mc frames */ +#define QETH_TR_MAC_NC 0xc000 /* non-canonical */ +#define QETH_TR_MAC_C 0x0300 /* canonical */ + +#define DEFAULT_ADD_HHLEN 0 +#define MAX_ADD_HHLEN 1024 + +/** + * buffer stuff for read channel + */ +#define QETH_CMD_BUFFER_NO 8 + +/** + * channel state machine + */ +enum qeth_channel_states { + CH_STATE_UP, + CH_STATE_DOWN, + CH_STATE_ACTIVATING, + CH_STATE_HALTED, + CH_STATE_STOPPED, +}; +/** + * card state machine + */ +enum qeth_card_states { + CARD_STATE_DOWN, + CARD_STATE_HARDSETUP, + CARD_STATE_SOFTSETUP, + CARD_STATE_UP, + CARD_STATE_RECOVER, +}; + +/** + * Protocol versions + */ +enum qeth_prot_versions { + QETH_PROT_SNA = 0x0001, + QETH_PROT_IPV4 = 0x0004, + QETH_PROT_IPV6 = 0x0006, +}; + +enum qeth_ip_types { + QETH_IP_TYPE_NORMAL, + QETH_IP_TYPE_VIPA, + QETH_IP_TYPE_RXIP, + QETH_IP_TYPE_DEL_ALL_MC, +}; + +enum qeth_cmd_buffer_state { + BUF_STATE_FREE, + BUF_STATE_LOCKED, + BUF_STATE_PROCESSED, +}; +/** + * IP address and multicast list + */ +struct qeth_ipaddr { + struct list_head entry; + enum qeth_ip_types type; + enum qeth_ipa_setdelip_flags set_flags; + enum qeth_ipa_setdelip_flags del_flags; + int is_multicast; + volatile int users; + enum qeth_prot_versions proto; + unsigned char mac[OSA_ADDR_LEN]; + union { + struct { + unsigned int addr; + unsigned int mask; + } a4; + struct { + struct in6_addr addr; + unsigned int pfxlen; + } a6; + } u; +}; + +struct qeth_ipato_entry { + struct list_head entry; + enum qeth_prot_versions proto; + char addr[16]; + int mask_bits; +}; + +struct qeth_ipato { + int enabled; + int invert4; + int invert6; + struct list_head entries; +}; + +struct qeth_channel; + +struct qeth_cmd_buffer { + enum qeth_cmd_buffer_state state; + struct qeth_channel *channel; + unsigned char *data; + int rc; + void (*callback) (struct qeth_channel *, struct qeth_cmd_buffer *); +}; + + +/** + * definition of a qeth channel, used for read and write + */ +struct qeth_channel { + enum qeth_channel_states state; + struct ccw1 ccw; + spinlock_t iob_lock; + wait_queue_head_t wait_q; + struct 
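+	/*
+	 * Illustrative sketch, not from the original source: control
+	 * commands travel through the iob[] ring declared below; a buffer
+	 * goes from BUF_STATE_FREE to BUF_STATE_LOCKED when handed to the
+	 * channel and to BUF_STATE_PROCESSED once qeth_irq() has seen its
+	 * interrupt, after which the irq tasklet reaps it:
+	 *
+	 *	if (iob[index].state == BUF_STATE_PROCESSED)
+	 *		iob[index].callback(channel, iob + index);
+	 *
+	 * (compare qeth_irq_tasklet() in qeth_main.c below).
+	 */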
tasklet_struct irq_tasklet; + struct ccw_device *ccwdev; +/*command buffer for control data*/ + struct qeth_cmd_buffer iob[QETH_CMD_BUFFER_NO]; + atomic_t irq_pending; + volatile int io_buf_no; + volatile int buf_no; +}; + +/** + * OSA card related definitions + */ +struct qeth_token { + __u32 issuer_rm_w; + __u32 issuer_rm_r; + __u32 cm_filter_w; + __u32 cm_filter_r; + __u32 cm_connection_w; + __u32 cm_connection_r; + __u32 ulp_filter_w; + __u32 ulp_filter_r; + __u32 ulp_connection_w; + __u32 ulp_connection_r; +}; + +struct qeth_seqno { + __u32 trans_hdr; + __u32 pdu_hdr; + __u32 pdu_hdr_ack; + __u16 ipa; +}; + +struct qeth_reply { + struct list_head list; + wait_queue_head_t wait_q; + int (*callback)(struct qeth_card *,struct qeth_reply *,unsigned long); + u32 seqno; + unsigned long offset; + int received; + int rc; + void *param; + struct qeth_card *card; + atomic_t refcnt; +}; + +#define QETH_BROADCAST_WITH_ECHO 1 +#define QETH_BROADCAST_WITHOUT_ECHO 2 + +struct qeth_card_blkt { + int time_total; + int inter_packet; + int inter_packet_jumbo; +}; + + + +struct qeth_card_info { + unsigned short unit_addr2; + unsigned short cula; + unsigned short chpid; + __u16 func_level; + char mcl_level[QETH_MCL_LENGTH + 1]; + int guestlan; + int layer2_mac_registered; + int portname_required; + int portno; + char portname[9]; + enum qeth_card_types type; + enum qeth_link_types link_type; + int is_multicast_different; + int initial_mtu; + int max_mtu; + int broadcast_capable; + int unique_id; + struct qeth_card_blkt blkt; + __u32 csum_mask; +}; + +struct qeth_card_options { + struct qeth_routing_info route4; + struct qeth_ipa_info ipa4; + struct qeth_ipa_info adp; /*Adapter parameters*/ +#ifdef CONFIG_QETH_IPV6 + struct qeth_routing_info route6; + struct qeth_ipa_info ipa6; +#endif /* QETH_IPV6 */ + enum qeth_checksum_types checksum_type; + int broadcast_mode; + int macaddr_mode; + int fake_broadcast; + int add_hhlen; + int fake_ll; + int layer2; + enum qeth_large_send_types large_send; +}; + +/* + * thread bits for qeth_card thread masks + */ +enum qeth_threads { + QETH_SET_IP_THREAD = 1, + QETH_RECOVER_THREAD = 2, +}; + +struct qeth_card { + struct list_head list; + enum qeth_card_states state; + int lan_online; + spinlock_t lock; +/*hardware and sysfs stuff*/ + struct ccwgroup_device *gdev; + struct qeth_channel read; + struct qeth_channel write; + struct qeth_channel data; + + struct net_device *dev; + struct net_device_stats stats; + + struct qeth_card_info info; + struct qeth_token token; + struct qeth_seqno seqno; + struct qeth_card_options options; + + wait_queue_head_t wait_q; +#ifdef CONFIG_QETH_VLAN + spinlock_t vlanlock; + struct vlan_group *vlangrp; +#endif + struct work_struct kernel_thread_starter; + spinlock_t thread_mask_lock; + volatile unsigned long thread_start_mask; + volatile unsigned long thread_allowed_mask; + volatile unsigned long thread_running_mask; + spinlock_t ip_lock; + struct list_head ip_list; + struct list_head *ip_tbd_list; + struct qeth_ipato ipato; + struct list_head cmd_waiter_list; + /* QDIO buffer handling */ + struct qeth_qdio_info qdio; +#ifdef CONFIG_QETH_PERF_STATS + struct qeth_perf_stats perf_stats; +#endif /* CONFIG_QETH_PERF_STATS */ + int use_hard_stop; + int (*orig_hard_header)(struct sk_buff *,struct net_device *, + unsigned short,void *,void *,unsigned); +}; + +struct qeth_card_list_struct { + struct list_head list; + rwlock_t rwlock; +}; + +extern struct qeth_card_list_struct qeth_card_list; + +/*notifier list */ +struct qeth_notify_list_struct { + 
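+	/*
+	 * Illustrative sketch, not from the original source: the three
+	 * thread_*_mask words in struct qeth_card above gate the helpers
+	 * named in enum qeth_threads; starting one is, in outline,
+	 *
+	 *	spin_lock_irqsave(&card->thread_mask_lock, flags);
+	 *	if (card->thread_allowed_mask & QETH_RECOVER_THREAD)
+	 *		card->thread_start_mask |= QETH_RECOVER_THREAD;
+	 *	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
+	 *	schedule_work(&card->kernel_thread_starter);
+	 *
+	 * with thread_running_mask flagging the threads currently alive.
+	 */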
struct list_head list; + struct task_struct *task; + int signum; +}; +extern spinlock_t qeth_notify_lock; +extern struct list_head qeth_notify_list; + +/*some helper functions*/ + +#define QETH_CARD_IFNAME(card) (((card)->dev)? (card)->dev->name : "") + +inline static __u8 +qeth_get_ipa_adp_type(enum qeth_link_types link_type) +{ + switch (link_type) { + case QETH_LINK_TYPE_HSTR: + return 2; + default: + return 1; + } +} + +inline static int +qeth_realloc_headroom(struct qeth_card *card, struct sk_buff **skb, int size) +{ + struct sk_buff *new_skb = NULL; + + if (skb_headroom(*skb) < size){ + new_skb = skb_realloc_headroom(*skb, size); + if (!new_skb) { + PRINT_ERR("qeth_prepare_skb: could " + "not realloc headroom for qeth_hdr " + "on interface %s", QETH_CARD_IFNAME(card)); + return -ENOMEM; + } + *skb = new_skb; + } + return 0; +} +static inline struct sk_buff * +qeth_pskb_unshare(struct sk_buff *skb, int pri) +{ + struct sk_buff *nskb; + if (!skb_cloned(skb)) + return skb; + nskb = skb_copy(skb, pri); + kfree_skb(skb); /* free our shared copy */ + return nskb; +} + + +inline static void * +qeth_push_skb(struct qeth_card *card, struct sk_buff **skb, int size) +{ + void *hdr; + + hdr = (void *) skb_push(*skb, size); + /* + * sanity check, the Linux memory allocation scheme should + * never present us cases like this one (the qdio header size plus + * the first 40 bytes of the paket cross a 4k boundary) + */ + if ((((unsigned long) hdr) & (~(PAGE_SIZE - 1))) != + (((unsigned long) hdr + size + + QETH_IP_HEADER_SIZE) & (~(PAGE_SIZE - 1)))) { + PRINT_ERR("qeth_prepare_skb: misaligned " + "packet on interface %s. Discarded.", + QETH_CARD_IFNAME(card)); + return NULL; + } + return hdr; +} + +inline static int +qeth_get_hlen(__u8 link_type) +{ +#ifdef CONFIG_QETH_IPV6 + switch (link_type) { + case QETH_LINK_TYPE_HSTR: + case QETH_LINK_TYPE_LANE_TR: + return sizeof(struct qeth_hdr) + TR_HLEN; + default: +#ifdef CONFIG_QETH_VLAN + return sizeof(struct qeth_hdr) + VLAN_ETH_HLEN; +#else + return sizeof(struct qeth_hdr) + ETH_HLEN; +#endif + } +#else /* CONFIG_QETH_IPV6 */ +#ifdef CONFIG_QETH_VLAN + return sizeof(struct qeth_hdr) + VLAN_HLEN; +#else + return sizeof(struct qeth_hdr); +#endif +#endif /* CONFIG_QETH_IPV6 */ +} + +inline static unsigned short +qeth_get_netdev_flags(struct qeth_card *card) +{ + if (card->options.layer2) + return 0; + switch (card->info.type) { + case QETH_CARD_TYPE_IQD: + return IFF_NOARP; +#ifdef CONFIG_QETH_IPV6 + default: + return 0; +#else + default: + return IFF_NOARP; +#endif + } +} + +inline static int +qeth_get_initial_mtu_for_card(struct qeth_card * card) +{ + switch (card->info.type) { + case QETH_CARD_TYPE_UNKNOWN: + return 1500; + case QETH_CARD_TYPE_IQD: + return card->info.max_mtu; + case QETH_CARD_TYPE_OSAE: + switch (card->info.link_type) { + case QETH_LINK_TYPE_HSTR: + case QETH_LINK_TYPE_LANE_TR: + return 2000; + default: + return 1492; + } + default: + return 1500; + } +} + +inline static int +qeth_get_max_mtu_for_card(int cardtype) +{ + switch (cardtype) { + case QETH_CARD_TYPE_UNKNOWN: + return 61440; + case QETH_CARD_TYPE_OSAE: + return 61440; + case QETH_CARD_TYPE_IQD: + return 57344; + default: + return 1500; + } +} + +inline static int +qeth_get_mtu_out_of_mpc(int cardtype) +{ + switch (cardtype) { + case QETH_CARD_TYPE_IQD: + return 1; + default: + return 0; + } +} + +inline static int +qeth_get_mtu_outof_framesize(int framesize) +{ + switch (framesize) { + case 0x4000: + return 8192; + case 0x6000: + return 16384; + case 0xa000: + return 32768; 
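+	/*
+	 * Observation, not from the original source: every MTU returned
+	 * here is the negotiated frame size minus 8192 bytes, e.g.
+	 * 0x6000 = 24576 and 24576 - 8192 = 16384; 0xffff (next case below)
+	 * stands in for a 64KB frame, 65536 - 8192 = 57344, matching
+	 * QETH_IN_BUF_SIZE_DEFAULT above.
+	 */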
+ case 0xffff: + return 57344; + default: + return 0; + } +} + +inline static int +qeth_mtu_is_valid(struct qeth_card * card, int mtu) +{ + switch (card->info.type) { + case QETH_CARD_TYPE_OSAE: + return ((mtu >= 576) && (mtu <= 61440)); + case QETH_CARD_TYPE_IQD: + return ((mtu >= 576) && + (mtu <= card->info.max_mtu + 4096 - 32)); + case QETH_CARD_TYPE_UNKNOWN: + default: + return 1; + } +} + +inline static int +qeth_get_arphdr_type(int cardtype, int linktype) +{ + switch (cardtype) { + case QETH_CARD_TYPE_OSAE: + switch (linktype) { + case QETH_LINK_TYPE_LANE_TR: + case QETH_LINK_TYPE_HSTR: + return ARPHRD_IEEE802_TR; + default: + return ARPHRD_ETHER; + } + case QETH_CARD_TYPE_IQD: + default: + return ARPHRD_ETHER; + } +} + +#ifdef CONFIG_QETH_PERF_STATS +inline static int +qeth_get_micros(void) +{ + return (int) (get_clock() >> 12); +} +#endif + +static inline int +qeth_get_qdio_q_format(struct qeth_card *card) +{ + switch (card->info.type) { + case QETH_CARD_TYPE_IQD: + return 2; + default: + return 0; + } +} + +static inline void +qeth_ipaddr4_to_string(const __u8 *addr, char *buf) +{ + sprintf(buf, "%i.%i.%i.%i", addr[0], addr[1], addr[2], addr[3]); +} + +static inline int +qeth_string_to_ipaddr4(const char *buf, __u8 *addr) +{ + const char *start, *end; + char abuf[4]; + char *tmp; + int len; + int i; + + start = buf; + for (i = 0; i < 3; i++) { + if (!(end = strchr(start, '.'))) + return -EINVAL; + len = end - start; + memset(abuf, 0, 4); + strncpy(abuf, start, len); + addr[i] = simple_strtoul(abuf, &tmp, 10); + start = end + 1; + } + memset(abuf, 0, 4); + strcpy(abuf, start); + addr[3] = simple_strtoul(abuf, &tmp, 10); + return 0; +} + +static inline void +qeth_ipaddr6_to_string(const __u8 *addr, char *buf) +{ + sprintf(buf, "%02x%02x:%02x%02x:%02x%02x:%02x%02x" + ":%02x%02x:%02x%02x:%02x%02x:%02x%02x", + addr[0], addr[1], addr[2], addr[3], + addr[4], addr[5], addr[6], addr[7], + addr[8], addr[9], addr[10], addr[11], + addr[12], addr[13], addr[14], addr[15]); +} + +static inline int +qeth_string_to_ipaddr6(const char *buf, __u8 *addr) +{ + const char *start, *end; + u16 *tmp_addr; + char abuf[5]; + char *tmp; + int len; + int i; + + tmp_addr = (u16 *)addr; + start = buf; + for (i = 0; i < 7; i++) { + if (!(end = strchr(start, ':'))) + return -EINVAL; + len = end - start; + memset(abuf, 0, 5); + strncpy(abuf, start, len); + tmp_addr[i] = simple_strtoul(abuf, &tmp, 16); + start = end + 1; + } + memset(abuf, 0, 5); + strcpy(abuf, start); + tmp_addr[7] = simple_strtoul(abuf, &tmp, 16); + return 0; +} + +static inline void +qeth_ipaddr_to_string(enum qeth_prot_versions proto, const __u8 *addr, + char *buf) +{ + if (proto == QETH_PROT_IPV4) + return qeth_ipaddr4_to_string(addr, buf); + else if (proto == QETH_PROT_IPV6) + return qeth_ipaddr6_to_string(addr, buf); +} + +static inline int +qeth_string_to_ipaddr(const char *buf, enum qeth_prot_versions proto, + __u8 *addr) +{ + if (proto == QETH_PROT_IPV4) + return qeth_string_to_ipaddr4(buf, addr); + else if (proto == QETH_PROT_IPV6) + return qeth_string_to_ipaddr6(buf, addr); + else + return -EINVAL; +} + +extern int +qeth_setrouting_v4(struct qeth_card *); +extern int +qeth_setrouting_v6(struct qeth_card *); + +extern int +qeth_add_ipato_entry(struct qeth_card *, struct qeth_ipato_entry *); + +extern void +qeth_del_ipato_entry(struct qeth_card *, enum qeth_prot_versions, u8 *, int); + +extern int +qeth_add_vipa(struct qeth_card *, enum qeth_prot_versions, const u8 *); + +extern void +qeth_del_vipa(struct qeth_card *, enum 
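+/*
+ * Note, not from the original source: the string parsing helpers above do
+ * only minimal validation, e.g.
+ *
+ *	__u8 addr[4];
+ *	qeth_string_to_ipaddr4("10.0.0.138", addr);	(addr = 10,0,0,138)
+ *
+ * Components are converted with simple_strtoul(), so out-of-range octets
+ * are silently truncated rather than rejected, and the IPv6 variant
+ * requires all eight colon-separated groups (no "::" shorthand).
+ */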
qeth_prot_versions, const u8 *); + +extern int +qeth_add_rxip(struct qeth_card *, enum qeth_prot_versions, const u8 *); + +extern void +qeth_del_rxip(struct qeth_card *, enum qeth_prot_versions, const u8 *); + +extern int +qeth_notifier_register(struct task_struct *, int ); + +extern int +qeth_notifier_unregister(struct task_struct * ); + +extern void +qeth_schedule_recovery(struct qeth_card *); + +extern int +qeth_realloc_buffer_pool(struct qeth_card *, int); + +extern int +qeth_set_large_send(struct qeth_card *); + +extern void +qeth_fill_header(struct qeth_card *, struct qeth_hdr *, + struct sk_buff *, int, int); +extern void +qeth_flush_buffers(struct qeth_qdio_out_q *, int, int, int); + +#endif /* __QETH_H__ */ diff --git a/drivers/s390/net/qeth_eddp.c b/drivers/s390/net/qeth_eddp.c new file mode 100644 index 000000000000..7ee1c06ed68a --- /dev/null +++ b/drivers/s390/net/qeth_eddp.c @@ -0,0 +1,643 @@ +/* + * + * linux/drivers/s390/net/qeth_eddp.c ($Revision: 1.11 $) + * + * Enhanced Device Driver Packing (EDDP) support for the qeth driver. + * + * Copyright 2004 IBM Corporation + * + * Author(s): Thomas Spatzier <tspat@de.ibm.com> + * + * $Revision: 1.11 $ $Date: 2005/03/24 09:04:18 $ + * + */ +#include <linux/config.h> +#include <linux/errno.h> +#include <linux/ip.h> +#include <linux/inetdevice.h> +#include <linux/netdevice.h> +#include <linux/kernel.h> +#include <linux/tcp.h> +#include <net/tcp.h> +#include <linux/skbuff.h> + +#include <net/ip.h> + +#include "qeth.h" +#include "qeth_mpc.h" +#include "qeth_eddp.h" + +int +qeth_eddp_check_buffers_for_context(struct qeth_qdio_out_q *queue, + struct qeth_eddp_context *ctx) +{ + int index = queue->next_buf_to_fill; + int elements_needed = ctx->num_elements; + int elements_in_buffer; + int skbs_in_buffer; + int buffers_needed = 0; + + QETH_DBF_TEXT(trace, 5, "eddpcbfc"); + while(elements_needed > 0) { + buffers_needed++; + if (atomic_read(&queue->bufs[index].state) != + QETH_QDIO_BUF_EMPTY) + return -EBUSY; + + elements_in_buffer = QETH_MAX_BUFFER_ELEMENTS(queue->card) - + queue->bufs[index].next_element_to_fill; + skbs_in_buffer = elements_in_buffer / ctx->elements_per_skb; + elements_needed -= skbs_in_buffer * ctx->elements_per_skb; + index = (index + 1) % QDIO_MAX_BUFFERS_PER_Q; + } + return buffers_needed; +} + +static inline void +qeth_eddp_free_context(struct qeth_eddp_context *ctx) +{ + int i; + + QETH_DBF_TEXT(trace, 5, "eddpfctx"); + for (i = 0; i < ctx->num_pages; ++i) + free_page((unsigned long)ctx->pages[i]); + kfree(ctx->pages); + if (ctx->elements != NULL) + kfree(ctx->elements); + kfree(ctx); +} + + +static inline void +qeth_eddp_get_context(struct qeth_eddp_context *ctx) +{ + atomic_inc(&ctx->refcnt); +} + +void +qeth_eddp_put_context(struct qeth_eddp_context *ctx) +{ + if (atomic_dec_return(&ctx->refcnt) == 0) + qeth_eddp_free_context(ctx); +} + +void +qeth_eddp_buf_release_contexts(struct qeth_qdio_out_buffer *buf) +{ + struct qeth_eddp_context_reference *ref; + + QETH_DBF_TEXT(trace, 6, "eddprctx"); + while (!list_empty(&buf->ctx_list)){ + ref = list_entry(buf->ctx_list.next, + struct qeth_eddp_context_reference, list); + qeth_eddp_put_context(ref->ctx); + list_del(&ref->list); + kfree(ref); + } +} + +static inline int +qeth_eddp_buf_ref_context(struct qeth_qdio_out_buffer *buf, + struct qeth_eddp_context *ctx) +{ + struct qeth_eddp_context_reference *ref; + + QETH_DBF_TEXT(trace, 6, "eddprfcx"); + ref = kmalloc(sizeof(struct qeth_eddp_context_reference), GFP_ATOMIC); + if (ref == NULL) + return -ENOMEM; + 
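+	/*
+	 * Illustrative sketch, not from the original source: context
+	 * lifetime is refcount driven; qeth_eddp_create_context() below
+	 * returns with refcnt = 1 (the sender's reference), every output
+	 * buffer carrying a piece of the context takes one more through
+	 * this function, and qeth_eddp_put_context() frees it all at zero:
+	 *
+	 *	ctx = qeth_eddp_create_context(card, skb, hdr);	(refcnt 1)
+	 *	qeth_eddp_fill_buffer(queue, ctx, index);	(+1 per buffer)
+	 *	qeth_eddp_put_context(ctx);			(sender drops)
+	 */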
qeth_eddp_get_context(ctx);
+	ref->ctx = ctx;
+	list_add_tail(&ref->list, &buf->ctx_list);
+	return 0;
+}
+
+int
+qeth_eddp_fill_buffer(struct qeth_qdio_out_q *queue,
+		      struct qeth_eddp_context *ctx,
+		      int index)
+{
+	struct qeth_qdio_out_buffer *buf = NULL;
+	struct qdio_buffer *buffer;
+	int elements = ctx->num_elements;
+	int element = 0;
+	int flush_cnt = 0;
+	int must_refcnt = 1;
+	int i;
+
+	QETH_DBF_TEXT(trace, 5, "eddpfibu");
+	while (elements > 0) {
+		buf = &queue->bufs[index];
+		if (atomic_read(&buf->state) != QETH_QDIO_BUF_EMPTY){
+			/* normally this should not happen since we checked
+			 * for available elements in
+			 * qeth_eddp_check_buffers_for_context
+			 */
+			if (element == 0)
+				return -EBUSY;
+			else {
+				PRINT_WARN("could only partially fill eddp "
+					   "buffer!\n");
+				goto out;
+			}
+		}
+		/* check if the whole next skb fits into current buffer */
+		if ((QETH_MAX_BUFFER_ELEMENTS(queue->card) -
+					buf->next_element_to_fill)
+				< ctx->elements_per_skb){
+			/* no -> go to next buffer */
+			atomic_set(&buf->state, QETH_QDIO_BUF_PRIMED);
+			index = (index + 1) % QDIO_MAX_BUFFERS_PER_Q;
+			flush_cnt++;
+			/* new buffer, so we have to add ctx to buffer's
+			 * ctx_list and increment ctx's refcnt */
+			must_refcnt = 1;
+			continue;
+		}
+		if (must_refcnt){
+			must_refcnt = 0;
+			if (qeth_eddp_buf_ref_context(buf, ctx)){
+				PRINT_WARN("no memory to create eddp context "
+					   "reference\n");
+				goto out_check;
+			}
+		}
+		buffer = buf->buffer;
+		/* fill one skb into buffer */
+		for (i = 0; i < ctx->elements_per_skb; ++i){
+			buffer->element[buf->next_element_to_fill].addr =
+				ctx->elements[element].addr;
+			buffer->element[buf->next_element_to_fill].length =
+				ctx->elements[element].length;
+			buffer->element[buf->next_element_to_fill].flags =
+				ctx->elements[element].flags;
+			buf->next_element_to_fill++;
+			element++;
+			elements--;
+		}
+	}
+out_check:
+	if (!queue->do_pack) {
+		QETH_DBF_TEXT(trace, 6, "fillbfnp");
+		/* set state to PRIMED -> will be flushed */
+		if (buf->next_element_to_fill > 0){
+			atomic_set(&buf->state, QETH_QDIO_BUF_PRIMED);
+			flush_cnt++;
+		}
+	} else {
+#ifdef CONFIG_QETH_PERF_STATS
+		queue->card->perf_stats.skbs_sent_pack++;
+#endif
+		QETH_DBF_TEXT(trace, 6, "fillbfpa");
+		if (buf->next_element_to_fill >=
+				QETH_MAX_BUFFER_ELEMENTS(queue->card)) {
+			/*
+			 * packed buffer is full -> set state PRIMED
+			 * -> will be flushed
+			 */
+			atomic_set(&buf->state, QETH_QDIO_BUF_PRIMED);
+			flush_cnt++;
+		}
+	}
+out:
+	return flush_cnt;
+}
+
+static inline int
+qeth_get_skb_data_len(struct sk_buff *skb)
+{
+	int len = skb->len;
+	int i;
+
+	for (i = 0; i < skb_shinfo(skb)->nr_frags; ++i)
+		len -= skb_shinfo(skb)->frags[i].size;
+	return len;
+}
+
+static inline void
+qeth_eddp_create_segment_hdrs(struct qeth_eddp_context *ctx,
+			      struct qeth_eddp_data *eddp)
+{
+	u8 *page;
+	int page_remainder;
+	int page_offset;
+	int hdr_len;
+	struct qeth_eddp_element *element;
+
+	QETH_DBF_TEXT(trace, 5, "eddpcrsh");
+	page = ctx->pages[ctx->offset >> PAGE_SHIFT];
+	page_offset = ctx->offset % PAGE_SIZE;
+	element = &ctx->elements[ctx->num_elements];
+	hdr_len = eddp->nhl + eddp->thl;
+	/* FIXME: layer2 and VLAN !!! */
+	if (eddp->qh.hdr.l2.id == QETH_HEADER_TYPE_LAYER2)
+		hdr_len += ETH_HLEN;
+	if (eddp->mac.h_proto == __constant_htons(ETH_P_8021Q))
+		hdr_len += VLAN_HLEN;
+	/* does complete header fit in current page ? 
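+	 * (qeth_hdr plus MAC/VLAN/network/transport headers are written as
+	 * one contiguous element, so if they would cross a page boundary
+	 * the code below skips to the next page and wastes the remainder)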
*/ + page_remainder = PAGE_SIZE - page_offset; + if (page_remainder < (sizeof(struct qeth_hdr) + hdr_len)){ + /* no -> go to start of next page */ + ctx->offset += page_remainder; + page = ctx->pages[ctx->offset >> PAGE_SHIFT]; + page_offset = 0; + } + memcpy(page + page_offset, &eddp->qh, sizeof(struct qeth_hdr)); + element->addr = page + page_offset; + element->length = sizeof(struct qeth_hdr); + ctx->offset += sizeof(struct qeth_hdr); + page_offset += sizeof(struct qeth_hdr); + /* add mac header (?) */ + if (eddp->qh.hdr.l2.id == QETH_HEADER_TYPE_LAYER2){ + memcpy(page + page_offset, &eddp->mac, ETH_HLEN); + element->length += ETH_HLEN; + ctx->offset += ETH_HLEN; + page_offset += ETH_HLEN; + } + /* add VLAN tag */ + if (eddp->mac.h_proto == __constant_htons(ETH_P_8021Q)){ + memcpy(page + page_offset, &eddp->vlan, VLAN_HLEN); + element->length += VLAN_HLEN; + ctx->offset += VLAN_HLEN; + page_offset += VLAN_HLEN; + } + /* add network header */ + memcpy(page + page_offset, (u8 *)&eddp->nh, eddp->nhl); + element->length += eddp->nhl; + eddp->nh_in_ctx = page + page_offset; + ctx->offset += eddp->nhl; + page_offset += eddp->nhl; + /* add transport header */ + memcpy(page + page_offset, (u8 *)&eddp->th, eddp->thl); + element->length += eddp->thl; + eddp->th_in_ctx = page + page_offset; + ctx->offset += eddp->thl; +} + +static inline void +qeth_eddp_copy_data_tcp(char *dst, struct qeth_eddp_data *eddp, int len, + u32 *hcsum) +{ + struct skb_frag_struct *frag; + int left_in_frag; + int copy_len; + u8 *src; + + QETH_DBF_TEXT(trace, 5, "eddpcdtc"); + if (skb_shinfo(eddp->skb)->nr_frags == 0) { + memcpy(dst, eddp->skb->data + eddp->skb_offset, len); + *hcsum = csum_partial(eddp->skb->data + eddp->skb_offset, len, + *hcsum); + eddp->skb_offset += len; + } else { + while (len > 0) { + if (eddp->frag < 0) { + /* we're in skb->data */ + left_in_frag = qeth_get_skb_data_len(eddp->skb) + - eddp->skb_offset; + src = eddp->skb->data + eddp->skb_offset; + } else { + frag = &skb_shinfo(eddp->skb)-> + frags[eddp->frag]; + left_in_frag = frag->size - eddp->frag_offset; + src = (u8 *)( + (page_to_pfn(frag->page) << PAGE_SHIFT)+ + frag->page_offset + eddp->frag_offset); + } + if (left_in_frag <= 0) { + eddp->frag++; + eddp->frag_offset = 0; + continue; + } + copy_len = min(left_in_frag, len); + memcpy(dst, src, copy_len); + *hcsum = csum_partial(src, copy_len, *hcsum); + dst += copy_len; + eddp->frag_offset += copy_len; + eddp->skb_offset += copy_len; + len -= copy_len; + } + } +} + +static inline void +qeth_eddp_create_segment_data_tcp(struct qeth_eddp_context *ctx, + struct qeth_eddp_data *eddp, int data_len, + u32 hcsum) +{ + u8 *page; + int page_remainder; + int page_offset; + struct qeth_eddp_element *element; + int first_lap = 1; + + QETH_DBF_TEXT(trace, 5, "eddpcsdt"); + page = ctx->pages[ctx->offset >> PAGE_SHIFT]; + page_offset = ctx->offset % PAGE_SIZE; + element = &ctx->elements[ctx->num_elements]; + while (data_len){ + page_remainder = PAGE_SIZE - page_offset; + if (page_remainder < data_len){ + qeth_eddp_copy_data_tcp(page + page_offset, eddp, + page_remainder, &hcsum); + element->length += page_remainder; + if (first_lap) + element->flags = SBAL_FLAGS_FIRST_FRAG; + else + element->flags = SBAL_FLAGS_MIDDLE_FRAG; + ctx->num_elements++; + element++; + data_len -= page_remainder; + ctx->offset += page_remainder; + page = ctx->pages[ctx->offset >> PAGE_SHIFT]; + page_offset = 0; + element->addr = page + page_offset; + } else { + qeth_eddp_copy_data_tcp(page + page_offset, eddp, + data_len, &hcsum); + 
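+			/*
+			 * Note, not from the original source: hcsum has been
+			 * accumulated by qeth_eddp_copy_data_tcp() over every
+			 * chunk copied above; after the loop this function
+			 * folds it into the TCP header that was copied into
+			 * the context earlier:
+			 *
+			 *	((struct tcphdr *)eddp->th_in_ctx)->check =
+			 *		csum_fold(hcsum);
+			 */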
element->length += data_len; + if (!first_lap) + element->flags = SBAL_FLAGS_LAST_FRAG; + ctx->num_elements++; + ctx->offset += data_len; + data_len = 0; + } + first_lap = 0; + } + ((struct tcphdr *)eddp->th_in_ctx)->check = csum_fold(hcsum); +} + +static inline u32 +qeth_eddp_check_tcp4_hdr(struct qeth_eddp_data *eddp, int data_len) +{ + u32 phcsum; /* pseudo header checksum */ + + QETH_DBF_TEXT(trace, 5, "eddpckt4"); + eddp->th.tcp.h.check = 0; + /* compute pseudo header checksum */ + phcsum = csum_tcpudp_nofold(eddp->nh.ip4.h.saddr, eddp->nh.ip4.h.daddr, + eddp->thl + data_len, IPPROTO_TCP, 0); + /* compute checksum of tcp header */ + return csum_partial((u8 *)&eddp->th, eddp->thl, phcsum); +} + +static inline u32 +qeth_eddp_check_tcp6_hdr(struct qeth_eddp_data *eddp, int data_len) +{ + u32 proto; + u32 phcsum; /* pseudo header checksum */ + + QETH_DBF_TEXT(trace, 5, "eddpckt6"); + eddp->th.tcp.h.check = 0; + /* compute pseudo header checksum */ + phcsum = csum_partial((u8 *)&eddp->nh.ip6.h.saddr, + sizeof(struct in6_addr), 0); + phcsum = csum_partial((u8 *)&eddp->nh.ip6.h.daddr, + sizeof(struct in6_addr), phcsum); + proto = htonl(IPPROTO_TCP); + phcsum = csum_partial((u8 *)&proto, sizeof(u32), phcsum); + return phcsum; +} + +static inline struct qeth_eddp_data * +qeth_eddp_create_eddp_data(struct qeth_hdr *qh, u8 *nh, u8 nhl, u8 *th, u8 thl) +{ + struct qeth_eddp_data *eddp; + + QETH_DBF_TEXT(trace, 5, "eddpcrda"); + eddp = kmalloc(sizeof(struct qeth_eddp_data), GFP_ATOMIC); + if (eddp){ + memset(eddp, 0, sizeof(struct qeth_eddp_data)); + eddp->nhl = nhl; + eddp->thl = thl; + memcpy(&eddp->qh, qh, sizeof(struct qeth_hdr)); + memcpy(&eddp->nh, nh, nhl); + memcpy(&eddp->th, th, thl); + eddp->frag = -1; /* initially we're in skb->data */ + } + return eddp; +} + +static inline void +__qeth_eddp_fill_context_tcp(struct qeth_eddp_context *ctx, + struct qeth_eddp_data *eddp) +{ + struct tcphdr *tcph; + int data_len; + u32 hcsum; + + QETH_DBF_TEXT(trace, 5, "eddpftcp"); + eddp->skb_offset = sizeof(struct qeth_hdr) + eddp->nhl + eddp->thl; + tcph = eddp->skb->h.th; + while (eddp->skb_offset < eddp->skb->len) { + data_len = min((int)skb_shinfo(eddp->skb)->tso_size, + (int)(eddp->skb->len - eddp->skb_offset)); + /* prepare qdio hdr */ + if (eddp->qh.hdr.l2.id == QETH_HEADER_TYPE_LAYER2){ + eddp->qh.hdr.l2.pkt_length = data_len + ETH_HLEN + + eddp->nhl + eddp->thl - + sizeof(struct qeth_hdr); +#ifdef CONFIG_QETH_VLAN + if (eddp->mac.h_proto == __constant_htons(ETH_P_8021Q)) + eddp->qh.hdr.l2.pkt_length += VLAN_HLEN; +#endif /* CONFIG_QETH_VLAN */ + } else + eddp->qh.hdr.l3.length = data_len + eddp->nhl + + eddp->thl; + /* prepare ip hdr */ + if (eddp->skb->protocol == ETH_P_IP){ + eddp->nh.ip4.h.tot_len = data_len + eddp->nhl + + eddp->thl; + eddp->nh.ip4.h.check = 0; + eddp->nh.ip4.h.check = + ip_fast_csum((u8 *)&eddp->nh.ip4.h, + eddp->nh.ip4.h.ihl); + } else + eddp->nh.ip6.h.payload_len = data_len + eddp->thl; + /* prepare tcp hdr */ + if (data_len == (eddp->skb->len - eddp->skb_offset)){ + /* last segment -> set FIN and PSH flags */ + eddp->th.tcp.h.fin = tcph->fin; + eddp->th.tcp.h.psh = tcph->psh; + } + if (eddp->skb->protocol == ETH_P_IP) + hcsum = qeth_eddp_check_tcp4_hdr(eddp, data_len); + else + hcsum = qeth_eddp_check_tcp6_hdr(eddp, data_len); + /* fill the next segment into the context */ + qeth_eddp_create_segment_hdrs(ctx, eddp); + qeth_eddp_create_segment_data_tcp(ctx, eddp, data_len, hcsum); + if (eddp->skb_offset >= eddp->skb->len) + break; + /* prepare headers for next round */ + 
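+		/*
+		 * Note, not from the original source: this mirrors what TSO
+		 * hardware does per segment; e.g. 4380 payload bytes with
+		 * tso_size 1460 give three segments whose TCP sequence
+		 * numbers advance by 1460 each while the IPv4 ID increases
+		 * by one (the two statements just below).
+		 */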
if (eddp->skb->protocol == ETH_P_IP) + eddp->nh.ip4.h.id++; + eddp->th.tcp.h.seq += data_len; + } +} + +static inline int +qeth_eddp_fill_context_tcp(struct qeth_eddp_context *ctx, + struct sk_buff *skb, struct qeth_hdr *qhdr) +{ + struct qeth_eddp_data *eddp = NULL; + + QETH_DBF_TEXT(trace, 5, "eddpficx"); + /* create our segmentation headers and copy original headers */ + if (skb->protocol == ETH_P_IP) + eddp = qeth_eddp_create_eddp_data(qhdr, (u8 *)skb->nh.iph, + skb->nh.iph->ihl*4, + (u8 *)skb->h.th, skb->h.th->doff*4); + else + eddp = qeth_eddp_create_eddp_data(qhdr, (u8 *)skb->nh.ipv6h, + sizeof(struct ipv6hdr), + (u8 *)skb->h.th, skb->h.th->doff*4); + + if (eddp == NULL) { + QETH_DBF_TEXT(trace, 2, "eddpfcnm"); + return -ENOMEM; + } + if (qhdr->hdr.l2.id == QETH_HEADER_TYPE_LAYER2) { + memcpy(&eddp->mac, eth_hdr(skb), ETH_HLEN); +#ifdef CONFIG_QETH_VLAN + if (eddp->mac.h_proto == __constant_htons(ETH_P_8021Q)) { + eddp->vlan[0] = __constant_htons(skb->protocol); + eddp->vlan[1] = htons(vlan_tx_tag_get(skb)); + } +#endif /* CONFIG_QETH_VLAN */ + } + /* the next flags will only be set on the last segment */ + eddp->th.tcp.h.fin = 0; + eddp->th.tcp.h.psh = 0; + eddp->skb = skb; + /* begin segmentation and fill context */ + __qeth_eddp_fill_context_tcp(ctx, eddp); + kfree(eddp); + return 0; +} + +static inline void +qeth_eddp_calc_num_pages(struct qeth_eddp_context *ctx, struct sk_buff *skb, + int hdr_len) +{ + int skbs_per_page; + + QETH_DBF_TEXT(trace, 5, "eddpcanp"); + /* can we put multiple skbs in one page? */ + skbs_per_page = PAGE_SIZE / (skb_shinfo(skb)->tso_size + hdr_len); + if (skbs_per_page > 1){ + ctx->num_pages = (skb_shinfo(skb)->tso_segs + 1) / + skbs_per_page + 1; + ctx->elements_per_skb = 1; + } else { + /* no -> how many elements per skb? 
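+	 * (worked example, not from the original source: with tso_size 1460
+	 * and 72 header bytes one segment needs 1532 bytes, so two fit in a
+	 * 4096 byte page and the branch above charges a single element per
+	 * segment; only when one segment needs more than 2048 bytes do we
+	 * fall through to the per-skb element calculation below)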
*/
+		ctx->elements_per_skb = (skb_shinfo(skb)->tso_size + hdr_len +
+					 PAGE_SIZE) >> PAGE_SHIFT;
+		ctx->num_pages = ctx->elements_per_skb *
+				 (skb_shinfo(skb)->tso_segs + 1);
+	}
+	ctx->num_elements = ctx->elements_per_skb *
+			    (skb_shinfo(skb)->tso_segs + 1);
+}
+
+static inline struct qeth_eddp_context *
+qeth_eddp_create_context_generic(struct qeth_card *card, struct sk_buff *skb,
+				 int hdr_len)
+{
+	struct qeth_eddp_context *ctx = NULL;
+	u8 *addr;
+	int i;
+
+	QETH_DBF_TEXT(trace, 5, "creddpcg");
+	/* create the context and allocate pages */
+	ctx = kmalloc(sizeof(struct qeth_eddp_context), GFP_ATOMIC);
+	if (ctx == NULL){
+		QETH_DBF_TEXT(trace, 2, "ceddpcn1");
+		return NULL;
+	}
+	memset(ctx, 0, sizeof(struct qeth_eddp_context));
+	ctx->type = QETH_LARGE_SEND_EDDP;
+	qeth_eddp_calc_num_pages(ctx, skb, hdr_len);
+	if (ctx->elements_per_skb > QETH_MAX_BUFFER_ELEMENTS(card)){
+		QETH_DBF_TEXT(trace, 2, "ceddpcis");
+		kfree(ctx);
+		return NULL;
+	}
+	ctx->pages = kmalloc(ctx->num_pages * sizeof(u8 *), GFP_ATOMIC);
+	if (ctx->pages == NULL){
+		QETH_DBF_TEXT(trace, 2, "ceddpcn2");
+		kfree(ctx);
+		return NULL;
+	}
+	memset(ctx->pages, 0, ctx->num_pages * sizeof(u8 *));
+	for (i = 0; i < ctx->num_pages; ++i){
+		addr = (u8 *)__get_free_page(GFP_ATOMIC);
+		if (addr == NULL){
+			QETH_DBF_TEXT(trace, 2, "ceddpcn3");
+			ctx->num_pages = i;
+			qeth_eddp_free_context(ctx);
+			return NULL;
+		}
+		memset(addr, 0, PAGE_SIZE);
+		ctx->pages[i] = addr;
+	}
+	ctx->elements = kmalloc(ctx->num_elements *
+				sizeof(struct qeth_eddp_element), GFP_ATOMIC);
+	if (ctx->elements == NULL){
+		QETH_DBF_TEXT(trace, 2, "ceddpcn4");
+		qeth_eddp_free_context(ctx);
+		return NULL;
+	}
+	memset(ctx->elements, 0,
+	       ctx->num_elements * sizeof(struct qeth_eddp_element));
+	/* reset num_elements; will be incremented again in fill_buffer to
+	 * reflect number of actually used elements */
+	ctx->num_elements = 0;
+	return ctx;
+}
+
+static inline struct qeth_eddp_context *
+qeth_eddp_create_context_tcp(struct qeth_card *card, struct sk_buff *skb,
+			     struct qeth_hdr *qhdr)
+{
+	struct qeth_eddp_context *ctx = NULL;
+
+	QETH_DBF_TEXT(trace, 5, "creddpct");
+	if (skb->protocol == ETH_P_IP)
+		ctx = qeth_eddp_create_context_generic(card, skb,
+			sizeof(struct qeth_hdr) + skb->nh.iph->ihl*4 +
+			skb->h.th->doff*4);
+	else if (skb->protocol == ETH_P_IPV6)
+		ctx = qeth_eddp_create_context_generic(card, skb,
+			sizeof(struct qeth_hdr) + sizeof(struct ipv6hdr) +
+			skb->h.th->doff*4);
+	else
+		QETH_DBF_TEXT(trace, 2, "cetcpinv");
+
+	if (ctx == NULL) {
+		QETH_DBF_TEXT(trace, 2, "creddpnl");
+		return NULL;
+	}
+	if (qeth_eddp_fill_context_tcp(ctx, skb, qhdr)){
+		QETH_DBF_TEXT(trace, 2, "ceddptfe");
+		qeth_eddp_free_context(ctx);
+		return NULL;
+	}
+	atomic_set(&ctx->refcnt, 1);
+	return ctx;
+}
+
+struct qeth_eddp_context *
+qeth_eddp_create_context(struct qeth_card *card, struct sk_buff *skb,
+			 struct qeth_hdr *qhdr)
+{
+	QETH_DBF_TEXT(trace, 5, "creddpc");
+	switch (skb->sk->sk_protocol){
+	case IPPROTO_TCP:
+		return qeth_eddp_create_context_tcp(card, skb, qhdr);
+	default:
+		QETH_DBF_TEXT(trace, 2, "eddpinvp");
+	}
+	return NULL;
+}
+
+
diff --git a/drivers/s390/net/qeth_eddp.h b/drivers/s390/net/qeth_eddp.h
new file mode 100644
index 000000000000..e1b51860bc57
--- /dev/null
+++ b/drivers/s390/net/qeth_eddp.h
@@ -0,0 +1,85 @@
+/*
+ * linux/drivers/s390/net/qeth_eddp.h ($Revision: 1.5 $)
+ *
+ * Header file for qeth enhanced device driver packing. 
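+ *
+ * (Note: "packing" here is EDDP, Enhanced Device Driver Packing, per
+ * qeth_eddp.c above: the driver itself splits a TSO-sized skb into
+ * wire-sized segments and lays qeth/MAC/IP/TCP headers plus payload
+ * directly into QDIO pages instead of leaving segmentation to the
+ * adapter.)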
+ * + * Copyright 2004 IBM Corporation + * + * Author(s): Thomas Spatzier <tspat@de.ibm.com> + * + * $Revision: 1.5 $ $Date: 2005/03/24 09:04:18 $ + * + */ +#ifndef __QETH_EDDP_H__ +#define __QETH_EDDP_H__ + +struct qeth_eddp_element { + u32 flags; + u32 length; + void *addr; +}; + +struct qeth_eddp_context { + atomic_t refcnt; + enum qeth_large_send_types type; + int num_pages; /* # of allocated pages */ + u8 **pages; /* pointers to pages */ + int offset; /* offset in ctx during creation */ + int num_elements; /* # of required 'SBALEs' */ + struct qeth_eddp_element *elements; /* array of 'SBALEs' */ + int elements_per_skb; /* # of 'SBALEs' per skb **/ +}; + +struct qeth_eddp_context_reference { + struct list_head list; + struct qeth_eddp_context *ctx; +}; + +extern struct qeth_eddp_context * +qeth_eddp_create_context(struct qeth_card *,struct sk_buff *,struct qeth_hdr *); + +extern void +qeth_eddp_put_context(struct qeth_eddp_context *); + +extern int +qeth_eddp_fill_buffer(struct qeth_qdio_out_q *,struct qeth_eddp_context *,int); + +extern void +qeth_eddp_buf_release_contexts(struct qeth_qdio_out_buffer *); + +extern int +qeth_eddp_check_buffers_for_context(struct qeth_qdio_out_q *, + struct qeth_eddp_context *); +/* + * Data used for fragmenting a IP packet. + */ +struct qeth_eddp_data { + struct qeth_hdr qh; + struct ethhdr mac; + u16 vlan[2]; + union { + struct { + struct iphdr h; + u8 options[40]; + } ip4; + struct { + struct ipv6hdr h; + } ip6; + } nh; + u8 nhl; + void *nh_in_ctx; /* address of nh within the ctx */ + union { + struct { + struct tcphdr h; + u8 options[40]; + } tcp; + } th; + u8 thl; + void *th_in_ctx; /* address of th within the ctx */ + struct sk_buff *skb; + int skb_offset; + int frag; + int frag_offset; +} __attribute__ ((packed)); + +#endif /* __QETH_EDDP_H__ */ diff --git a/drivers/s390/net/qeth_fs.h b/drivers/s390/net/qeth_fs.h new file mode 100644 index 000000000000..5c9a51ce91b6 --- /dev/null +++ b/drivers/s390/net/qeth_fs.h @@ -0,0 +1,163 @@ +/* + * linux/drivers/s390/net/qeth_fs.h + * + * Linux on zSeries OSA Express and HiperSockets support. + * + * This header file contains definitions related to sysfs and procfs. 
+ * + * Copyright 2000,2003 IBM Corporation + * Author(s): Thomas Spatzier <tspat@de.ibm.com> + * + */ +#ifndef __QETH_FS_H__ +#define __QETH_FS_H__ + +#define VERSION_QETH_FS_H "$Revision: 1.9 $" + +extern const char *VERSION_QETH_PROC_C; +extern const char *VERSION_QETH_SYS_C; + +#ifdef CONFIG_PROC_FS +extern int +qeth_create_procfs_entries(void); + +extern void +qeth_remove_procfs_entries(void); +#else +static inline int +qeth_create_procfs_entries(void) +{ + return 0; +} + +static inline void +qeth_remove_procfs_entries(void) +{ +} +#endif /* CONFIG_PROC_FS */ + +extern int +qeth_create_device_attributes(struct device *dev); + +extern void +qeth_remove_device_attributes(struct device *dev); + +extern int +qeth_create_driver_attributes(void); + +extern void +qeth_remove_driver_attributes(void); + +/* + * utility functions used in qeth_proc.c and qeth_sys.c + */ + +static inline const char * +qeth_get_checksum_str(struct qeth_card *card) +{ + if (card->options.checksum_type == SW_CHECKSUMMING) + return "sw"; + else if (card->options.checksum_type == HW_CHECKSUMMING) + return "hw"; + else + return "no"; +} + +static inline const char * +qeth_get_prioq_str(struct qeth_card *card, char *buf) +{ + if (card->qdio.do_prio_queueing == QETH_NO_PRIO_QUEUEING) + sprintf(buf, "always_q_%i", card->qdio.default_out_queue); + else + strcpy(buf, (card->qdio.do_prio_queueing == + QETH_PRIO_Q_ING_PREC)? + "by_prec." : "by_ToS"); + return buf; +} + +static inline const char * +qeth_get_bufsize_str(struct qeth_card *card) +{ + if (card->qdio.in_buf_size == 16384) + return "16k"; + else if (card->qdio.in_buf_size == 24576) + return "24k"; + else if (card->qdio.in_buf_size == 32768) + return "32k"; + else if (card->qdio.in_buf_size == 40960) + return "40k"; + else + return "64k"; +} + +static inline const char * +qeth_get_cardname(struct qeth_card *card) +{ + if (card->info.guestlan) { + switch (card->info.type) { + case QETH_CARD_TYPE_OSAE: + return " Guest LAN QDIO"; + case QETH_CARD_TYPE_IQD: + return " Guest LAN Hiper"; + default: + return " unknown"; + } + } else { + switch (card->info.type) { + case QETH_CARD_TYPE_OSAE: + return " OSD Express"; + case QETH_CARD_TYPE_IQD: + return " HiperSockets"; + default: + return " unknown"; + } + } + return " n/a"; +} + +/* max length to be returned: 14 */ +static inline const char * +qeth_get_cardname_short(struct qeth_card *card) +{ + if (card->info.guestlan){ + switch (card->info.type){ + case QETH_CARD_TYPE_OSAE: + return "GuestLAN QDIO"; + case QETH_CARD_TYPE_IQD: + return "GuestLAN Hiper"; + default: + return "unknown"; + } + } else { + switch (card->info.type) { + case QETH_CARD_TYPE_OSAE: + switch (card->info.link_type) { + case QETH_LINK_TYPE_FAST_ETH: + return "OSD_100"; + case QETH_LINK_TYPE_HSTR: + return "HSTR"; + case QETH_LINK_TYPE_GBIT_ETH: + return "OSD_1000"; + case QETH_LINK_TYPE_10GBIT_ETH: + return "OSD_10GIG"; + case QETH_LINK_TYPE_LANE_ETH100: + return "OSD_FE_LANE"; + case QETH_LINK_TYPE_LANE_TR: + return "OSD_TR_LANE"; + case QETH_LINK_TYPE_LANE_ETH1000: + return "OSD_GbE_LANE"; + case QETH_LINK_TYPE_LANE: + return "OSD_ATM_LANE"; + default: + return "OSD_Express"; + } + case QETH_CARD_TYPE_IQD: + return "HiperSockets"; + default: + return "unknown"; + } + } + return "n/a"; +} + +#endif /* __QETH_FS_H__ */ diff --git a/drivers/s390/net/qeth_main.c b/drivers/s390/net/qeth_main.c new file mode 100644 index 000000000000..607b92542df6 --- /dev/null +++ b/drivers/s390/net/qeth_main.c @@ -0,0 +1,8236 @@ +/* + * + * 
linux/drivers/s390/net/qeth_main.c ($Revision: 1.206 $) + * + * Linux on zSeries OSA Express and HiperSockets support + * + * Copyright 2000,2003 IBM Corporation + * + * Author(s): Original Code written by + * Utz Bacher (utz.bacher@de.ibm.com) + * Rewritten by + * Frank Pavlic (pavlic@de.ibm.com) and + * Thomas Spatzier <tspat@de.ibm.com> + * + * $Revision: 1.206 $ $Date: 2005/03/24 09:04:18 $ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +/*** + * eye catcher; just for debugging purposes + */ +void volatile +qeth_eyecatcher(void) +{ + return; +} + +#include <linux/config.h> +#include <linux/module.h> +#include <linux/moduleparam.h> +#include <linux/string.h> +#include <linux/errno.h> +#include <linux/mm.h> +#include <linux/ip.h> +#include <linux/inetdevice.h> +#include <linux/netdevice.h> +#include <linux/sched.h> +#include <linux/workqueue.h> +#include <linux/kernel.h> +#include <linux/slab.h> +#include <linux/interrupt.h> +#include <linux/tcp.h> +#include <linux/icmp.h> +#include <linux/skbuff.h> +#include <linux/in.h> +#include <linux/igmp.h> +#include <linux/init.h> +#include <linux/reboot.h> +#include <linux/mii.h> +#include <linux/rcupdate.h> +#include <linux/ethtool.h> + +#include <net/arp.h> +#include <net/ip.h> +#include <net/route.h> + +#include <asm/ebcdic.h> +#include <asm/io.h> +#include <asm/qeth.h> +#include <asm/timex.h> +#include <asm/semaphore.h> +#include <asm/uaccess.h> + +#include "qeth.h" +#include "qeth_mpc.h" +#include "qeth_fs.h" +#include "qeth_eddp.h" +#include "qeth_tso.h" + +#define VERSION_QETH_C "$Revision: 1.206 $" +static const char *version = "qeth S/390 OSA-Express driver"; + +/** + * Debug Facility Stuff + */ +static debug_info_t *qeth_dbf_setup = NULL; +static debug_info_t *qeth_dbf_data = NULL; +static debug_info_t *qeth_dbf_misc = NULL; +static debug_info_t *qeth_dbf_control = NULL; +debug_info_t *qeth_dbf_trace = NULL; +static debug_info_t *qeth_dbf_sense = NULL; +static debug_info_t *qeth_dbf_qerr = NULL; + +DEFINE_PER_CPU(char[256], qeth_dbf_txt_buf); + +/** + * some more definitions and declarations + */ +static unsigned int known_devices[][10] = QETH_MODELLIST_ARRAY; + +/* list of our cards */ +struct qeth_card_list_struct qeth_card_list; +/*process list want to be notified*/ +spinlock_t qeth_notify_lock; +struct list_head qeth_notify_list; + +static void qeth_send_control_data_cb(struct qeth_channel *, + struct qeth_cmd_buffer *); + +/** + * here we go with function implementation + */ +static void +qeth_init_qdio_info(struct qeth_card *card); + +static int +qeth_init_qdio_queues(struct qeth_card *card); + +static int +qeth_alloc_qdio_buffers(struct qeth_card *card); + +static void +qeth_free_qdio_buffers(struct qeth_card *); + +static void +qeth_clear_qdio_buffers(struct qeth_card *); + +static void +qeth_clear_ip_list(struct qeth_card *, int, int); + +static void +qeth_clear_ipacmd_list(struct 
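+/*
+ * Note, not from the original source: the qeth_dbf_* handles above are
+ * s390 debug feature areas (setup, data, misc, control, trace, sense,
+ * qerr); macros such as QETH_DBF_TEXT(trace, 5, "irq") from qeth.h write
+ * short records into qeth_dbf_trace at level 5, readable at runtime
+ * through the s390 debug feature interface (see
+ * Documentation/s390/s390dbf.txt).
+ */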
qeth_card *); + +static int +qeth_qdio_clear_card(struct qeth_card *, int); + +static void +qeth_clear_working_pool_list(struct qeth_card *); + +static void +qeth_clear_cmd_buffers(struct qeth_channel *); + +static int +qeth_stop(struct net_device *); + +static void +qeth_clear_ipato_list(struct qeth_card *); + +static int +qeth_is_addr_covered_by_ipato(struct qeth_card *, struct qeth_ipaddr *); + +static void +qeth_irq_tasklet(unsigned long); + +static int +qeth_set_online(struct ccwgroup_device *); + +static struct qeth_ipaddr * +qeth_get_addr_buffer(enum qeth_prot_versions); + +static void +qeth_set_multicast_list(struct net_device *); + +static void +qeth_notify_processes(void) +{ + /*notify all registered processes */ + struct qeth_notify_list_struct *n_entry; + + QETH_DBF_TEXT(trace,3,"procnoti"); + spin_lock(&qeth_notify_lock); + list_for_each_entry(n_entry, &qeth_notify_list, list) { + send_sig(n_entry->signum, n_entry->task, 1); + } + spin_unlock(&qeth_notify_lock); + +} +int +qeth_notifier_unregister(struct task_struct *p) +{ + struct qeth_notify_list_struct *n_entry, *tmp; + + QETH_DBF_TEXT(trace, 2, "notunreg"); + spin_lock(&qeth_notify_lock); + list_for_each_entry_safe(n_entry, tmp, &qeth_notify_list, list) { + if (n_entry->task == p) { + list_del(&n_entry->list); + kfree(n_entry); + goto out; + } + } +out: + spin_unlock(&qeth_notify_lock); + return 0; +} +int +qeth_notifier_register(struct task_struct *p, int signum) +{ + struct qeth_notify_list_struct *n_entry; + + + /*check first if entry already exists*/ + spin_lock(&qeth_notify_lock); + list_for_each_entry(n_entry, &qeth_notify_list, list) { + if (n_entry->task == p) { + n_entry->signum = signum; + spin_unlock(&qeth_notify_lock); + return 0; + } + } + spin_unlock(&qeth_notify_lock); + + n_entry = (struct qeth_notify_list_struct *) + kmalloc(sizeof(struct qeth_notify_list_struct),GFP_KERNEL); + if (!n_entry) + return -ENOMEM; + n_entry->task = p; + n_entry->signum = signum; + spin_lock(&qeth_notify_lock); + list_add(&n_entry->list,&qeth_notify_list); + spin_unlock(&qeth_notify_lock); + return 0; +} + + +/** + * free channel command buffers + */ +static void +qeth_clean_channel(struct qeth_channel *channel) +{ + int cnt; + + QETH_DBF_TEXT(setup, 2, "freech"); + for (cnt = 0; cnt < QETH_CMD_BUFFER_NO; cnt++) + kfree(channel->iob[cnt].data); +} + +/** + * free card + */ +static void +qeth_free_card(struct qeth_card *card) +{ + + QETH_DBF_TEXT(setup, 2, "freecrd"); + QETH_DBF_HEX(setup, 2, &card, sizeof(void *)); + qeth_clean_channel(&card->read); + qeth_clean_channel(&card->write); + if (card->dev) + free_netdev(card->dev); + qeth_clear_ip_list(card, 0, 0); + qeth_clear_ipato_list(card); + kfree(card->ip_tbd_list); + qeth_free_qdio_buffers(card); + kfree(card); +} + +/** + * alloc memory for command buffer per channel + */ +static int +qeth_setup_channel(struct qeth_channel *channel) +{ + int cnt; + + QETH_DBF_TEXT(setup, 2, "setupch"); + for (cnt=0; cnt < QETH_CMD_BUFFER_NO; cnt++) { + channel->iob[cnt].data = (char *) + kmalloc(QETH_BUFSIZE, GFP_DMA|GFP_KERNEL); + if (channel->iob[cnt].data == NULL) + break; + channel->iob[cnt].state = BUF_STATE_FREE; + channel->iob[cnt].channel = channel; + channel->iob[cnt].callback = qeth_send_control_data_cb; + channel->iob[cnt].rc = 0; + } + if (cnt < QETH_CMD_BUFFER_NO) { + while (cnt-- > 0) + kfree(channel->iob[cnt].data); + return -ENOMEM; + } + channel->buf_no = 0; + channel->io_buf_no = 0; + atomic_set(&channel->irq_pending, 0); + spin_lock_init(&channel->iob_lock); + + 
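+	/*
+	 * Note, not from the original source: iob[] works as a ring of
+	 * QETH_CMD_BUFFER_NO slots; io_buf_no appears to be where the next
+	 * buffer is handed out and buf_no the next slot the irq tasklet
+	 * will reap, each wrapping as
+	 *
+	 *	index = (index + 1) % QETH_CMD_BUFFER_NO;
+	 *
+	 * (compare qeth_irq_tasklet() below).
+	 */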
init_waitqueue_head(&channel->wait_q); + channel->irq_tasklet.data = (unsigned long) channel; + channel->irq_tasklet.func = qeth_irq_tasklet; + return 0; +} + +/** + * alloc memory for card structure + */ +static struct qeth_card * +qeth_alloc_card(void) +{ + struct qeth_card *card; + + QETH_DBF_TEXT(setup, 2, "alloccrd"); + card = (struct qeth_card *) kmalloc(sizeof(struct qeth_card), + GFP_DMA|GFP_KERNEL); + if (!card) + return NULL; + QETH_DBF_HEX(setup, 2, &card, sizeof(void *)); + memset(card, 0, sizeof(struct qeth_card)); + if (qeth_setup_channel(&card->read)) { + kfree(card); + return NULL; + } + if (qeth_setup_channel(&card->write)) { + qeth_clean_channel(&card->read); + kfree(card); + return NULL; + } + return card; +} + +static long +__qeth_check_irb_error(struct ccw_device *cdev, struct irb *irb) +{ + if (!IS_ERR(irb)) + return 0; + + switch (PTR_ERR(irb)) { + case -EIO: + PRINT_WARN("i/o-error on device %s\n", cdev->dev.bus_id); + QETH_DBF_TEXT(trace, 2, "ckirberr"); + QETH_DBF_TEXT_(trace, 2, " rc%d", -EIO); + break; + case -ETIMEDOUT: + PRINT_WARN("timeout on device %s\n", cdev->dev.bus_id); + QETH_DBF_TEXT(trace, 2, "ckirberr"); + QETH_DBF_TEXT_(trace, 2, " rc%d", -ETIMEDOUT); + break; + default: + PRINT_WARN("unknown error %ld on device %s\n", PTR_ERR(irb), + cdev->dev.bus_id); + QETH_DBF_TEXT(trace, 2, "ckirberr"); + QETH_DBF_TEXT(trace, 2, " rc???"); + } + return PTR_ERR(irb); +} + +static int +qeth_get_problem(struct ccw_device *cdev, struct irb *irb) +{ + int dstat,cstat; + char *sense; + + sense = (char *) irb->ecw; + cstat = irb->scsw.cstat; + dstat = irb->scsw.dstat; + + if (cstat & (SCHN_STAT_CHN_CTRL_CHK | SCHN_STAT_INTF_CTRL_CHK | + SCHN_STAT_CHN_DATA_CHK | SCHN_STAT_CHAIN_CHECK | + SCHN_STAT_PROT_CHECK | SCHN_STAT_PROG_CHECK)) { + QETH_DBF_TEXT(trace,2, "CGENCHK"); + PRINT_WARN("check on device %s, dstat=x%x, cstat=x%x ", + cdev->dev.bus_id, dstat, cstat); + HEXDUMP16(WARN, "irb: ", irb); + HEXDUMP16(WARN, "irb: ", ((char *) irb) + 32); + return 1; + } + + if (dstat & DEV_STAT_UNIT_CHECK) { + if (sense[SENSE_RESETTING_EVENT_BYTE] & + SENSE_RESETTING_EVENT_FLAG) { + QETH_DBF_TEXT(trace,2,"REVIND"); + return 1; + } + if (sense[SENSE_COMMAND_REJECT_BYTE] & + SENSE_COMMAND_REJECT_FLAG) { + QETH_DBF_TEXT(trace,2,"CMDREJi"); + return 0; + } + if ((sense[2] == 0xaf) && (sense[3] == 0xfe)) { + QETH_DBF_TEXT(trace,2,"AFFE"); + return 1; + } + if ((!sense[0]) && (!sense[1]) && (!sense[2]) && (!sense[3])) { + QETH_DBF_TEXT(trace,2,"ZEROSEN"); + return 0; + } + QETH_DBF_TEXT(trace,2,"DGENCHK"); + return 1; + } + return 0; +} +static int qeth_issue_next_read(struct qeth_card *); + +/** + * interrupt handler + */ +static void +qeth_irq(struct ccw_device *cdev, unsigned long intparm, struct irb *irb) +{ + int rc; + int cstat,dstat; + struct qeth_cmd_buffer *buffer; + struct qeth_channel *channel; + struct qeth_card *card; + + QETH_DBF_TEXT(trace,5,"irq"); + + if (__qeth_check_irb_error(cdev, irb)) + return; + cstat = irb->scsw.cstat; + dstat = irb->scsw.dstat; + + card = CARD_FROM_CDEV(cdev); + if (!card) + return; + + if (card->read.ccwdev == cdev){ + channel = &card->read; + QETH_DBF_TEXT(trace,5,"read"); + } else if (card->write.ccwdev == cdev) { + channel = &card->write; + QETH_DBF_TEXT(trace,5,"write"); + } else { + channel = &card->data; + QETH_DBF_TEXT(trace,5,"data"); + } + atomic_set(&channel->irq_pending, 0); + + if (irb->scsw.fctl & (SCSW_FCTL_CLEAR_FUNC)) + channel->state = CH_STATE_STOPPED; + + if (irb->scsw.fctl & (SCSW_FCTL_HALT_FUNC)) + channel->state = 
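+		/*
+		 * Note, not from the original source: a cleared subchannel
+		 * was left in CH_STATE_STOPPED above, a halted one gets
+		 * CH_STATE_HALTED here; these are the completions that the
+		 * QETH_CLEAR_CHANNEL_PARM / QETH_HALT_CHANNEL_PARM requests
+		 * handled further below are waiting for.
+		 */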
CH_STATE_HALTED; + + /*let's wake up immediately on data channel*/ + if ((channel == &card->data) && (intparm != 0)) + goto out; + + if (intparm == QETH_CLEAR_CHANNEL_PARM) { + QETH_DBF_TEXT(trace, 6, "clrchpar"); + /* we don't have to handle this further */ + intparm = 0; + } + if (intparm == QETH_HALT_CHANNEL_PARM) { + QETH_DBF_TEXT(trace, 6, "hltchpar"); + /* we don't have to handle this further */ + intparm = 0; + } + if ((dstat & DEV_STAT_UNIT_EXCEP) || + (dstat & DEV_STAT_UNIT_CHECK) || + (cstat)) { + if (irb->esw.esw0.erw.cons) { + /* TODO: we should make this s390dbf */ + PRINT_WARN("sense data available on channel %s.\n", + CHANNEL_ID(channel)); + PRINT_WARN(" cstat 0x%X\n dstat 0x%X\n", cstat, dstat); + HEXDUMP16(WARN,"irb: ",irb); + HEXDUMP16(WARN,"sense data: ",irb->ecw); + } + rc = qeth_get_problem(cdev,irb); + if (rc) { + qeth_schedule_recovery(card); + goto out; + } + } + + if (intparm) { + buffer = (struct qeth_cmd_buffer *) __va((addr_t)intparm); + buffer->state = BUF_STATE_PROCESSED; + } + if (channel == &card->data) + return; + + if (channel == &card->read && + channel->state == CH_STATE_UP) + qeth_issue_next_read(card); + + tasklet_schedule(&channel->irq_tasklet); + return; +out: + wake_up(&card->wait_q); +} + +/** + * tasklet function scheduled from irq handler + */ +static void +qeth_irq_tasklet(unsigned long data) +{ + struct qeth_card *card; + struct qeth_channel *channel; + struct qeth_cmd_buffer *iob; + __u8 index; + + QETH_DBF_TEXT(trace,5,"irqtlet"); + channel = (struct qeth_channel *) data; + iob = channel->iob; + index = channel->buf_no; + card = CARD_FROM_CDEV(channel->ccwdev); + while (iob[index].state == BUF_STATE_PROCESSED) { + if (iob[index].callback !=NULL) { + iob[index].callback(channel,iob + index); + } + index = (index + 1) % QETH_CMD_BUFFER_NO; + } + channel->buf_no = index; + wake_up(&card->wait_q); +} + +static int qeth_stop_card(struct qeth_card *); + +static int +qeth_set_offline(struct ccwgroup_device *cgdev) +{ + struct qeth_card *card = (struct qeth_card *) cgdev->dev.driver_data; + int rc = 0; + enum qeth_card_states recover_flag; + + QETH_DBF_TEXT(setup, 3, "setoffl"); + QETH_DBF_HEX(setup, 3, &card, sizeof(void *)); + + recover_flag = card->state; + if (qeth_stop_card(card) == -ERESTARTSYS){ + PRINT_WARN("Stopping card %s interrupted by user!\n", + CARD_BUS_ID(card)); + return -ERESTARTSYS; + } + if ((rc = ccw_device_set_offline(CARD_DDEV(card))) || + (rc = ccw_device_set_offline(CARD_WDEV(card))) || + (rc = ccw_device_set_offline(CARD_RDEV(card)))) { + QETH_DBF_TEXT_(setup, 2, "1err%d", rc); + } + if (recover_flag == CARD_STATE_UP) + card->state = CARD_STATE_RECOVER; + qeth_notify_processes(); + return 0; +} + +static int +qeth_wait_for_threads(struct qeth_card *card, unsigned long threads); + + +static void +qeth_remove_device(struct ccwgroup_device *cgdev) +{ + struct qeth_card *card = (struct qeth_card *) cgdev->dev.driver_data; + unsigned long flags; + + QETH_DBF_TEXT(setup, 3, "rmdev"); + QETH_DBF_HEX(setup, 3, &card, sizeof(void *)); + + if (!card) + return; + + if (qeth_wait_for_threads(card, 0xffffffff)) + return; + + if (cgdev->state == CCWGROUP_ONLINE){ + card->use_hard_stop = 1; + qeth_set_offline(cgdev); + } + /* remove form our internal list */ + write_lock_irqsave(&qeth_card_list.rwlock, flags); + list_del(&card->list); + write_unlock_irqrestore(&qeth_card_list.rwlock, flags); + if (card->dev) + unregister_netdev(card->dev); + qeth_remove_device_attributes(&cgdev->dev); + qeth_free_card(card); + cgdev->dev.driver_data = 
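+	/*
+	 * Note, not from the original source: the teardown order above
+	 * matters: stop traffic and helper threads, set the three
+	 * subchannels offline, unlink the card from the global list,
+	 * unregister the net_device and free the card; the group device's
+	 * driver_data is cleared last so that CARD_FROM_CDEV() cannot hand
+	 * out a pointer to the card being torn down.
+	 */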
NULL;
+	put_device(&cgdev->dev);
+}
+
+static int
+qeth_register_addr_entry(struct qeth_card *, struct qeth_ipaddr *);
+static int
+qeth_deregister_addr_entry(struct qeth_card *, struct qeth_ipaddr *);
+
+/**
+ * Add/remove address to/from card's ip list, i.e. try to add or remove
+ * reference to/from an IP address that is already registered on the card.
+ * Returns:
+ *	0  address was on card and its reference count has been adjusted,
+ *	   but is still > 0, so nothing has to be done
+ *	   also returns 0 if the address was not on card and the todo was
+ *	   to delete the address -> there is also nothing to be done
+ *	1  address was not on card and the todo is to add it to the card's
+ *	   ip list
+ *	-1 address was on card and its reference count has been decremented
+ *	   to <= 0 by the todo -> address must be removed from card
+ */
+static int
+__qeth_ref_ip_on_card(struct qeth_card *card, struct qeth_ipaddr *todo,
+		      struct qeth_ipaddr **__addr)
+{
+	struct qeth_ipaddr *addr;
+	int found = 0;
+
+	list_for_each_entry(addr, &card->ip_list, entry) {
+		if ((addr->proto == QETH_PROT_IPV4) &&
+		    (todo->proto == QETH_PROT_IPV4) &&
+		    (addr->type == todo->type) &&
+		    (addr->u.a4.addr == todo->u.a4.addr) &&
+		    (addr->u.a4.mask == todo->u.a4.mask) ){
+			found = 1;
+			break;
+		}
+		if ((addr->proto == QETH_PROT_IPV6) &&
+		    (todo->proto == QETH_PROT_IPV6) &&
+		    (addr->type == todo->type) &&
+		    (addr->u.a6.pfxlen == todo->u.a6.pfxlen) &&
+		    (memcmp(&addr->u.a6.addr, &todo->u.a6.addr,
+			    sizeof(struct in6_addr)) == 0)) {
+			found = 1;
+			break;
+		}
+	}
+	if (found){
+		addr->users += todo->users;
+		if (addr->users <= 0){
+			*__addr = addr;
+			return -1;
+		} else {
+			/* for VIPA and RXIP limit refcount to 1 */
+			if (addr->type != QETH_IP_TYPE_NORMAL)
+				addr->users = 1;
+			return 0;
+		}
+	}
+	if (todo->users > 0){
+		/* for VIPA and RXIP limit refcount to 1 */
+		if (todo->type != QETH_IP_TYPE_NORMAL)
+			todo->users = 1;
+		return 1;
+	} else
+		return 0;
+}
+
+static inline int
+__qeth_address_exists_in_list(struct list_head *list, struct qeth_ipaddr *addr,
+			      int same_type)
+{
+	struct qeth_ipaddr *tmp;
+
+	list_for_each_entry(tmp, list, entry) {
+		if ((tmp->proto == QETH_PROT_IPV4) &&
+		    (addr->proto == QETH_PROT_IPV4) &&
+		    ((same_type && (tmp->type == addr->type)) ||
+		     (!same_type && (tmp->type != addr->type)) ) &&
+		    (tmp->u.a4.addr == addr->u.a4.addr) ){
+			return 1;
+		}
+		if ((tmp->proto == QETH_PROT_IPV6) &&
+		    (addr->proto == QETH_PROT_IPV6) &&
+		    ((same_type && (tmp->type == addr->type)) ||
+		     (!same_type && (tmp->type != addr->type)) ) &&
+		    (memcmp(&tmp->u.a6.addr, &addr->u.a6.addr,
+			    sizeof(struct in6_addr)) == 0) ) {
+			return 1;
+		}
+	}
+	return 0;
+}
+
+/*
+ * Add an IP address to the todo list. If there is already an "add todo"
+ * for this address in the list, we just increment the reference count.
+ * Returns 0 if we just incremented the reference count. 
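+ * An "add" followed by a matching "delete" before the list is flushed
+ * thus cancels out: the second todo finds the first, users becomes
+ * +1 - 1 = 0, and the entry is unlinked again (the users == 0 branch in
+ * __qeth_insert_ip_todo() below).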
+ */ +static int +__qeth_insert_ip_todo(struct qeth_card *card, struct qeth_ipaddr *addr, int add) +{ + struct qeth_ipaddr *tmp, *t; + int found = 0; + + list_for_each_entry_safe(tmp, t, card->ip_tbd_list, entry) { + if ((addr->type == QETH_IP_TYPE_DEL_ALL_MC) && + (tmp->type == QETH_IP_TYPE_DEL_ALL_MC)) + return 0; + if ((tmp->proto == QETH_PROT_IPV4) && + (addr->proto == QETH_PROT_IPV4) && + (tmp->type == addr->type) && + (tmp->is_multicast == addr->is_multicast) && + (tmp->u.a4.addr == addr->u.a4.addr) && + (tmp->u.a4.mask == addr->u.a4.mask) ){ + found = 1; + break; + } + if ((tmp->proto == QETH_PROT_IPV6) && + (addr->proto == QETH_PROT_IPV6) && + (tmp->type == addr->type) && + (tmp->is_multicast == addr->is_multicast) && + (tmp->u.a6.pfxlen == addr->u.a6.pfxlen) && + (memcmp(&tmp->u.a6.addr, &addr->u.a6.addr, + sizeof(struct in6_addr)) == 0) ){ + found = 1; + break; + } + } + if (found){ + if (addr->users != 0) + tmp->users += addr->users; + else + tmp->users += add? 1:-1; + if (tmp->users == 0){ + list_del(&tmp->entry); + kfree(tmp); + } + return 0; + } else { + if (addr->type == QETH_IP_TYPE_DEL_ALL_MC) + list_add(&addr->entry, card->ip_tbd_list); + else { + if (addr->users == 0) + addr->users += add? 1:-1; + if (add && (addr->type == QETH_IP_TYPE_NORMAL) && + qeth_is_addr_covered_by_ipato(card, addr)){ + QETH_DBF_TEXT(trace, 2, "tkovaddr"); + addr->set_flags |= QETH_IPA_SETIP_TAKEOVER_FLAG; + } + list_add_tail(&addr->entry, card->ip_tbd_list); + } + return 1; + } +} + +/** + * Remove IP address from list + */ +static int +qeth_delete_ip(struct qeth_card *card, struct qeth_ipaddr *addr) +{ + unsigned long flags; + int rc = 0; + + QETH_DBF_TEXT(trace,4,"delip"); + if (addr->proto == QETH_PROT_IPV4) + QETH_DBF_HEX(trace,4,&addr->u.a4.addr,4); + else { + QETH_DBF_HEX(trace,4,&addr->u.a6.addr,8); + QETH_DBF_HEX(trace,4,((char *)&addr->u.a6.addr)+8,8); + } + spin_lock_irqsave(&card->ip_lock, flags); + rc = __qeth_insert_ip_todo(card, addr, 0); + spin_unlock_irqrestore(&card->ip_lock, flags); + return rc; +} + +static int +qeth_add_ip(struct qeth_card *card, struct qeth_ipaddr *addr) +{ + unsigned long flags; + int rc = 0; + + QETH_DBF_TEXT(trace,4,"addip"); + if (addr->proto == QETH_PROT_IPV4) + QETH_DBF_HEX(trace,4,&addr->u.a4.addr,4); + else { + QETH_DBF_HEX(trace,4,&addr->u.a6.addr,8); + QETH_DBF_HEX(trace,4,((char *)&addr->u.a6.addr)+8,8); + } + spin_lock_irqsave(&card->ip_lock, flags); + rc = __qeth_insert_ip_todo(card, addr, 1); + spin_unlock_irqrestore(&card->ip_lock, flags); + return rc; +} + +static inline void +__qeth_delete_all_mc(struct qeth_card *card, unsigned long *flags) +{ + struct qeth_ipaddr *addr, *tmp; + int rc; + + list_for_each_entry_safe(addr, tmp, &card->ip_list, entry) { + if (addr->is_multicast) { + spin_unlock_irqrestore(&card->ip_lock, *flags); + rc = qeth_deregister_addr_entry(card, addr); + spin_lock_irqsave(&card->ip_lock, *flags); + if (!rc) { + list_del(&addr->entry); + kfree(addr); + } + } + } +} + +static void +qeth_set_ip_addr_list(struct qeth_card *card) +{ + struct list_head *tbd_list; + struct qeth_ipaddr *todo, *addr; + unsigned long flags; + int rc; + + QETH_DBF_TEXT(trace, 2, "sdiplist"); + QETH_DBF_HEX(trace, 2, &card, sizeof(void *)); + + spin_lock_irqsave(&card->ip_lock, flags); + tbd_list = card->ip_tbd_list; + card->ip_tbd_list = kmalloc(sizeof(struct list_head), GFP_ATOMIC); + if (!card->ip_tbd_list) { + QETH_DBF_TEXT(trace, 0, "silnomem"); + card->ip_tbd_list = tbd_list; + spin_unlock_irqrestore(&card->ip_lock, flags); + return; + } else 
+ INIT_LIST_HEAD(card->ip_tbd_list); + + while (!list_empty(tbd_list)){ + todo = list_entry(tbd_list->next, struct qeth_ipaddr, entry); + list_del(&todo->entry); + if (todo->type == QETH_IP_TYPE_DEL_ALL_MC){ + __qeth_delete_all_mc(card, &flags); + kfree(todo); + continue; + } + rc = __qeth_ref_ip_on_card(card, todo, &addr); + if (rc == 0) { + /* nothing to be done; only adjusted refcount */ + kfree(todo); + } else if (rc == 1) { + /* new entry to be added to on-card list */ + spin_unlock_irqrestore(&card->ip_lock, flags); + rc = qeth_register_addr_entry(card, todo); + spin_lock_irqsave(&card->ip_lock, flags); + if (!rc) + list_add_tail(&todo->entry, &card->ip_list); + else + kfree(todo); + } else if (rc == -1) { + /* on-card entry to be removed */ + list_del_init(&addr->entry); + spin_unlock_irqrestore(&card->ip_lock, flags); + rc = qeth_deregister_addr_entry(card, addr); + spin_lock_irqsave(&card->ip_lock, flags); + if (!rc) + kfree(addr); + else + list_add_tail(&addr->entry, &card->ip_list); + kfree(todo); + } + } + spin_unlock_irqrestore(&card->ip_lock, flags); + kfree(tbd_list); +} + +static void qeth_delete_mc_addresses(struct qeth_card *); +static void qeth_add_multicast_ipv4(struct qeth_card *); +#ifdef CONFIG_QETH_IPV6 +static void qeth_add_multicast_ipv6(struct qeth_card *); +#endif + +static inline int +qeth_set_thread_start_bit(struct qeth_card *card, unsigned long thread) +{ + unsigned long flags; + + spin_lock_irqsave(&card->thread_mask_lock, flags); + if ( !(card->thread_allowed_mask & thread) || + (card->thread_start_mask & thread) ) { + spin_unlock_irqrestore(&card->thread_mask_lock, flags); + return -EPERM; + } + card->thread_start_mask |= thread; + spin_unlock_irqrestore(&card->thread_mask_lock, flags); + return 0; +} + +static void +qeth_clear_thread_start_bit(struct qeth_card *card, unsigned long thread) +{ + unsigned long flags; + + spin_lock_irqsave(&card->thread_mask_lock, flags); + card->thread_start_mask &= ~thread; + spin_unlock_irqrestore(&card->thread_mask_lock, flags); + wake_up(&card->wait_q); +} + +static void +qeth_clear_thread_running_bit(struct qeth_card *card, unsigned long thread) +{ + unsigned long flags; + + spin_lock_irqsave(&card->thread_mask_lock, flags); + card->thread_running_mask &= ~thread; + spin_unlock_irqrestore(&card->thread_mask_lock, flags); + wake_up(&card->wait_q); +} + +static inline int +__qeth_do_run_thread(struct qeth_card *card, unsigned long thread) +{ + unsigned long flags; + int rc = 0; + + spin_lock_irqsave(&card->thread_mask_lock, flags); + if (card->thread_start_mask & thread){ + if ((card->thread_allowed_mask & thread) && + !(card->thread_running_mask & thread)){ + rc = 1; + card->thread_start_mask &= ~thread; + card->thread_running_mask |= thread; + } else + rc = -EPERM; + } + spin_unlock_irqrestore(&card->thread_mask_lock, flags); + return rc; +} + +static int +qeth_do_run_thread(struct qeth_card *card, unsigned long thread) +{ + int rc = 0; + + wait_event(card->wait_q, + (rc = __qeth_do_run_thread(card, thread)) >= 0); + return rc; +} + +static int +qeth_register_ip_addresses(void *ptr) +{ + struct qeth_card *card; + + card = (struct qeth_card *) ptr; + daemonize("qeth_reg_ip"); + QETH_DBF_TEXT(trace,4,"regipth1"); + if (!qeth_do_run_thread(card, QETH_SET_IP_THREAD)) + return 0; + QETH_DBF_TEXT(trace,4,"regipth2"); + qeth_set_ip_addr_list(card); + qeth_clear_thread_running_bit(card, QETH_SET_IP_THREAD); + return 0; +} + +static int +qeth_recover(void *ptr) +{ + struct qeth_card *card; + int rc = 0; + + card = (struct 
qeth_card *) ptr; + daemonize("qeth_recover"); + QETH_DBF_TEXT(trace,2,"recover1"); + QETH_DBF_HEX(trace, 2, &card, sizeof(void *)); + if (!qeth_do_run_thread(card, QETH_RECOVER_THREAD)) + return 0; + QETH_DBF_TEXT(trace,2,"recover2"); + PRINT_WARN("Recovery of device %s started ...\n", + CARD_BUS_ID(card)); + card->use_hard_stop = 1; + qeth_set_offline(card->gdev); + rc = qeth_set_online(card->gdev); + if (!rc) + PRINT_INFO("Device %s successfully recovered!\n", + CARD_BUS_ID(card)); + else + PRINT_INFO("Device %s could not be recovered!\n", + CARD_BUS_ID(card)); + /* don't run another scheduled recovery */ + qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD); + qeth_clear_thread_running_bit(card, QETH_RECOVER_THREAD); + return 0; +} + +void +qeth_schedule_recovery(struct qeth_card *card) +{ + QETH_DBF_TEXT(trace,2,"startrec"); + + if (qeth_set_thread_start_bit(card, QETH_RECOVER_THREAD) == 0) + schedule_work(&card->kernel_thread_starter); +} + +static int +qeth_do_start_thread(struct qeth_card *card, unsigned long thread) +{ + unsigned long flags; + int rc = 0; + + spin_lock_irqsave(&card->thread_mask_lock, flags); + QETH_DBF_TEXT_(trace, 4, " %02x%02x%02x", + (u8) card->thread_start_mask, + (u8) card->thread_allowed_mask, + (u8) card->thread_running_mask); + rc = (card->thread_start_mask & thread); + spin_unlock_irqrestore(&card->thread_mask_lock, flags); + return rc; +} + +static void +qeth_start_kernel_thread(struct qeth_card *card) +{ + QETH_DBF_TEXT(trace , 2, "strthrd"); + + if (card->read.state != CH_STATE_UP && + card->write.state != CH_STATE_UP) + return; + + if (qeth_do_start_thread(card, QETH_SET_IP_THREAD)) + kernel_thread(qeth_register_ip_addresses, (void *)card,SIGCHLD); + if (qeth_do_start_thread(card, QETH_RECOVER_THREAD)) + kernel_thread(qeth_recover, (void *) card, SIGCHLD); +} + + +static void +qeth_set_initial_options(struct qeth_card *card) +{ + card->options.route4.type = NO_ROUTER; +#ifdef CONFIG_QETH_IPV6 + card->options.route6.type = NO_ROUTER; +#endif /* QETH_IPV6 */ + card->options.checksum_type = QETH_CHECKSUM_DEFAULT; + card->options.broadcast_mode = QETH_TR_BROADCAST_ALLRINGS; + card->options.macaddr_mode = QETH_TR_MACADDR_NONCANONICAL; + card->options.fake_broadcast = 0; + card->options.add_hhlen = DEFAULT_ADD_HHLEN; + card->options.fake_ll = 0; + card->options.layer2 = 0; +} + +/** + * initialize channels, card and all state machines + */ +static int +qeth_setup_card(struct qeth_card *card) +{ + + QETH_DBF_TEXT(setup, 2, "setupcrd"); + QETH_DBF_HEX(setup, 2, &card, sizeof(void *)); + + card->read.state = CH_STATE_DOWN; + card->write.state = CH_STATE_DOWN; + card->data.state = CH_STATE_DOWN; + card->state = CARD_STATE_DOWN; + card->lan_online = 0; + card->use_hard_stop = 0; + card->dev = NULL; +#ifdef CONFIG_QETH_VLAN + spin_lock_init(&card->vlanlock); + card->vlangrp = NULL; +#endif + spin_lock_init(&card->ip_lock); + spin_lock_init(&card->thread_mask_lock); + card->thread_start_mask = 0; + card->thread_allowed_mask = 0; + card->thread_running_mask = 0; + INIT_WORK(&card->kernel_thread_starter, + (void *)qeth_start_kernel_thread,card); + INIT_LIST_HEAD(&card->ip_list); + card->ip_tbd_list = kmalloc(sizeof(struct list_head), GFP_KERNEL); + if (!card->ip_tbd_list) { + QETH_DBF_TEXT(setup, 0, "iptbdnom"); + return -ENOMEM; + } + INIT_LIST_HEAD(card->ip_tbd_list); + INIT_LIST_HEAD(&card->cmd_waiter_list); + init_waitqueue_head(&card->wait_q); + /* initial options */ + qeth_set_initial_options(card); + /* IP address takeover */ + 
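+ /* (a minimal reading of the ipato fields below, from the code in this
+  *  file: addresses covered by an ipato.entries rule get the
+  *  QETH_IPA_SETIP_TAKEOVER_FLAG in __qeth_insert_ip_todo() before they
+  *  are registered on the card; invert4/invert6 presumably invert the
+  *  per-family matching -- qeth_is_addr_covered_by_ipato() is not part
+  *  of this hunk) */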
INIT_LIST_HEAD(&card->ipato.entries); + card->ipato.enabled = 0; + card->ipato.invert4 = 0; + card->ipato.invert6 = 0; + /* init QDIO stuff */ + qeth_init_qdio_info(card); + return 0; +} + +static int +is_1920_device (struct qeth_card *card) +{ + int single_queue = 0; + struct ccw_device *ccwdev; + struct channelPath_dsc { + u8 flags; + u8 lsn; + u8 desc; + u8 chpid; + u8 swla; + u8 zeroes; + u8 chla; + u8 chpp; + } *chp_dsc; + + QETH_DBF_TEXT(setup, 2, "chk_1920"); + + ccwdev = card->data.ccwdev; + chp_dsc = (struct channelPath_dsc *)ccw_device_get_chp_desc(ccwdev, 0); + if (chp_dsc != NULL) { + /* CHPP field bit 6 == 1 -> single queue */ + single_queue = ((chp_dsc->chpp & 0x02) == 0x02); + kfree(chp_dsc); + } + QETH_DBF_TEXT_(setup, 2, "rc:%x", single_queue); + return single_queue; +} + +static int +qeth_determine_card_type(struct qeth_card *card) +{ + int i = 0; + + QETH_DBF_TEXT(setup, 2, "detcdtyp"); + + while (known_devices[i][4]) { + if ((CARD_RDEV(card)->id.dev_type == known_devices[i][2]) && + (CARD_RDEV(card)->id.dev_model == known_devices[i][3])) { + card->info.type = known_devices[i][4]; + if (is_1920_device(card)) { + PRINT_INFO("Priority Queueing not possible " + "due to hardware limitations!\n"); + card->qdio.no_out_queues = 1; + card->qdio.default_out_queue = 0; + } else { + card->qdio.no_out_queues = known_devices[i][8]; + } + card->info.is_multicast_different = known_devices[i][9]; + return 0; + } + i++; + } + card->info.type = QETH_CARD_TYPE_UNKNOWN; + PRINT_ERR("unknown card type on device %s\n", CARD_BUS_ID(card)); + return -ENOENT; +} + +static int +qeth_probe_device(struct ccwgroup_device *gdev) +{ + struct qeth_card *card; + struct device *dev; + unsigned long flags; + int rc; + + QETH_DBF_TEXT(setup, 2, "probedev"); + + dev = &gdev->dev; + if (!get_device(dev)) + return -ENODEV; + + card = qeth_alloc_card(); + if (!card) { + put_device(dev); + QETH_DBF_TEXT_(setup, 2, "1err%d", -ENOMEM); + return -ENOMEM; + } + card->read.ccwdev = gdev->cdev[0]; + card->write.ccwdev = gdev->cdev[1]; + card->data.ccwdev = gdev->cdev[2]; + + if ((rc = qeth_setup_card(card))){ + QETH_DBF_TEXT_(setup, 2, "2err%d", rc); + put_device(dev); + qeth_free_card(card); + return rc; + } + gdev->dev.driver_data = card; + card->gdev = gdev; + gdev->cdev[0]->handler = qeth_irq; + gdev->cdev[1]->handler = qeth_irq; + gdev->cdev[2]->handler = qeth_irq; + + rc = qeth_create_device_attributes(dev); + if (rc) { + put_device(dev); + qeth_free_card(card); + return rc; + } + if ((rc = qeth_determine_card_type(card))){ + PRINT_WARN("%s: not a valid card type\n", __func__); + QETH_DBF_TEXT_(setup, 2, "3err%d", rc); + put_device(dev); + qeth_free_card(card); + return rc; + } + /* insert into our internal list */ + write_lock_irqsave(&qeth_card_list.rwlock, flags); + list_add_tail(&card->list, &qeth_card_list.list); + write_unlock_irqrestore(&qeth_card_list.rwlock, flags); + return rc; +} + + +static int +qeth_get_unitaddr(struct qeth_card *card) +{ + int length; + char *prcd; + int rc; + + QETH_DBF_TEXT(setup, 2, "getunit"); + rc = read_conf_data(CARD_DDEV(card), (void **) &prcd, &length); + if (rc) { + PRINT_ERR("read_conf_data for device %s returned %i\n", + CARD_DDEV_ID(card), rc); + return rc; + } + card->info.chpid = prcd[30]; + card->info.unit_addr2 = prcd[31]; + card->info.cula = prcd[63]; + card->info.guestlan = ((prcd[0x10] == _ascebc['V']) && + (prcd[0x11] == _ascebc['M'])); + return 0; +} + +static void +qeth_init_tokens(struct qeth_card *card) +{ + card->token.issuer_rm_w = 0x00010103UL; + 
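+ /* the token values set in this function are fixed initial values for
+  * the tokens this side sends (the *_w fields); the peer's tokens (the
+  * *_r fields) are filled in later from the IDX/CM/ULP replies, see
+  * qeth_idx_read_cb() and the *_cb() reply handlers below */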
card->token.cm_filter_w = 0x00010108UL; + card->token.cm_connection_w = 0x0001010aUL; + card->token.ulp_filter_w = 0x0001010bUL; + card->token.ulp_connection_w = 0x0001010dUL; +} + +static inline __u16 +raw_devno_from_bus_id(char *id) +{ + id += (strlen(id) - 4); + return (__u16) simple_strtoul(id, &id, 16); +} +/** + * setup channel + */ +static void +qeth_setup_ccw(struct qeth_channel *channel,unsigned char *iob, __u32 len) +{ + struct qeth_card *card; + + QETH_DBF_TEXT(trace, 4, "setupccw"); + card = CARD_FROM_CDEV(channel->ccwdev); + if (channel == &card->read) + memcpy(&channel->ccw, READ_CCW, sizeof(struct ccw1)); + else + memcpy(&channel->ccw, WRITE_CCW, sizeof(struct ccw1)); + channel->ccw.count = len; + channel->ccw.cda = (__u32) __pa(iob); +} + +/** + * get free buffer for ccws (IDX activation, lancmds,ipassists...) + */ +static struct qeth_cmd_buffer * +__qeth_get_buffer(struct qeth_channel *channel) +{ + __u8 index; + + QETH_DBF_TEXT(trace, 6, "getbuff"); + index = channel->io_buf_no; + do { + if (channel->iob[index].state == BUF_STATE_FREE) { + channel->iob[index].state = BUF_STATE_LOCKED; + channel->io_buf_no = (channel->io_buf_no + 1) % + QETH_CMD_BUFFER_NO; + memset(channel->iob[index].data, 0, QETH_BUFSIZE); + return channel->iob + index; + } + index = (index + 1) % QETH_CMD_BUFFER_NO; + } while(index != channel->io_buf_no); + + return NULL; +} + +/** + * release command buffer + */ +static void +qeth_release_buffer(struct qeth_channel *channel, struct qeth_cmd_buffer *iob) +{ + unsigned long flags; + + QETH_DBF_TEXT(trace, 6, "relbuff"); + spin_lock_irqsave(&channel->iob_lock, flags); + memset(iob->data, 0, QETH_BUFSIZE); + iob->state = BUF_STATE_FREE; + iob->callback = qeth_send_control_data_cb; + iob->rc = 0; + spin_unlock_irqrestore(&channel->iob_lock, flags); +} + +static struct qeth_cmd_buffer * +qeth_get_buffer(struct qeth_channel *channel) +{ + struct qeth_cmd_buffer *buffer = NULL; + unsigned long flags; + + spin_lock_irqsave(&channel->iob_lock, flags); + buffer = __qeth_get_buffer(channel); + spin_unlock_irqrestore(&channel->iob_lock, flags); + return buffer; +} + +static struct qeth_cmd_buffer * +qeth_wait_for_buffer(struct qeth_channel *channel) +{ + struct qeth_cmd_buffer *buffer; + wait_event(channel->wait_q, + ((buffer = qeth_get_buffer(channel)) != NULL)); + return buffer; +} + +static void +qeth_clear_cmd_buffers(struct qeth_channel *channel) +{ + int cnt = 0; + + for (cnt=0; cnt < QETH_CMD_BUFFER_NO; cnt++) + qeth_release_buffer(channel,&channel->iob[cnt]); + channel->buf_no = 0; + channel->io_buf_no = 0; +} + +/** + * start IDX for read and write channel + */ +static int +qeth_idx_activate_get_answer(struct qeth_channel *channel, + void (*idx_reply_cb)(struct qeth_channel *, + struct qeth_cmd_buffer *)) +{ + struct qeth_cmd_buffer *iob; + unsigned long flags; + int rc; + struct qeth_card *card; + + QETH_DBF_TEXT(setup, 2, "idxanswr"); + card = CARD_FROM_CDEV(channel->ccwdev); + iob = qeth_get_buffer(channel); + iob->callback = idx_reply_cb; + memcpy(&channel->ccw, READ_CCW, sizeof(struct ccw1)); + channel->ccw.count = QETH_BUFSIZE; + channel->ccw.cda = (__u32) __pa(iob->data); + + wait_event(card->wait_q, + atomic_compare_and_swap(0,1,&channel->irq_pending) == 0); + QETH_DBF_TEXT(setup, 6, "noirqpnd"); + spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags); + rc = ccw_device_start(channel->ccwdev, + &channel->ccw,(addr_t) iob, 0, 0); + spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags); + + if (rc) { + PRINT_ERR("qeth: Error2 in 
activating channel rc=%d\n",rc); + QETH_DBF_TEXT_(setup, 2, "2err%d", rc); + atomic_set(&channel->irq_pending, 0); + wake_up(&card->wait_q); + return rc; + } + rc = wait_event_interruptible_timeout(card->wait_q, + channel->state == CH_STATE_UP, QETH_TIMEOUT); + if (rc == -ERESTARTSYS) + return rc; + if (channel->state != CH_STATE_UP){ + rc = -ETIME; + QETH_DBF_TEXT_(setup, 2, "3err%d", rc); + qeth_clear_cmd_buffers(channel); + } else + rc = 0; + return rc; +} + +static int +qeth_idx_activate_channel(struct qeth_channel *channel, + void (*idx_reply_cb)(struct qeth_channel *, + struct qeth_cmd_buffer *)) +{ + struct qeth_card *card; + struct qeth_cmd_buffer *iob; + unsigned long flags; + __u16 temp; + int rc; + + card = CARD_FROM_CDEV(channel->ccwdev); + + QETH_DBF_TEXT(setup, 2, "idxactch"); + + iob = qeth_get_buffer(channel); + iob->callback = idx_reply_cb; + memcpy(&channel->ccw, WRITE_CCW, sizeof(struct ccw1)); + channel->ccw.count = IDX_ACTIVATE_SIZE; + channel->ccw.cda = (__u32) __pa(iob->data); + if (channel == &card->write) { + memcpy(iob->data, IDX_ACTIVATE_WRITE, IDX_ACTIVATE_SIZE); + memcpy(QETH_TRANSPORT_HEADER_SEQ_NO(iob->data), + &card->seqno.trans_hdr, QETH_SEQ_NO_LENGTH); + card->seqno.trans_hdr++; + } else { + memcpy(iob->data, IDX_ACTIVATE_READ, IDX_ACTIVATE_SIZE); + memcpy(QETH_TRANSPORT_HEADER_SEQ_NO(iob->data), + &card->seqno.trans_hdr, QETH_SEQ_NO_LENGTH); + } + memcpy(QETH_IDX_ACT_ISSUER_RM_TOKEN(iob->data), + &card->token.issuer_rm_w,QETH_MPC_TOKEN_LENGTH); + memcpy(QETH_IDX_ACT_FUNC_LEVEL(iob->data), + &card->info.func_level,sizeof(__u16)); + temp = raw_devno_from_bus_id(CARD_DDEV_ID(card)); + memcpy(QETH_IDX_ACT_QDIO_DEV_CUA(iob->data), &temp, 2); + temp = (card->info.cula << 8) + card->info.unit_addr2; + memcpy(QETH_IDX_ACT_QDIO_DEV_REALADDR(iob->data), &temp, 2); + + wait_event(card->wait_q, + atomic_compare_and_swap(0,1,&channel->irq_pending) == 0); + QETH_DBF_TEXT(setup, 6, "noirqpnd"); + spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags); + rc = ccw_device_start(channel->ccwdev, + &channel->ccw,(addr_t) iob, 0, 0); + spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags); + + if (rc) { + PRINT_ERR("qeth: Error1 in activating channel. 
rc=%d\n",rc); + QETH_DBF_TEXT_(setup, 2, "1err%d", rc); + atomic_set(&channel->irq_pending, 0); + wake_up(&card->wait_q); + return rc; + } + rc = wait_event_interruptible_timeout(card->wait_q, + channel->state == CH_STATE_ACTIVATING, QETH_TIMEOUT); + if (rc == -ERESTARTSYS) + return rc; + if (channel->state != CH_STATE_ACTIVATING) { + PRINT_WARN("qeth: IDX activate timed out!\n"); + QETH_DBF_TEXT_(setup, 2, "2err%d", -ETIME); + qeth_clear_cmd_buffers(channel); + return -ETIME; + } + return qeth_idx_activate_get_answer(channel,idx_reply_cb); +} + +static int +qeth_peer_func_level(int level) +{ + if ((level & 0xff) == 8) + return (level & 0xff) + 0x400; + if (((level >> 8) & 3) == 1) + return (level & 0xff) + 0x200; + return level; +} + +static void +qeth_idx_write_cb(struct qeth_channel *channel, struct qeth_cmd_buffer *iob) +{ + struct qeth_card *card; + __u16 temp; + + QETH_DBF_TEXT(setup ,2, "idxwrcb"); + + if (channel->state == CH_STATE_DOWN) { + channel->state = CH_STATE_ACTIVATING; + goto out; + } + card = CARD_FROM_CDEV(channel->ccwdev); + + if (!(QETH_IS_IDX_ACT_POS_REPLY(iob->data))) { + PRINT_ERR("IDX_ACTIVATE on write channel device %s: negative " + "reply\n", CARD_WDEV_ID(card)); + goto out; + } + memcpy(&temp, QETH_IDX_ACT_FUNC_LEVEL(iob->data), 2); + if ((temp & ~0x0100) != qeth_peer_func_level(card->info.func_level)) { + PRINT_WARN("IDX_ACTIVATE on write channel device %s: " + "function level mismatch " + "(sent: 0x%x, received: 0x%x)\n", + CARD_WDEV_ID(card), card->info.func_level, temp); + goto out; + } + channel->state = CH_STATE_UP; +out: + qeth_release_buffer(channel, iob); +} + +static int +qeth_check_idx_response(unsigned char *buffer) +{ + if (!buffer) + return 0; + + QETH_DBF_HEX(control, 2, buffer, QETH_DBF_CONTROL_LEN); + if ((buffer[2] & 0xc0) == 0xc0) { + PRINT_WARN("received an IDX TERMINATE " + "with cause code 0x%02x%s\n", + buffer[4], + ((buffer[4] == 0x22) ? 
+ " -- try another portname" : "")); + QETH_DBF_TEXT(trace, 2, "ckidxres"); + QETH_DBF_TEXT(trace, 2, " idxterm"); + QETH_DBF_TEXT_(trace, 2, " rc%d", -EIO); + return -EIO; + } + return 0; +} + +static void +qeth_idx_read_cb(struct qeth_channel *channel, struct qeth_cmd_buffer *iob) +{ + struct qeth_card *card; + __u16 temp; + + QETH_DBF_TEXT(setup , 2, "idxrdcb"); + if (channel->state == CH_STATE_DOWN) { + channel->state = CH_STATE_ACTIVATING; + goto out; + } + + card = CARD_FROM_CDEV(channel->ccwdev); + if (qeth_check_idx_response(iob->data)) { + goto out; + } + if (!(QETH_IS_IDX_ACT_POS_REPLY(iob->data))) { + PRINT_ERR("IDX_ACTIVATE on read channel device %s: negative " + "reply\n", CARD_RDEV_ID(card)); + goto out; + } + +/** + * temporary fix for microcode bug + * to revert it,replace OR by AND + */ + if ( (!QETH_IDX_NO_PORTNAME_REQUIRED(iob->data)) || + (card->info.type == QETH_CARD_TYPE_OSAE) ) + card->info.portname_required = 1; + + memcpy(&temp, QETH_IDX_ACT_FUNC_LEVEL(iob->data), 2); + if (temp != qeth_peer_func_level(card->info.func_level)) { + PRINT_WARN("IDX_ACTIVATE on read channel device %s: function " + "level mismatch (sent: 0x%x, received: 0x%x)\n", + CARD_RDEV_ID(card), card->info.func_level, temp); + goto out; + } + memcpy(&card->token.issuer_rm_r, + QETH_IDX_ACT_ISSUER_RM_TOKEN(iob->data), + QETH_MPC_TOKEN_LENGTH); + memcpy(&card->info.mcl_level[0], + QETH_IDX_REPLY_LEVEL(iob->data), QETH_MCL_LENGTH); + channel->state = CH_STATE_UP; +out: + qeth_release_buffer(channel,iob); +} + +static int +qeth_issue_next_read(struct qeth_card *card) +{ + int rc; + struct qeth_cmd_buffer *iob; + + QETH_DBF_TEXT(trace,5,"issnxrd"); + if (card->read.state != CH_STATE_UP) + return -EIO; + iob = qeth_get_buffer(&card->read); + if (!iob) { + PRINT_WARN("issue_next_read failed: no iob available!\n"); + return -ENOMEM; + } + qeth_setup_ccw(&card->read, iob->data, QETH_BUFSIZE); + wait_event(card->wait_q, + atomic_compare_and_swap(0,1,&card->read.irq_pending) == 0); + QETH_DBF_TEXT(trace, 6, "noirqpnd"); + rc = ccw_device_start(card->read.ccwdev, &card->read.ccw, + (addr_t) iob, 0, 0); + if (rc) { + PRINT_ERR("Error in starting next read ccw! 
rc=%i\n", rc); + atomic_set(&card->read.irq_pending, 0); + qeth_schedule_recovery(card); + wake_up(&card->wait_q); + } + return rc; +} + +static struct qeth_reply * +qeth_alloc_reply(struct qeth_card *card) +{ + struct qeth_reply *reply; + + reply = kmalloc(sizeof(struct qeth_reply), GFP_ATOMIC); + if (reply){ + memset(reply, 0, sizeof(struct qeth_reply)); + atomic_set(&reply->refcnt, 1); + reply->card = card; + }; + return reply; +} + +static void +qeth_get_reply(struct qeth_reply *reply) +{ + WARN_ON(atomic_read(&reply->refcnt) <= 0); + atomic_inc(&reply->refcnt); +} + +static void +qeth_put_reply(struct qeth_reply *reply) +{ + WARN_ON(atomic_read(&reply->refcnt) <= 0); + if (atomic_dec_and_test(&reply->refcnt)) + kfree(reply); +} + +static void +qeth_cmd_timeout(unsigned long data) +{ + struct qeth_reply *reply, *list_reply, *r; + unsigned long flags; + + reply = (struct qeth_reply *) data; + spin_lock_irqsave(&reply->card->lock, flags); + list_for_each_entry_safe(list_reply, r, + &reply->card->cmd_waiter_list, list) { + if (reply == list_reply){ + qeth_get_reply(reply); + list_del_init(&reply->list); + spin_unlock_irqrestore(&reply->card->lock, flags); + reply->rc = -ETIME; + reply->received = 1; + wake_up(&reply->wait_q); + qeth_put_reply(reply); + return; + } + } + spin_unlock_irqrestore(&reply->card->lock, flags); +} + +static void +qeth_reset_ip_addresses(struct qeth_card *card) +{ + QETH_DBF_TEXT(trace, 2, "rstipadd"); + + qeth_clear_ip_list(card, 0, 1); + /* this function will also schedule the SET_IP_THREAD */ + qeth_set_multicast_list(card->dev); +} + +static struct qeth_ipa_cmd * +qeth_check_ipa_data(struct qeth_card *card, struct qeth_cmd_buffer *iob) +{ + struct qeth_ipa_cmd *cmd = NULL; + + QETH_DBF_TEXT(trace,5,"chkipad"); + if (IS_IPA(iob->data)){ + cmd = (struct qeth_ipa_cmd *) PDU_ENCAPSULATION(iob->data); + if (IS_IPA_REPLY(cmd)) + return cmd; + else { + switch (cmd->hdr.command) { + case IPA_CMD_STOPLAN: + PRINT_WARN("Link failure on %s (CHPID 0x%X) - " + "there is a network problem or " + "someone pulled the cable or " + "disabled the port.\n", + QETH_CARD_IFNAME(card), + card->info.chpid); + card->lan_online = 0; + netif_carrier_off(card->dev); + return NULL; + case IPA_CMD_STARTLAN: + PRINT_INFO("Link reestablished on %s " + "(CHPID 0x%X). 
Scheduling " + "IP address reset.\n", + QETH_CARD_IFNAME(card), + card->info.chpid); + card->lan_online = 1; + netif_carrier_on(card->dev); + qeth_reset_ip_addresses(card); + return NULL; + case IPA_CMD_REGISTER_LOCAL_ADDR: + QETH_DBF_TEXT(trace,3, "irla"); + break; + case IPA_CMD_UNREGISTER_LOCAL_ADDR: + QETH_DBF_TEXT(trace,3, "urla"); + break; + default: + PRINT_WARN("Received data is IPA " + "but not a reply!\n"); + break; + } + } + } + return cmd; +} + +/** + * wake all waiting ipa commands + */ +static void +qeth_clear_ipacmd_list(struct qeth_card *card) +{ + struct qeth_reply *reply, *r; + unsigned long flags; + + QETH_DBF_TEXT(trace, 4, "clipalst"); + + spin_lock_irqsave(&card->lock, flags); + list_for_each_entry_safe(reply, r, &card->cmd_waiter_list, list) { + qeth_get_reply(reply); + reply->rc = -EIO; + reply->received = 1; + list_del_init(&reply->list); + wake_up(&reply->wait_q); + qeth_put_reply(reply); + } + spin_unlock_irqrestore(&card->lock, flags); +} + +static void +qeth_send_control_data_cb(struct qeth_channel *channel, + struct qeth_cmd_buffer *iob) +{ + struct qeth_card *card; + struct qeth_reply *reply, *r; + struct qeth_ipa_cmd *cmd; + unsigned long flags; + int keep_reply; + + QETH_DBF_TEXT(trace,4,"sndctlcb"); + + card = CARD_FROM_CDEV(channel->ccwdev); + if (qeth_check_idx_response(iob->data)) { + qeth_clear_ipacmd_list(card); + qeth_schedule_recovery(card); + goto out; + } + + cmd = qeth_check_ipa_data(card, iob); + if ((cmd == NULL) && (card->state != CARD_STATE_DOWN)) + goto out; + + spin_lock_irqsave(&card->lock, flags); + list_for_each_entry_safe(reply, r, &card->cmd_waiter_list, list) { + if ((reply->seqno == QETH_IDX_COMMAND_SEQNO) || + ((cmd) && (reply->seqno == cmd->hdr.seqno))) { + qeth_get_reply(reply); + list_del_init(&reply->list); + spin_unlock_irqrestore(&card->lock, flags); + keep_reply = 0; + if (reply->callback != NULL) { + if (cmd) { + reply->offset = (__u16)((char*)cmd - + (char *)iob->data); + keep_reply = reply->callback(card, + reply, + (unsigned long)cmd); + } + else + keep_reply = reply->callback(card, + reply, + (unsigned long)iob); + } + if (cmd) + reply->rc = (u16) cmd->hdr.return_code; + else if (iob->rc) + reply->rc = iob->rc; + if (keep_reply) { + spin_lock_irqsave(&card->lock, flags); + list_add_tail(&reply->list, + &card->cmd_waiter_list); + spin_unlock_irqrestore(&card->lock, flags); + } else { + reply->received = 1; + wake_up(&reply->wait_q); + } + qeth_put_reply(reply); + goto out; + } + } + spin_unlock_irqrestore(&card->lock, flags); +out: + memcpy(&card->seqno.pdu_hdr_ack, + QETH_PDU_HEADER_SEQ_NO(iob->data), + QETH_SEQ_NO_LENGTH); + qeth_release_buffer(channel,iob); +} + +static int +qeth_send_control_data(struct qeth_card *card, int len, + struct qeth_cmd_buffer *iob, + int (*reply_cb) + (struct qeth_card *, struct qeth_reply*, unsigned long), + void *reply_param) + +{ + int rc; + unsigned long flags; + struct qeth_reply *reply; + struct timer_list timer; + + QETH_DBF_TEXT(trace, 2, "sendctl"); + + qeth_setup_ccw(&card->write,iob->data,len); + + memcpy(QETH_TRANSPORT_HEADER_SEQ_NO(iob->data), + &card->seqno.trans_hdr, QETH_SEQ_NO_LENGTH); + card->seqno.trans_hdr++; + + memcpy(QETH_PDU_HEADER_SEQ_NO(iob->data), + &card->seqno.pdu_hdr, QETH_SEQ_NO_LENGTH); + card->seqno.pdu_hdr++; + memcpy(QETH_PDU_HEADER_ACK_SEQ_NO(iob->data), + &card->seqno.pdu_hdr_ack, QETH_SEQ_NO_LENGTH); + iob->callback = qeth_release_buffer; + + reply = qeth_alloc_reply(card); + if (!reply) { + PRINT_WARN("Could not allocate qeth_reply!\n"); + return -ENOMEM;
+ } + reply->callback = reply_cb; + reply->param = reply_param; + if (card->state == CARD_STATE_DOWN) + reply->seqno = QETH_IDX_COMMAND_SEQNO; + else + reply->seqno = card->seqno.ipa++; + init_timer(&timer); + timer.function = qeth_cmd_timeout; + timer.data = (unsigned long) reply; + if (IS_IPA(iob->data)) + timer.expires = jiffies + QETH_IPA_TIMEOUT; + else + timer.expires = jiffies + QETH_TIMEOUT; + init_waitqueue_head(&reply->wait_q); + spin_lock_irqsave(&card->lock, flags); + list_add_tail(&reply->list, &card->cmd_waiter_list); + spin_unlock_irqrestore(&card->lock, flags); + QETH_DBF_HEX(control, 2, iob->data, QETH_DBF_CONTROL_LEN); + wait_event(card->wait_q, + atomic_compare_and_swap(0,1,&card->write.irq_pending) == 0); + QETH_DBF_TEXT(trace, 6, "noirqpnd"); + spin_lock_irqsave(get_ccwdev_lock(card->write.ccwdev), flags); + rc = ccw_device_start(card->write.ccwdev, &card->write.ccw, + (addr_t) iob, 0, 0); + spin_unlock_irqrestore(get_ccwdev_lock(card->write.ccwdev), flags); + if (rc){ + PRINT_WARN("qeth_send_control_data: " + "ccw_device_start rc = %i\n", rc); + QETH_DBF_TEXT_(trace, 2, " err%d", rc); + spin_lock_irqsave(&card->lock, flags); + list_del_init(&reply->list); + qeth_put_reply(reply); + spin_unlock_irqrestore(&card->lock, flags); + qeth_release_buffer(iob->channel, iob); + atomic_set(&card->write.irq_pending, 0); + wake_up(&card->wait_q); + return rc; + } + add_timer(&timer); + wait_event(reply->wait_q, reply->received); + del_timer_sync(&timer); + rc = reply->rc; + qeth_put_reply(reply); + return rc; +} + +static int +qeth_send_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob, + int (*reply_cb) + (struct qeth_card *,struct qeth_reply*, unsigned long), + void *reply_param) +{ + int rc; + char prot_type; + + QETH_DBF_TEXT(trace,4,"sendipa"); + + memcpy(iob->data, IPA_PDU_HEADER, IPA_PDU_HEADER_SIZE); + + if (card->options.layer2) + prot_type = QETH_PROT_LAYER2; + else + prot_type = QETH_PROT_TCPIP; + + memcpy(QETH_IPA_CMD_PROT_TYPE(iob->data),&prot_type,1); + memcpy(QETH_IPA_CMD_DEST_ADDR(iob->data), + &card->token.ulp_connection_r, QETH_MPC_TOKEN_LENGTH); + + rc = qeth_send_control_data(card, IPA_CMD_LENGTH, iob, + reply_cb, reply_param); + return rc; +} + + +static int +qeth_cm_enable_cb(struct qeth_card *card, struct qeth_reply *reply, + unsigned long data) +{ + struct qeth_cmd_buffer *iob; + + QETH_DBF_TEXT(setup, 2, "cmenblcb"); + + iob = (struct qeth_cmd_buffer *) data; + memcpy(&card->token.cm_filter_r, + QETH_CM_ENABLE_RESP_FILTER_TOKEN(iob->data), + QETH_MPC_TOKEN_LENGTH); + QETH_DBF_TEXT_(setup, 2, " rc%d", iob->rc); + return 0; +} + +static int +qeth_cm_enable(struct qeth_card *card) +{ + int rc; + struct qeth_cmd_buffer *iob; + + QETH_DBF_TEXT(setup,2,"cmenable"); + + iob = qeth_wait_for_buffer(&card->write); + memcpy(iob->data, CM_ENABLE, CM_ENABLE_SIZE); + memcpy(QETH_CM_ENABLE_ISSUER_RM_TOKEN(iob->data), + &card->token.issuer_rm_r, QETH_MPC_TOKEN_LENGTH); + memcpy(QETH_CM_ENABLE_FILTER_TOKEN(iob->data), + &card->token.cm_filter_w, QETH_MPC_TOKEN_LENGTH); + + rc = qeth_send_control_data(card, CM_ENABLE_SIZE, iob, + qeth_cm_enable_cb, NULL); + return rc; +} + +static int +qeth_cm_setup_cb(struct qeth_card *card, struct qeth_reply *reply, + unsigned long data) +{ + + struct qeth_cmd_buffer *iob; + + QETH_DBF_TEXT(setup, 2, "cmsetpcb"); + + iob = (struct qeth_cmd_buffer *) data; + memcpy(&card->token.cm_connection_r, + QETH_CM_SETUP_RESP_DEST_ADDR(iob->data), + QETH_MPC_TOKEN_LENGTH); + QETH_DBF_TEXT_(setup, 2, " rc%d", iob->rc); + return 0; +} + 
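+/*
+ * The CM/ULP control commands in this file run in a fixed order, since
+ * each step consumes a token stored by the previous reply: qeth_cm_setup()
+ * uses token.cm_filter_r from qeth_cm_enable(), qeth_ulp_enable() uses
+ * token.cm_connection_r from qeth_cm_setup(), and qeth_ulp_setup() uses
+ * token.ulp_filter_r from qeth_ulp_enable(). A minimal sketch of that
+ * bring-up sequence (illustrative only; the actual caller is outside this
+ * hunk and error handling is elided):
+ *
+ *	rc = qeth_cm_enable(card);		// -> token.cm_filter_r
+ *	if (!rc)
+ *		rc = qeth_cm_setup(card);	// -> token.cm_connection_r
+ *	if (!rc)
+ *		rc = qeth_ulp_enable(card);	// -> token.ulp_filter_r, MTU
+ *	if (!rc)
+ *		rc = qeth_ulp_setup(card);	// -> token.ulp_connection_r
+ */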
+static int +qeth_cm_setup(struct qeth_card *card) +{ + int rc; + struct qeth_cmd_buffer *iob; + + QETH_DBF_TEXT(setup,2,"cmsetup"); + + iob = qeth_wait_for_buffer(&card->write); + memcpy(iob->data, CM_SETUP, CM_SETUP_SIZE); + memcpy(QETH_CM_SETUP_DEST_ADDR(iob->data), + &card->token.issuer_rm_r, QETH_MPC_TOKEN_LENGTH); + memcpy(QETH_CM_SETUP_CONNECTION_TOKEN(iob->data), + &card->token.cm_connection_w, QETH_MPC_TOKEN_LENGTH); + memcpy(QETH_CM_SETUP_FILTER_TOKEN(iob->data), + &card->token.cm_filter_r, QETH_MPC_TOKEN_LENGTH); + rc = qeth_send_control_data(card, CM_SETUP_SIZE, iob, + qeth_cm_setup_cb, NULL); + return rc; + +} + +static int +qeth_ulp_enable_cb(struct qeth_card *card, struct qeth_reply *reply, + unsigned long data) +{ + + __u16 mtu, framesize; + __u16 len; + __u8 link_type; + struct qeth_cmd_buffer *iob; + + QETH_DBF_TEXT(setup, 2, "ulpenacb"); + + iob = (struct qeth_cmd_buffer *) data; + memcpy(&card->token.ulp_filter_r, + QETH_ULP_ENABLE_RESP_FILTER_TOKEN(iob->data), + QETH_MPC_TOKEN_LENGTH); + if (qeth_get_mtu_out_of_mpc(card->info.type)) { + memcpy(&framesize, QETH_ULP_ENABLE_RESP_MAX_MTU(iob->data), 2); + mtu = qeth_get_mtu_outof_framesize(framesize); + if (!mtu) { + iob->rc = -EINVAL; + QETH_DBF_TEXT_(setup, 2, " rc%d", iob->rc); + return 0; + } + card->info.max_mtu = mtu; + card->info.initial_mtu = mtu; + card->qdio.in_buf_size = mtu + 2 * PAGE_SIZE; + } else { + card->info.initial_mtu = qeth_get_initial_mtu_for_card(card); + card->info.max_mtu = qeth_get_max_mtu_for_card(card->info.type); + card->qdio.in_buf_size = QETH_IN_BUF_SIZE_DEFAULT; + } + + memcpy(&len, QETH_ULP_ENABLE_RESP_DIFINFO_LEN(iob->data), 2); + if (len >= QETH_MPC_DIFINFO_LEN_INDICATES_LINK_TYPE) { + memcpy(&link_type, + QETH_ULP_ENABLE_RESP_LINK_TYPE(iob->data), 1); + card->info.link_type = link_type; + } else + card->info.link_type = 0; + QETH_DBF_TEXT_(setup, 2, " rc%d", iob->rc); + return 0; +} + +static int +qeth_ulp_enable(struct qeth_card *card) +{ + int rc; + char prot_type; + struct qeth_cmd_buffer *iob; + + /*FIXME: trace view callbacks*/ + QETH_DBF_TEXT(setup,2,"ulpenabl"); + + iob = qeth_wait_for_buffer(&card->write); + memcpy(iob->data, ULP_ENABLE, ULP_ENABLE_SIZE); + + *(QETH_ULP_ENABLE_LINKNUM(iob->data)) = + (__u8) card->info.portno; + if (card->options.layer2) + prot_type = QETH_PROT_LAYER2; + else + prot_type = QETH_PROT_TCPIP; + + memcpy(QETH_ULP_ENABLE_PROT_TYPE(iob->data),&prot_type,1); + memcpy(QETH_ULP_ENABLE_DEST_ADDR(iob->data), + &card->token.cm_connection_r, QETH_MPC_TOKEN_LENGTH); + memcpy(QETH_ULP_ENABLE_FILTER_TOKEN(iob->data), + &card->token.ulp_filter_w, QETH_MPC_TOKEN_LENGTH); + memcpy(QETH_ULP_ENABLE_PORTNAME_AND_LL(iob->data), + card->info.portname, 9); + rc = qeth_send_control_data(card, ULP_ENABLE_SIZE, iob, + qeth_ulp_enable_cb, NULL); + return rc; + +} + +static inline __u16 +__raw_devno_from_bus_id(char *id) +{ + id += (strlen(id) - 4); + return (__u16) simple_strtoul(id, &id, 16); +} + +static int +qeth_ulp_setup_cb(struct qeth_card *card, struct qeth_reply *reply, + unsigned long data) +{ + struct qeth_cmd_buffer *iob; + + QETH_DBF_TEXT(setup, 2, "ulpstpcb"); + + iob = (struct qeth_cmd_buffer *) data; + memcpy(&card->token.ulp_connection_r, + QETH_ULP_SETUP_RESP_CONNECTION_TOKEN(iob->data), + QETH_MPC_TOKEN_LENGTH); + QETH_DBF_TEXT_(setup, 2, " rc%d", iob->rc); + return 0; +} + +static int +qeth_ulp_setup(struct qeth_card *card) +{ + int rc; + __u16 temp; + struct qeth_cmd_buffer *iob; + + QETH_DBF_TEXT(setup,2,"ulpsetup"); + + iob = 
qeth_wait_for_buffer(&card->write); + memcpy(iob->data, ULP_SETUP, ULP_SETUP_SIZE); + + memcpy(QETH_ULP_SETUP_DEST_ADDR(iob->data), + &card->token.cm_connection_r, QETH_MPC_TOKEN_LENGTH); + memcpy(QETH_ULP_SETUP_CONNECTION_TOKEN(iob->data), + &card->token.ulp_connection_w, QETH_MPC_TOKEN_LENGTH); + memcpy(QETH_ULP_SETUP_FILTER_TOKEN(iob->data), + &card->token.ulp_filter_r, QETH_MPC_TOKEN_LENGTH); + + temp = __raw_devno_from_bus_id(CARD_DDEV_ID(card)); + memcpy(QETH_ULP_SETUP_CUA(iob->data), &temp, 2); + temp = (card->info.cula << 8) + card->info.unit_addr2; + memcpy(QETH_ULP_SETUP_REAL_DEVADDR(iob->data), &temp, 2); + rc = qeth_send_control_data(card, ULP_SETUP_SIZE, iob, + qeth_ulp_setup_cb, NULL); + return rc; +} + +static inline int +qeth_check_for_inbound_error(struct qeth_qdio_buffer *buf, + unsigned int qdio_error, + unsigned int siga_error) +{ + int rc = 0; + + if (qdio_error || siga_error) { + QETH_DBF_TEXT(trace, 2, "qdinerr"); + QETH_DBF_TEXT(qerr, 2, "qdinerr"); + QETH_DBF_TEXT_(qerr, 2, " F15=%02X", + buf->buffer->element[15].flags & 0xff); + QETH_DBF_TEXT_(qerr, 2, " F14=%02X", + buf->buffer->element[14].flags & 0xff); + QETH_DBF_TEXT_(qerr, 2, " qerr=%X", qdio_error); + QETH_DBF_TEXT_(qerr, 2, " serr=%X", siga_error); + rc = 1; + } + return rc; +} + +static inline struct sk_buff * +qeth_get_skb(unsigned int length) +{ + struct sk_buff* skb; +#ifdef CONFIG_QETH_VLAN + if ((skb = dev_alloc_skb(length + VLAN_HLEN))) + skb_reserve(skb, VLAN_HLEN); +#else + skb = dev_alloc_skb(length); +#endif + return skb; +} + +static inline struct sk_buff * +qeth_get_next_skb(struct qeth_card *card, struct qdio_buffer *buffer, + struct qdio_buffer_element **__element, int *__offset, + struct qeth_hdr **hdr) +{ + struct qdio_buffer_element *element = *__element; + int offset = *__offset; + struct sk_buff *skb = NULL; + int skb_len; + void *data_ptr; + int data_len; + + QETH_DBF_TEXT(trace,6,"nextskb"); + /* qeth_hdr must not cross element boundaries */ + if (element->length < offset + sizeof(struct qeth_hdr)){ + if (qeth_is_last_sbale(element)) + return NULL; + element++; + offset = 0; + if (element->length < sizeof(struct qeth_hdr)) + return NULL; + } + *hdr = element->addr + offset; + + offset += sizeof(struct qeth_hdr); + if (card->options.layer2) + skb_len = (*hdr)->hdr.l2.pkt_length; + else + skb_len = (*hdr)->hdr.l3.length; + + if (!skb_len) + return NULL; + if (card->options.fake_ll){ + if (!(skb = qeth_get_skb(skb_len + QETH_FAKE_LL_LEN))) + goto no_mem; + skb_pull(skb, QETH_FAKE_LL_LEN); + } else if (!(skb = qeth_get_skb(skb_len))) + goto no_mem; + data_ptr = element->addr + offset; + while (skb_len) { + data_len = min(skb_len, (int)(element->length - offset)); + if (data_len) + memcpy(skb_put(skb, data_len), data_ptr, data_len); + skb_len -= data_len; + if (skb_len){ + if (qeth_is_last_sbale(element)){ + QETH_DBF_TEXT(trace,4,"unexeob"); + QETH_DBF_TEXT_(trace,4,"%s",CARD_BUS_ID(card)); + QETH_DBF_TEXT(qerr,2,"unexeob"); + QETH_DBF_TEXT_(qerr,2,"%s",CARD_BUS_ID(card)); + QETH_DBF_HEX(misc,4,buffer,sizeof(*buffer)); + dev_kfree_skb_any(skb); + card->stats.rx_errors++; + return NULL; + } + element++; + offset = 0; + data_ptr = element->addr; + } else { + offset += data_len; + } + } + *__element = element; + *__offset = offset; + return skb; +no_mem: + if (net_ratelimit()){ + PRINT_WARN("No memory for packet received on %s.\n", + QETH_CARD_IFNAME(card)); + QETH_DBF_TEXT(trace,2,"noskbmem"); + QETH_DBF_TEXT_(trace,2,"%s",CARD_BUS_ID(card)); + } + card->stats.rx_dropped++; + return NULL; +} 
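+/*
+ * A received frame may span several SBAL elements; qeth_get_next_skb()
+ * above linearizes it into a single skb. The core of the gather loop, as
+ * a stand-alone sketch (plain variables instead of the qdio structures,
+ * next_element_addr() is a hypothetical helper, error paths elided):
+ *
+ *	while (skb_len) {
+ *		data_len = min(skb_len, elem_len - offset);
+ *		memcpy(skb_put(skb, data_len), elem_ptr + offset, data_len);
+ *		skb_len -= data_len;
+ *		if (skb_len) {				// frame continues
+ *			elem_ptr = next_element_addr();
+ *			offset = 0;
+ *		} else					// frame complete
+ *			offset += data_len;
+ *	}
+ *
+ * On return, *__element and *__offset point just past the frame so that
+ * the next call continues with the following frame in the same buffer.
+ */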
+ +static inline unsigned short +qeth_type_trans(struct sk_buff *skb, struct net_device *dev) +{ + struct qeth_card *card; + struct ethhdr *eth; + + QETH_DBF_TEXT(trace,6,"typtrans"); + + card = (struct qeth_card *)dev->priv; +#ifdef CONFIG_TR + if ((card->info.link_type == QETH_LINK_TYPE_HSTR) || + (card->info.link_type == QETH_LINK_TYPE_LANE_TR)) + return tr_type_trans(skb,dev); +#endif /* CONFIG_TR */ + skb->mac.raw = skb->data; + skb_pull(skb, ETH_HLEN ); + eth = eth_hdr(skb); + + if (*eth->h_dest & 1) { + if (memcmp(eth->h_dest, dev->broadcast, ETH_ALEN) == 0) + skb->pkt_type = PACKET_BROADCAST; + else + skb->pkt_type = PACKET_MULTICAST; + } else if (memcmp(eth->h_dest, dev->dev_addr, ETH_ALEN)) + skb->pkt_type = PACKET_OTHERHOST; + + if (ntohs(eth->h_proto) >= 1536) + return eth->h_proto; + if (*(unsigned short *) (skb->data) == 0xFFFF) + return htons(ETH_P_802_3); + return htons(ETH_P_802_2); +} + +static inline void +qeth_rebuild_skb_fake_ll(struct qeth_card *card, struct sk_buff *skb, + struct qeth_hdr *hdr) +{ + struct ethhdr *fake_hdr; + struct iphdr *ip_hdr; + + QETH_DBF_TEXT(trace,5,"skbfake"); + skb->mac.raw = skb->data - QETH_FAKE_LL_LEN; + /* this is a fake ethernet header */ + fake_hdr = (struct ethhdr *) skb->mac.raw; + + /* the destination MAC address */ + switch (skb->pkt_type){ + case PACKET_MULTICAST: + switch (skb->protocol){ +#ifdef CONFIG_QETH_IPV6 + case __constant_htons(ETH_P_IPV6): + ndisc_mc_map((struct in6_addr *) + skb->data + QETH_FAKE_LL_V6_ADDR_POS, + fake_hdr->h_dest, card->dev, 0); + break; +#endif /* CONFIG_QETH_IPV6 */ + case __constant_htons(ETH_P_IP): + ip_hdr = (struct iphdr *)skb->data; + if (card->dev->type == ARPHRD_IEEE802_TR) + ip_tr_mc_map(ip_hdr->daddr, fake_hdr->h_dest); + else + ip_eth_mc_map(ip_hdr->daddr, fake_hdr->h_dest); + break; + default: + memcpy(fake_hdr->h_dest, card->dev->dev_addr, ETH_ALEN); + } + break; + case PACKET_BROADCAST: + memset(fake_hdr->h_dest, 0xff, ETH_ALEN); + break; + default: + memcpy(fake_hdr->h_dest, card->dev->dev_addr, ETH_ALEN); + } + /* the source MAC address */ + if (hdr->hdr.l3.ext_flags & QETH_HDR_EXT_SRC_MAC_ADDR) + memcpy(fake_hdr->h_source, &hdr->hdr.l3.dest_addr[2], ETH_ALEN); + else + memset(fake_hdr->h_source, 0, ETH_ALEN); + /* the protocol */ + fake_hdr->h_proto = skb->protocol; +} + +static inline void +qeth_rebuild_skb_vlan(struct qeth_card *card, struct sk_buff *skb, + struct qeth_hdr *hdr) +{ +#ifdef CONFIG_QETH_VLAN + u16 *vlan_tag; + + if (hdr->hdr.l3.ext_flags & + (QETH_HDR_EXT_VLAN_FRAME | QETH_HDR_EXT_INCLUDE_VLAN_TAG)) { + vlan_tag = (u16 *) skb_push(skb, VLAN_HLEN); + *vlan_tag = (hdr->hdr.l3.ext_flags & QETH_HDR_EXT_VLAN_FRAME)? 
+ hdr->hdr.l3.vlan_id : *((u16 *)&hdr->hdr.l3.dest_addr[12]); + *(vlan_tag + 1) = skb->protocol; + skb->protocol = __constant_htons(ETH_P_8021Q); + } +#endif /* CONFIG_QETH_VLAN */ +} + +static inline __u16 +qeth_layer2_rebuild_skb(struct qeth_card *card, struct sk_buff *skb, + struct qeth_hdr *hdr) +{ + unsigned short vlan_id = 0; +#ifdef CONFIG_QETH_VLAN + struct vlan_hdr *vhdr; +#endif + + skb->pkt_type = PACKET_HOST; + skb->protocol = qeth_type_trans(skb, skb->dev); + if (card->options.checksum_type == NO_CHECKSUMMING) + skb->ip_summed = CHECKSUM_UNNECESSARY; + else + skb->ip_summed = CHECKSUM_NONE; +#ifdef CONFIG_QETH_VLAN + if (hdr->hdr.l2.flags[2] & (QETH_LAYER2_FLAG_VLAN)) { + vhdr = (struct vlan_hdr *) skb->data; + skb->protocol = + __constant_htons(vhdr->h_vlan_encapsulated_proto); + vlan_id = hdr->hdr.l2.vlan_id; + skb_pull(skb, VLAN_HLEN); + } +#endif + return vlan_id; +} + +static inline void +qeth_rebuild_skb(struct qeth_card *card, struct sk_buff *skb, + struct qeth_hdr *hdr) +{ +#ifdef CONFIG_QETH_IPV6 + if (hdr->hdr.l3.flags & QETH_HDR_PASSTHRU) { + skb->pkt_type = PACKET_HOST; + skb->protocol = qeth_type_trans(skb, card->dev); + return; + } +#endif /* CONFIG_QETH_IPV6 */ + skb->protocol = htons((hdr->hdr.l3.flags & QETH_HDR_IPV6)? ETH_P_IPV6 : + ETH_P_IP); + switch (hdr->hdr.l3.flags & QETH_HDR_CAST_MASK){ + case QETH_CAST_UNICAST: + skb->pkt_type = PACKET_HOST; + break; + case QETH_CAST_MULTICAST: + skb->pkt_type = PACKET_MULTICAST; + card->stats.multicast++; + break; + case QETH_CAST_BROADCAST: + skb->pkt_type = PACKET_BROADCAST; + card->stats.multicast++; + break; + case QETH_CAST_ANYCAST: + case QETH_CAST_NOCAST: + default: + skb->pkt_type = PACKET_HOST; + } + qeth_rebuild_skb_vlan(card, skb, hdr); + if (card->options.fake_ll) + qeth_rebuild_skb_fake_ll(card, skb, hdr); + else + skb->mac.raw = skb->data; + skb->ip_summed = card->options.checksum_type; + if (card->options.checksum_type == HW_CHECKSUMMING){ + if ( (hdr->hdr.l3.ext_flags & + (QETH_HDR_EXT_CSUM_HDR_REQ | + QETH_HDR_EXT_CSUM_TRANSP_REQ)) == + (QETH_HDR_EXT_CSUM_HDR_REQ | + QETH_HDR_EXT_CSUM_TRANSP_REQ) ) + skb->ip_summed = CHECKSUM_UNNECESSARY; + else + skb->ip_summed = SW_CHECKSUMMING; + } +} + +static inline void +qeth_process_inbound_buffer(struct qeth_card *card, + struct qeth_qdio_buffer *buf, int index) +{ + struct qdio_buffer_element *element; + struct sk_buff *skb; + struct qeth_hdr *hdr; + int offset; + int rxrc; + __u16 vlan_tag = 0; + + /* get first element of current buffer */ + element = (struct qdio_buffer_element *)&buf->buffer->element[0]; + offset = 0; +#ifdef CONFIG_QETH_PERF_STATS + card->perf_stats.bufs_rec++; +#endif + while((skb = qeth_get_next_skb(card, buf->buffer, &element, + &offset, &hdr))) { + skb->dev = card->dev; + if (hdr->hdr.l2.id == QETH_HEADER_TYPE_LAYER2) + vlan_tag = qeth_layer2_rebuild_skb(card, skb, hdr); + else + qeth_rebuild_skb(card, skb, hdr); + /* is device UP ? 
*/ + if (!(card->dev->flags & IFF_UP)){ + dev_kfree_skb_any(skb); + continue; + } +#ifdef CONFIG_QETH_VLAN + if (vlan_tag) + vlan_hwaccel_rx(skb, card->vlangrp, vlan_tag); + else +#endif + rxrc = netif_rx(skb); + card->dev->last_rx = jiffies; + card->stats.rx_packets++; + card->stats.rx_bytes += skb->len; + } +} + +static inline struct qeth_buffer_pool_entry * +qeth_get_buffer_pool_entry(struct qeth_card *card) +{ + struct qeth_buffer_pool_entry *entry; + + QETH_DBF_TEXT(trace, 6, "gtbfplen"); + if (!list_empty(&card->qdio.in_buf_pool.entry_list)) { + entry = list_entry(card->qdio.in_buf_pool.entry_list.next, + struct qeth_buffer_pool_entry, list); + list_del_init(&entry->list); + return entry; + } + return NULL; +} + +static inline void +qeth_init_input_buffer(struct qeth_card *card, struct qeth_qdio_buffer *buf) +{ + struct qeth_buffer_pool_entry *pool_entry; + int i; + + pool_entry = qeth_get_buffer_pool_entry(card); + /* + * since the buffer is accessed only from the input_tasklet + * there shouldn't be a need to synchronize; also, since we use + * the QETH_IN_BUF_REQUEUE_THRESHOLD we should never run out of + * buffers + */ + BUG_ON(!pool_entry); + + buf->pool_entry = pool_entry; + for(i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i){ + buf->buffer->element[i].length = PAGE_SIZE; + buf->buffer->element[i].addr = pool_entry->elements[i]; + if (i == QETH_MAX_BUFFER_ELEMENTS(card) - 1) + buf->buffer->element[i].flags = SBAL_FLAGS_LAST_ENTRY; + else + buf->buffer->element[i].flags = 0; + } + buf->state = QETH_QDIO_BUF_EMPTY; +} + +static inline void +qeth_clear_output_buffer(struct qeth_qdio_out_q *queue, + struct qeth_qdio_out_buffer *buf) +{ + int i; + struct sk_buff *skb; + + /* is PCI flag set on buffer? */ + if (buf->buffer->element[0].flags & 0x40) + atomic_dec(&queue->set_pci_flags_count); + + while ((skb = skb_dequeue(&buf->skb_list))){ + atomic_dec(&skb->users); + dev_kfree_skb_any(skb); + } + qeth_eddp_buf_release_contexts(buf); + for(i = 0; i < QETH_MAX_BUFFER_ELEMENTS(queue->card); ++i){ + buf->buffer->element[i].length = 0; + buf->buffer->element[i].addr = NULL; + buf->buffer->element[i].flags = 0; + } + buf->next_element_to_fill = 0; + atomic_set(&buf->state, QETH_QDIO_BUF_EMPTY); +} + +static inline void +qeth_queue_input_buffer(struct qeth_card *card, int index) +{ + struct qeth_qdio_q *queue = card->qdio.in_q; + int count; + int i; + int rc; + + QETH_DBF_TEXT(trace,6,"queinbuf"); + count = (index < queue->next_buf_to_init)? + card->qdio.in_buf_pool.buf_count - + (queue->next_buf_to_init - index) : + card->qdio.in_buf_pool.buf_count - + (queue->next_buf_to_init + QDIO_MAX_BUFFERS_PER_Q - index); + /* only requeue at a certain threshold to avoid SIGAs */ + if (count >= QETH_IN_BUF_REQUEUE_THRESHOLD(card)){ + for (i = queue->next_buf_to_init; + i < queue->next_buf_to_init + count; ++i) + qeth_init_input_buffer(card, + &queue->bufs[i % QDIO_MAX_BUFFERS_PER_Q]); + /* + * according to old code, requeueing all 128 buffers at once + * should be avoided in order to benefit from PCI avoidance.
+ * this function keeps at least one buffer (the buffer at + * 'index') un-requeued -> this buffer is the first buffer that + * will be requeued the next time + */ +#ifdef CONFIG_QETH_PERF_STATS + card->perf_stats.inbound_do_qdio_cnt++; + card->perf_stats.inbound_do_qdio_start_time = qeth_get_micros(); +#endif + rc = do_QDIO(CARD_DDEV(card), + QDIO_FLAG_SYNC_INPUT | QDIO_FLAG_UNDER_INTERRUPT, + 0, queue->next_buf_to_init, count, NULL); +#ifdef CONFIG_QETH_PERF_STATS + card->perf_stats.inbound_do_qdio_time += qeth_get_micros() - + card->perf_stats.inbound_do_qdio_start_time; +#endif + if (rc){ + PRINT_WARN("qeth_queue_input_buffer's do_QDIO " + "return %i (device %s).\n", + rc, CARD_DDEV_ID(card)); + QETH_DBF_TEXT(trace,2,"qinberr"); + QETH_DBF_TEXT_(trace,2,"%s",CARD_BUS_ID(card)); + } + queue->next_buf_to_init = (queue->next_buf_to_init + count) % + QDIO_MAX_BUFFERS_PER_Q; + } +} + +static inline void +qeth_put_buffer_pool_entry(struct qeth_card *card, + struct qeth_buffer_pool_entry *entry) +{ + QETH_DBF_TEXT(trace, 6, "ptbfplen"); + list_add_tail(&entry->list, &card->qdio.in_buf_pool.entry_list); +} + +static void +qeth_qdio_input_handler(struct ccw_device * ccwdev, unsigned int status, + unsigned int qdio_err, unsigned int siga_err, + unsigned int queue, int first_element, int count, + unsigned long card_ptr) +{ + struct net_device *net_dev; + struct qeth_card *card; + struct qeth_qdio_buffer *buffer; + int index; + int i; + + QETH_DBF_TEXT(trace, 6, "qdinput"); + card = (struct qeth_card *) card_ptr; + net_dev = card->dev; +#ifdef CONFIG_QETH_PERF_STATS + card->perf_stats.inbound_cnt++; + card->perf_stats.inbound_start_time = qeth_get_micros(); +#endif + if (status & QDIO_STATUS_LOOK_FOR_ERROR) { + if (status & QDIO_STATUS_ACTIVATE_CHECK_CONDITION){ + QETH_DBF_TEXT(trace, 1,"qdinchk"); + QETH_DBF_TEXT_(trace,1,"%s",CARD_BUS_ID(card)); + QETH_DBF_TEXT_(trace,1,"%04X%04X",first_element,count); + QETH_DBF_TEXT_(trace,1,"%04X%04X", queue, status); + qeth_schedule_recovery(card); + return; + } + } + for (i = first_element; i < (first_element + count); ++i) { + index = i % QDIO_MAX_BUFFERS_PER_Q; + buffer = &card->qdio.in_q->bufs[index]; + if (!((status == QDIO_STATUS_LOOK_FOR_ERROR) && + qeth_check_for_inbound_error(buffer, qdio_err, siga_err))) + qeth_process_inbound_buffer(card, buffer, index); + /* clear buffer and give back to hardware */ + qeth_put_buffer_pool_entry(card, buffer->pool_entry); + qeth_queue_input_buffer(card, index); + } +#ifdef CONFIG_QETH_PERF_STATS + card->perf_stats.inbound_time += qeth_get_micros() - + card->perf_stats.inbound_start_time; +#endif +} + +static inline int +qeth_handle_send_error(struct qeth_card *card, + struct qeth_qdio_out_buffer *buffer, + int qdio_err, int siga_err) +{ + int sbalf15 = buffer->buffer->element[15].flags & 0xff; + int cc = siga_err & 3; + + QETH_DBF_TEXT(trace, 6, "hdsnderr"); + switch (cc) { + case 0: + if (qdio_err){ + QETH_DBF_TEXT(trace, 1,"lnkfail"); + QETH_DBF_TEXT_(trace,1,"%s",CARD_BUS_ID(card)); + QETH_DBF_TEXT_(trace,1,"%04x %02x", + (u16)qdio_err, (u8)sbalf15); + return QETH_SEND_ERROR_LINK_FAILURE; + } + return QETH_SEND_ERROR_NONE; + case 2: + if (siga_err & QDIO_SIGA_ERROR_B_BIT_SET) { + QETH_DBF_TEXT(trace, 1, "SIGAcc2B"); + QETH_DBF_TEXT_(trace,1,"%s",CARD_BUS_ID(card)); + return QETH_SEND_ERROR_KICK_IT; + } + if ((sbalf15 >= 15) && (sbalf15 <= 31)) + return QETH_SEND_ERROR_RETRY; + return QETH_SEND_ERROR_LINK_FAILURE; + /* look at qdio_error and sbalf 15 */ + case 1: + QETH_DBF_TEXT(trace, 1, "SIGAcc1"); + 
QETH_DBF_TEXT_(trace,1,"%s",CARD_BUS_ID(card)); + return QETH_SEND_ERROR_LINK_FAILURE; + case 3: + QETH_DBF_TEXT(trace, 1, "SIGAcc3"); + QETH_DBF_TEXT_(trace,1,"%s",CARD_BUS_ID(card)); + return QETH_SEND_ERROR_KICK_IT; + } + return QETH_SEND_ERROR_LINK_FAILURE; +} + +void +qeth_flush_buffers(struct qeth_qdio_out_q *queue, int under_int, + int index, int count) +{ + struct qeth_qdio_out_buffer *buf; + int rc; + int i; + + QETH_DBF_TEXT(trace, 6, "flushbuf"); + + for (i = index; i < index + count; ++i) { + buf = &queue->bufs[i % QDIO_MAX_BUFFERS_PER_Q]; + buf->buffer->element[buf->next_element_to_fill - 1].flags |= + SBAL_FLAGS_LAST_ENTRY; + + if (queue->card->info.type == QETH_CARD_TYPE_IQD) + continue; + + if (!queue->do_pack){ + if ((atomic_read(&queue->used_buffers) >= + (QETH_HIGH_WATERMARK_PACK - + QETH_WATERMARK_PACK_FUZZ)) && + !atomic_read(&queue->set_pci_flags_count)){ + /* it's likely that we'll go to packing + * mode soon */ + atomic_inc(&queue->set_pci_flags_count); + buf->buffer->element[0].flags |= 0x40; + } + } else { + if (!atomic_read(&queue->set_pci_flags_count)){ + /* + * there's no outstanding PCI any more, so we + * have to request a PCI to be sure that the PCI + * will wake at some time in the future; then we + * can flush packed buffers that might still be + * hanging around, which can happen if no + * further send was requested by the stack + */ + atomic_inc(&queue->set_pci_flags_count); + buf->buffer->element[0].flags |= 0x40; + } + } + } + + queue->card->dev->trans_start = jiffies; +#ifdef CONFIG_QETH_PERF_STATS + queue->card->perf_stats.outbound_do_qdio_cnt++; + queue->card->perf_stats.outbound_do_qdio_start_time = qeth_get_micros(); +#endif + if (under_int) + rc = do_QDIO(CARD_DDEV(queue->card), + QDIO_FLAG_SYNC_OUTPUT | QDIO_FLAG_UNDER_INTERRUPT, + queue->queue_no, index, count, NULL); + else + rc = do_QDIO(CARD_DDEV(queue->card), QDIO_FLAG_SYNC_OUTPUT, + queue->queue_no, index, count, NULL); +#ifdef CONFIG_QETH_PERF_STATS + queue->card->perf_stats.outbound_do_qdio_time += qeth_get_micros() - + queue->card->perf_stats.outbound_do_qdio_start_time; +#endif + if (rc){ + QETH_DBF_SPRINTF(trace, 0, "qeth_flush_buffers: do_QDIO " + "returned error (%i) on device %s.", + rc, CARD_DDEV_ID(queue->card)); + QETH_DBF_TEXT(trace, 2, "flushbuf"); + QETH_DBF_TEXT_(trace, 2, " err%d", rc); + queue->card->stats.tx_errors += count; + /* this must not happen under normal circumstances. if it + * happens something is really wrong -> recover */ + qeth_schedule_recovery(queue->card); + return; + } + atomic_add(count, &queue->used_buffers); +#ifdef CONFIG_QETH_PERF_STATS + queue->card->perf_stats.bufs_sent += count; +#endif +} + +/* + * Switches to packing state if the number of used buffers on a queue + * reaches a certain limit. + */ +static inline void +qeth_switch_to_packing_if_needed(struct qeth_qdio_out_q *queue) +{ + if (!queue->do_pack) { + if (atomic_read(&queue->used_buffers) + >= QETH_HIGH_WATERMARK_PACK){ + /* switch non-PACKING -> PACKING */ + QETH_DBF_TEXT(trace, 6, "np->pack"); +#ifdef CONFIG_QETH_PERF_STATS + queue->card->perf_stats.sc_dp_p++; +#endif + queue->do_pack = 1; + } + } +} + +/* + * Switches from packing to non-packing mode. If there is a packing + * buffer on the queue this buffer will be prepared to be flushed. + * In that case 1 is returned to inform the caller. If no buffer + * has to be flushed, zero is returned.
+ */ +static inline int +qeth_switch_to_nonpacking_if_needed(struct qeth_qdio_out_q *queue) +{ + struct qeth_qdio_out_buffer *buffer; + int flush_count = 0; + + if (queue->do_pack) { + if (atomic_read(&queue->used_buffers) + <= QETH_LOW_WATERMARK_PACK) { + /* switch PACKING -> non-PACKING */ + QETH_DBF_TEXT(trace, 6, "pack->np"); +#ifdef CONFIG_QETH_PERF_STATS + queue->card->perf_stats.sc_p_dp++; +#endif + queue->do_pack = 0; + /* flush packing buffers */ + buffer = &queue->bufs[queue->next_buf_to_fill]; + if ((atomic_read(&buffer->state) == + QETH_QDIO_BUF_EMPTY) && + (buffer->next_element_to_fill > 0)) { + atomic_set(&buffer->state,QETH_QDIO_BUF_PRIMED); + flush_count++; + queue->next_buf_to_fill = + (queue->next_buf_to_fill + 1) % + QDIO_MAX_BUFFERS_PER_Q; + } + } + } + return flush_count; +} + +/* + * Called to flush a packing buffer if no more pci flags are on the queue. + * Checks if there is a packing buffer and prepares it to be flushed. + * In that case returns 1, otherwise zero. + */ +static inline int +qeth_flush_buffers_on_no_pci(struct qeth_qdio_out_q *queue) +{ + struct qeth_qdio_out_buffer *buffer; + + buffer = &queue->bufs[queue->next_buf_to_fill]; + if((atomic_read(&buffer->state) == QETH_QDIO_BUF_EMPTY) && + (buffer->next_element_to_fill > 0)){ + /* it's a packing buffer */ + atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED); + queue->next_buf_to_fill = + (queue->next_buf_to_fill + 1) % QDIO_MAX_BUFFERS_PER_Q; + return 1; + } + return 0; +} + +static inline void +qeth_check_outbound_queue(struct qeth_qdio_out_q *queue) +{ + int index; + int flush_cnt = 0; + int q_was_packing = 0; + + /* + * check if we have to switch to non-packing mode or if + * we have to get a pci flag out on the queue + */ + if ((atomic_read(&queue->used_buffers) <= QETH_LOW_WATERMARK_PACK) || + !atomic_read(&queue->set_pci_flags_count)){ + if (atomic_swap(&queue->state, QETH_OUT_Q_LOCKED_FLUSH) == + QETH_OUT_Q_UNLOCKED) { + /* + * If we get in here, there was no action in + * do_send_packet. So, we check if there is a + * packing buffer to be flushed here.
+ */
+ netif_stop_queue(queue->card->dev);
+ index = queue->next_buf_to_fill;
+ q_was_packing = queue->do_pack;
+ flush_cnt += qeth_switch_to_nonpacking_if_needed(queue);
+ if (!flush_cnt &&
+ !atomic_read(&queue->set_pci_flags_count))
+ flush_cnt +=
+ qeth_flush_buffers_on_no_pci(queue);
+#ifdef CONFIG_QETH_PERF_STATS
+ if (q_was_packing)
+ queue->card->perf_stats.bufs_sent_pack +=
+ flush_cnt;
+#endif
+ if (flush_cnt)
+ qeth_flush_buffers(queue, 1, index, flush_cnt);
+ atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
+ }
+ }
+}
+
+static void
+qeth_qdio_output_handler(struct ccw_device * ccwdev, unsigned int status,
+ unsigned int qdio_error, unsigned int siga_error,
+ unsigned int __queue, int first_element, int count,
+ unsigned long card_ptr)
+{
+ struct qeth_card *card = (struct qeth_card *) card_ptr;
+ struct qeth_qdio_out_q *queue = card->qdio.out_qs[__queue];
+ struct qeth_qdio_out_buffer *buffer;
+ int i;
+
+ QETH_DBF_TEXT(trace, 6, "qdouhdl");
+ if (status & QDIO_STATUS_LOOK_FOR_ERROR) {
+ if (status & QDIO_STATUS_ACTIVATE_CHECK_CONDITION){
+ QETH_DBF_SPRINTF(trace, 2, "On device %s: "
+ "received active check "
+ "condition (0x%08x).",
+ CARD_BUS_ID(card), status);
+ QETH_DBF_TEXT(trace, 2, "chkcond");
+ QETH_DBF_TEXT_(trace, 2, "%08x", status);
+ netif_stop_queue(card->dev);
+ qeth_schedule_recovery(card);
+ return;
+ }
+ }
+#ifdef CONFIG_QETH_PERF_STATS
+ card->perf_stats.outbound_handler_cnt++;
+ card->perf_stats.outbound_handler_start_time = qeth_get_micros();
+#endif
+ for(i = first_element; i < (first_element + count); ++i){
+ buffer = &queue->bufs[i % QDIO_MAX_BUFFERS_PER_Q];
+ /* we only handle the KICK_IT error by doing a recovery */
+ if (qeth_handle_send_error(card, buffer, qdio_error, siga_error)
+ == QETH_SEND_ERROR_KICK_IT){
+ netif_stop_queue(card->dev);
+ qeth_schedule_recovery(card);
+ return;
+ }
+ qeth_clear_output_buffer(queue, buffer);
+ }
+ atomic_sub(count, &queue->used_buffers);
+ /* check if we need to do something on this outbound queue */
+ if (card->info.type != QETH_CARD_TYPE_IQD)
+ qeth_check_outbound_queue(queue);
+
+ netif_wake_queue(queue->card->dev);
+#ifdef CONFIG_QETH_PERF_STATS
+ card->perf_stats.outbound_handler_time += qeth_get_micros() -
+ card->perf_stats.outbound_handler_start_time;
+#endif
+}
+
+static void
+qeth_create_qib_param_field(struct qeth_card *card, char *param_field)
+{
+
+ param_field[0] = _ascebc['P'];
+ param_field[1] = _ascebc['C'];
+ param_field[2] = _ascebc['I'];
+ param_field[3] = _ascebc['T'];
+ *((unsigned int *) (&param_field[4])) = QETH_PCI_THRESHOLD_A(card);
+ *((unsigned int *) (&param_field[8])) = QETH_PCI_THRESHOLD_B(card);
+ *((unsigned int *) (&param_field[12])) = QETH_PCI_TIMER_VALUE(card);
+}
+
+static void
+qeth_create_qib_param_field_blkt(struct qeth_card *card, char *param_field)
+{
+ param_field[16] = _ascebc['B'];
+ param_field[17] = _ascebc['L'];
+ param_field[18] = _ascebc['K'];
+ param_field[19] = _ascebc['T'];
+ *((unsigned int *) (&param_field[20])) = card->info.blkt.time_total;
+ *((unsigned int *) (&param_field[24])) = card->info.blkt.inter_packet;
+ *((unsigned int *) (&param_field[28])) = card->info.blkt.inter_packet_jumbo;
+}
+
+static void
+qeth_initialize_working_pool_list(struct qeth_card *card)
+{
+ struct qeth_buffer_pool_entry *entry;
+
+ QETH_DBF_TEXT(trace,5,"inwrklst");
+
+ list_for_each_entry(entry,
+ &card->qdio.init_pool.entry_list, init_list) {
+ qeth_put_buffer_pool_entry(card,entry);
+ }
+}
+
+static void
+qeth_clear_working_pool_list(struct qeth_card *card)
+{
+ struct qeth_buffer_pool_entry
*pool_entry, *tmp;
+
+ QETH_DBF_TEXT(trace,5,"clwrklst");
+ list_for_each_entry_safe(pool_entry, tmp,
+ &card->qdio.in_buf_pool.entry_list, list){
+ list_del(&pool_entry->list);
+ }
+}
+
+static void
+qeth_free_buffer_pool(struct qeth_card *card)
+{
+ struct qeth_buffer_pool_entry *pool_entry, *tmp;
+ int i=0;
+ QETH_DBF_TEXT(trace,5,"freepool");
+ list_for_each_entry_safe(pool_entry, tmp,
+ &card->qdio.init_pool.entry_list, init_list){
+ for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i)
+ free_page((unsigned long)pool_entry->elements[i]);
+ list_del(&pool_entry->init_list);
+ kfree(pool_entry);
+ }
+}
+
+static int
+qeth_alloc_buffer_pool(struct qeth_card *card)
+{
+ struct qeth_buffer_pool_entry *pool_entry;
+ void *ptr;
+ int i, j;
+
+ QETH_DBF_TEXT(trace,5,"alocpool");
+ for (i = 0; i < card->qdio.init_pool.buf_count; ++i){
+ pool_entry = kmalloc(sizeof(*pool_entry), GFP_KERNEL);
+ if (!pool_entry){
+ qeth_free_buffer_pool(card);
+ return -ENOMEM;
+ }
+ for(j = 0; j < QETH_MAX_BUFFER_ELEMENTS(card); ++j){
+ ptr = (void *) __get_free_page(GFP_KERNEL);
+ if (!ptr) {
+ while (j > 0)
+ free_page((unsigned long)
+ pool_entry->elements[--j]);
+ kfree(pool_entry);
+ qeth_free_buffer_pool(card);
+ return -ENOMEM;
+ }
+ pool_entry->elements[j] = ptr;
+ }
+ list_add(&pool_entry->init_list,
+ &card->qdio.init_pool.entry_list);
+ }
+ return 0;
+}
+
+int
+qeth_realloc_buffer_pool(struct qeth_card *card, int bufcnt)
+{
+ QETH_DBF_TEXT(trace, 2, "realcbp");
+
+ if ((card->state != CARD_STATE_DOWN) &&
+ (card->state != CARD_STATE_RECOVER))
+ return -EPERM;
+
+ /* TODO: steal/add buffers from/to a running card's buffer pool (?) */
+ qeth_clear_working_pool_list(card);
+ qeth_free_buffer_pool(card);
+ card->qdio.in_buf_pool.buf_count = bufcnt;
+ card->qdio.init_pool.buf_count = bufcnt;
+ return qeth_alloc_buffer_pool(card);
+}
+
+static int
+qeth_alloc_qdio_buffers(struct qeth_card *card)
+{
+ int i, j;
+
+ QETH_DBF_TEXT(setup, 2, "allcqdbf");
+
+ if (card->qdio.state == QETH_QDIO_ALLOCATED)
+ return 0;
+
+ card->qdio.in_q = kmalloc(sizeof(struct qeth_qdio_q), GFP_KERNEL);
+ if (!card->qdio.in_q)
+ return -ENOMEM;
+ QETH_DBF_TEXT(setup, 2, "inq");
+ QETH_DBF_HEX(setup, 2, &card->qdio.in_q, sizeof(void *));
+ memset(card->qdio.in_q, 0, sizeof(struct qeth_qdio_q));
+ /* give inbound qeth_qdio_buffers their qdio_buffers */
+ for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; ++i)
+ card->qdio.in_q->bufs[i].buffer =
+ &card->qdio.in_q->qdio_bufs[i];
+ /* inbound buffer pool */
+ if (qeth_alloc_buffer_pool(card)){
+ kfree(card->qdio.in_q);
+ return -ENOMEM;
+ }
+ /* outbound */
+ card->qdio.out_qs =
+ kmalloc(card->qdio.no_out_queues *
+ sizeof(struct qeth_qdio_out_q *), GFP_KERNEL);
+ if (!card->qdio.out_qs){
+ qeth_free_buffer_pool(card);
+ return -ENOMEM;
+ }
+ for (i = 0; i < card->qdio.no_out_queues; ++i){
+ card->qdio.out_qs[i] = kmalloc(sizeof(struct qeth_qdio_out_q),
+ GFP_KERNEL);
+ if (!card->qdio.out_qs[i]){
+ while (i > 0)
+ kfree(card->qdio.out_qs[--i]);
+ kfree(card->qdio.out_qs);
+ return -ENOMEM;
+ }
+ QETH_DBF_TEXT_(setup, 2, "outq %i", i);
+ QETH_DBF_HEX(setup, 2, &card->qdio.out_qs[i], sizeof(void *));
+ memset(card->qdio.out_qs[i], 0, sizeof(struct qeth_qdio_out_q));
+ card->qdio.out_qs[i]->queue_no = i;
+ /* give outbound qeth_qdio_buffers their qdio_buffers */
+ for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j){
+ card->qdio.out_qs[i]->bufs[j].buffer =
+ &card->qdio.out_qs[i]->qdio_bufs[j];
+ skb_queue_head_init(&card->qdio.out_qs[i]->bufs[j].
+ skb_list); + INIT_LIST_HEAD(&card->qdio.out_qs[i]->bufs[j].ctx_list); + } + } + card->qdio.state = QETH_QDIO_ALLOCATED; + return 0; +} + +static void +qeth_free_qdio_buffers(struct qeth_card *card) +{ + int i, j; + + QETH_DBF_TEXT(trace, 2, "freeqdbf"); + if (card->qdio.state == QETH_QDIO_UNINITIALIZED) + return; + kfree(card->qdio.in_q); + /* inbound buffer pool */ + qeth_free_buffer_pool(card); + /* free outbound qdio_qs */ + for (i = 0; i < card->qdio.no_out_queues; ++i){ + for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) + qeth_clear_output_buffer(card->qdio.out_qs[i], + &card->qdio.out_qs[i]->bufs[j]); + kfree(card->qdio.out_qs[i]); + } + kfree(card->qdio.out_qs); + card->qdio.state = QETH_QDIO_UNINITIALIZED; +} + +static void +qeth_clear_qdio_buffers(struct qeth_card *card) +{ + int i, j; + + QETH_DBF_TEXT(trace, 2, "clearqdbf"); + /* clear outbound buffers to free skbs */ + for (i = 0; i < card->qdio.no_out_queues; ++i) + if (card->qdio.out_qs[i]){ + for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) + qeth_clear_output_buffer(card->qdio.out_qs[i], + &card->qdio.out_qs[i]->bufs[j]); + } +} + +static void +qeth_init_qdio_info(struct qeth_card *card) +{ + QETH_DBF_TEXT(setup, 4, "intqdinf"); + card->qdio.state = QETH_QDIO_UNINITIALIZED; + /* inbound */ + card->qdio.in_buf_size = QETH_IN_BUF_SIZE_DEFAULT; + card->qdio.init_pool.buf_count = QETH_IN_BUF_COUNT_DEFAULT; + card->qdio.in_buf_pool.buf_count = card->qdio.init_pool.buf_count; + INIT_LIST_HEAD(&card->qdio.in_buf_pool.entry_list); + INIT_LIST_HEAD(&card->qdio.init_pool.entry_list); + /* outbound */ + card->qdio.do_prio_queueing = QETH_PRIOQ_DEFAULT; + card->qdio.default_out_queue = QETH_DEFAULT_QUEUE; +} + +static int +qeth_init_qdio_queues(struct qeth_card *card) +{ + int i, j; + int rc; + + QETH_DBF_TEXT(setup, 2, "initqdqs"); + + /* inbound queue */ + memset(card->qdio.in_q->qdio_bufs, 0, + QDIO_MAX_BUFFERS_PER_Q * sizeof(struct qdio_buffer)); + qeth_initialize_working_pool_list(card); + /*give only as many buffers to hardware as we have buffer pool entries*/ + for (i = 0; i < card->qdio.in_buf_pool.buf_count - 1; ++i) + qeth_init_input_buffer(card, &card->qdio.in_q->bufs[i]); + card->qdio.in_q->next_buf_to_init = card->qdio.in_buf_pool.buf_count - 1; + rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 0, 0, + card->qdio.in_buf_pool.buf_count - 1, NULL); + if (rc) { + QETH_DBF_TEXT_(setup, 2, "1err%d", rc); + return rc; + } + rc = qdio_synchronize(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 0); + if (rc) { + QETH_DBF_TEXT_(setup, 2, "2err%d", rc); + return rc; + } + /* outbound queue */ + for (i = 0; i < card->qdio.no_out_queues; ++i){ + memset(card->qdio.out_qs[i]->qdio_bufs, 0, + QDIO_MAX_BUFFERS_PER_Q * sizeof(struct qdio_buffer)); + for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j){ + qeth_clear_output_buffer(card->qdio.out_qs[i], + &card->qdio.out_qs[i]->bufs[j]); + } + card->qdio.out_qs[i]->card = card; + card->qdio.out_qs[i]->next_buf_to_fill = 0; + card->qdio.out_qs[i]->do_pack = 0; + atomic_set(&card->qdio.out_qs[i]->used_buffers,0); + atomic_set(&card->qdio.out_qs[i]->set_pci_flags_count, 0); + atomic_set(&card->qdio.out_qs[i]->state, + QETH_OUT_Q_UNLOCKED); + } + return 0; +} + +static int +qeth_qdio_establish(struct qeth_card *card) +{ + struct qdio_initialize init_data; + char *qib_param_field; + struct qdio_buffer **in_sbal_ptrs; + struct qdio_buffer **out_sbal_ptrs; + int i, j, k; + int rc; + + QETH_DBF_TEXT(setup, 2, "qdioest"); + + qib_param_field = kmalloc(QDIO_MAX_BUFFERS_PER_Q * sizeof(char), + GFP_KERNEL); + if 
(!qib_param_field) + return -ENOMEM; + + memset(qib_param_field, 0, QDIO_MAX_BUFFERS_PER_Q * sizeof(char)); + + qeth_create_qib_param_field(card, qib_param_field); + qeth_create_qib_param_field_blkt(card, qib_param_field); + + in_sbal_ptrs = kmalloc(QDIO_MAX_BUFFERS_PER_Q * sizeof(void *), + GFP_KERNEL); + if (!in_sbal_ptrs) { + kfree(qib_param_field); + return -ENOMEM; + } + for(i = 0; i < QDIO_MAX_BUFFERS_PER_Q; ++i) + in_sbal_ptrs[i] = (struct qdio_buffer *) + virt_to_phys(card->qdio.in_q->bufs[i].buffer); + + out_sbal_ptrs = + kmalloc(card->qdio.no_out_queues * QDIO_MAX_BUFFERS_PER_Q * + sizeof(void *), GFP_KERNEL); + if (!out_sbal_ptrs) { + kfree(in_sbal_ptrs); + kfree(qib_param_field); + return -ENOMEM; + } + for(i = 0, k = 0; i < card->qdio.no_out_queues; ++i) + for(j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j, ++k){ + out_sbal_ptrs[k] = (struct qdio_buffer *) + virt_to_phys(card->qdio.out_qs[i]-> + bufs[j].buffer); + } + + memset(&init_data, 0, sizeof(struct qdio_initialize)); + init_data.cdev = CARD_DDEV(card); + init_data.q_format = qeth_get_qdio_q_format(card); + init_data.qib_param_field_format = 0; + init_data.qib_param_field = qib_param_field; + init_data.min_input_threshold = QETH_MIN_INPUT_THRESHOLD; + init_data.max_input_threshold = QETH_MAX_INPUT_THRESHOLD; + init_data.min_output_threshold = QETH_MIN_OUTPUT_THRESHOLD; + init_data.max_output_threshold = QETH_MAX_OUTPUT_THRESHOLD; + init_data.no_input_qs = 1; + init_data.no_output_qs = card->qdio.no_out_queues; + init_data.input_handler = (qdio_handler_t *) + qeth_qdio_input_handler; + init_data.output_handler = (qdio_handler_t *) + qeth_qdio_output_handler; + init_data.int_parm = (unsigned long) card; + init_data.flags = QDIO_INBOUND_0COPY_SBALS | + QDIO_OUTBOUND_0COPY_SBALS | + QDIO_USE_OUTBOUND_PCIS; + init_data.input_sbal_addr_array = (void **) in_sbal_ptrs; + init_data.output_sbal_addr_array = (void **) out_sbal_ptrs; + + if (!(rc = qdio_initialize(&init_data))) + card->qdio.state = QETH_QDIO_ESTABLISHED; + + kfree(out_sbal_ptrs); + kfree(in_sbal_ptrs); + kfree(qib_param_field); + return rc; +} + +static int +qeth_qdio_activate(struct qeth_card *card) +{ + QETH_DBF_TEXT(setup,3,"qdioact"); + return qdio_activate(CARD_DDEV(card), 0); +} + +static int +qeth_clear_channel(struct qeth_channel *channel) +{ + unsigned long flags; + struct qeth_card *card; + int rc; + + QETH_DBF_TEXT(trace,3,"clearch"); + card = CARD_FROM_CDEV(channel->ccwdev); + spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags); + rc = ccw_device_clear(channel->ccwdev, QETH_CLEAR_CHANNEL_PARM); + spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags); + + if (rc) + return rc; + rc = wait_event_interruptible_timeout(card->wait_q, + channel->state==CH_STATE_STOPPED, QETH_TIMEOUT); + if (rc == -ERESTARTSYS) + return rc; + if (channel->state != CH_STATE_STOPPED) + return -ETIME; + channel->state = CH_STATE_DOWN; + return 0; +} + +static int +qeth_halt_channel(struct qeth_channel *channel) +{ + unsigned long flags; + struct qeth_card *card; + int rc; + + QETH_DBF_TEXT(trace,3,"haltch"); + card = CARD_FROM_CDEV(channel->ccwdev); + spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags); + rc = ccw_device_halt(channel->ccwdev, QETH_HALT_CHANNEL_PARM); + spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags); + + if (rc) + return rc; + rc = wait_event_interruptible_timeout(card->wait_q, + channel->state==CH_STATE_HALTED, QETH_TIMEOUT); + if (rc == -ERESTARTSYS) + return rc; + if (channel->state != CH_STATE_HALTED) + return -ETIME; + return 0; 
+} + +static int +qeth_halt_channels(struct qeth_card *card) +{ + int rc = 0; + + QETH_DBF_TEXT(trace,3,"haltchs"); + if ((rc = qeth_halt_channel(&card->read))) + return rc; + if ((rc = qeth_halt_channel(&card->write))) + return rc; + return qeth_halt_channel(&card->data); +} +static int +qeth_clear_channels(struct qeth_card *card) +{ + int rc = 0; + + QETH_DBF_TEXT(trace,3,"clearchs"); + if ((rc = qeth_clear_channel(&card->read))) + return rc; + if ((rc = qeth_clear_channel(&card->write))) + return rc; + return qeth_clear_channel(&card->data); +} + +static int +qeth_clear_halt_card(struct qeth_card *card, int halt) +{ + int rc = 0; + + QETH_DBF_TEXT(trace,3,"clhacrd"); + QETH_DBF_HEX(trace, 3, &card, sizeof(void *)); + + if (halt) + rc = qeth_halt_channels(card); + if (rc) + return rc; + return qeth_clear_channels(card); +} + +static int +qeth_qdio_clear_card(struct qeth_card *card, int use_halt) +{ + int rc = 0; + + QETH_DBF_TEXT(trace,3,"qdioclr"); + if (card->qdio.state == QETH_QDIO_ESTABLISHED){ + if ((rc = qdio_cleanup(CARD_DDEV(card), + (card->info.type == QETH_CARD_TYPE_IQD) ? + QDIO_FLAG_CLEANUP_USING_HALT : + QDIO_FLAG_CLEANUP_USING_CLEAR))) + QETH_DBF_TEXT_(trace, 3, "1err%d", rc); + card->qdio.state = QETH_QDIO_ALLOCATED; + } + if ((rc = qeth_clear_halt_card(card, use_halt))) + QETH_DBF_TEXT_(trace, 3, "2err%d", rc); + card->state = CARD_STATE_DOWN; + return rc; +} + +static int +qeth_dm_act(struct qeth_card *card) +{ + int rc; + struct qeth_cmd_buffer *iob; + + QETH_DBF_TEXT(setup,2,"dmact"); + + iob = qeth_wait_for_buffer(&card->write); + memcpy(iob->data, DM_ACT, DM_ACT_SIZE); + + memcpy(QETH_DM_ACT_DEST_ADDR(iob->data), + &card->token.cm_connection_r, QETH_MPC_TOKEN_LENGTH); + memcpy(QETH_DM_ACT_CONNECTION_TOKEN(iob->data), + &card->token.ulp_connection_r, QETH_MPC_TOKEN_LENGTH); + rc = qeth_send_control_data(card, DM_ACT_SIZE, iob, NULL, NULL); + return rc; +} + +static int +qeth_mpc_initialize(struct qeth_card *card) +{ + int rc; + + QETH_DBF_TEXT(setup,2,"mpcinit"); + + if ((rc = qeth_issue_next_read(card))){ + QETH_DBF_TEXT_(setup, 2, "1err%d", rc); + return rc; + } + if ((rc = qeth_cm_enable(card))){ + QETH_DBF_TEXT_(setup, 2, "2err%d", rc); + return rc; + } + if ((rc = qeth_cm_setup(card))){ + QETH_DBF_TEXT_(setup, 2, "3err%d", rc); + return rc; + } + if ((rc = qeth_ulp_enable(card))){ + QETH_DBF_TEXT_(setup, 2, "4err%d", rc); + return rc; + } + if ((rc = qeth_ulp_setup(card))){ + QETH_DBF_TEXT_(setup, 2, "5err%d", rc); + return rc; + } + if ((rc = qeth_alloc_qdio_buffers(card))){ + QETH_DBF_TEXT_(setup, 2, "5err%d", rc); + return rc; + } + if ((rc = qeth_qdio_establish(card))){ + QETH_DBF_TEXT_(setup, 2, "6err%d", rc); + qeth_free_qdio_buffers(card); + goto out_qdio; + } + if ((rc = qeth_qdio_activate(card))){ + QETH_DBF_TEXT_(setup, 2, "7err%d", rc); + goto out_qdio; + } + if ((rc = qeth_dm_act(card))){ + QETH_DBF_TEXT_(setup, 2, "8err%d", rc); + goto out_qdio; + } + + return 0; +out_qdio: + qeth_qdio_clear_card(card, card->info.type==QETH_CARD_TYPE_OSAE); + return rc; +} + +static struct net_device * +qeth_get_netdevice(enum qeth_card_types type, enum qeth_link_types linktype) +{ + struct net_device *dev = NULL; + + switch (type) { + case QETH_CARD_TYPE_OSAE: + switch (linktype) { + case QETH_LINK_TYPE_LANE_TR: + case QETH_LINK_TYPE_HSTR: +#ifdef CONFIG_TR + dev = alloc_trdev(0); +#endif /* CONFIG_TR */ + break; + default: + dev = alloc_etherdev(0); + } + break; + case QETH_CARD_TYPE_IQD: + dev = alloc_netdev(0, "hsi%d", ether_setup); + break; + default: + dev = 
alloc_etherdev(0); + } + return dev; +} + +/*hard_header fake function; used in case fake_ll is set */ +static int +qeth_fake_header(struct sk_buff *skb, struct net_device *dev, + unsigned short type, void *daddr, void *saddr, + unsigned len) +{ + struct ethhdr *hdr; + + hdr = (struct ethhdr *)skb_push(skb, QETH_FAKE_LL_LEN); + memcpy(hdr->h_source, dev->dev_addr, ETH_ALEN); + memcpy(hdr->h_dest, "FAKELL", ETH_ALEN); + if (type != ETH_P_802_3) + hdr->h_proto = htons(type); + else + hdr->h_proto = htons(len); + return QETH_FAKE_LL_LEN; +} + +static inline int +qeth_send_packet(struct qeth_card *, struct sk_buff *); + +static int +qeth_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) +{ + int rc; + struct qeth_card *card; + + QETH_DBF_TEXT(trace, 6, "hrdstxmi"); + card = (struct qeth_card *)dev->priv; + if (skb==NULL) { + card->stats.tx_dropped++; + card->stats.tx_errors++; + /* return OK; otherwise ksoftirqd goes to 100% */ + return NETDEV_TX_OK; + } + if ((card->state != CARD_STATE_UP) || !card->lan_online) { + card->stats.tx_dropped++; + card->stats.tx_errors++; + card->stats.tx_carrier_errors++; + dev_kfree_skb_any(skb); + /* return OK; otherwise ksoftirqd goes to 100% */ + return NETDEV_TX_OK; + } +#ifdef CONFIG_QETH_PERF_STATS + card->perf_stats.outbound_cnt++; + card->perf_stats.outbound_start_time = qeth_get_micros(); +#endif + netif_stop_queue(dev); + if ((rc = qeth_send_packet(card, skb))) { + if (rc == -EBUSY) { + return NETDEV_TX_BUSY; + } else { + card->stats.tx_errors++; + card->stats.tx_dropped++; + dev_kfree_skb_any(skb); + /*set to OK; otherwise ksoftirqd goes to 100% */ + rc = NETDEV_TX_OK; + } + } + netif_wake_queue(dev); +#ifdef CONFIG_QETH_PERF_STATS + card->perf_stats.outbound_time += qeth_get_micros() - + card->perf_stats.outbound_start_time; +#endif + return rc; +} + +static int +qeth_verify_vlan_dev(struct net_device *dev, struct qeth_card *card) +{ + int rc = 0; +#ifdef CONFIG_QETH_VLAN + struct vlan_group *vg; + int i; + + if (!(vg = card->vlangrp)) + return rc; + + for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++){ + if (vg->vlan_devices[i] == dev){ + rc = QETH_VLAN_CARD; + break; + } + } +#endif + return rc; +} + +static int +qeth_verify_dev(struct net_device *dev) +{ + struct qeth_card *card; + unsigned long flags; + int rc = 0; + + read_lock_irqsave(&qeth_card_list.rwlock, flags); + list_for_each_entry(card, &qeth_card_list.list, list){ + if (card->dev == dev){ + rc = QETH_REAL_CARD; + break; + } + rc = qeth_verify_vlan_dev(dev, card); + if (rc) + break; + } + read_unlock_irqrestore(&qeth_card_list.rwlock, flags); + + return rc; +} + +static struct qeth_card * +qeth_get_card_from_dev(struct net_device *dev) +{ + struct qeth_card *card = NULL; + int rc; + + rc = qeth_verify_dev(dev); + if (rc == QETH_REAL_CARD) + card = (struct qeth_card *)dev->priv; + else if (rc == QETH_VLAN_CARD) + card = (struct qeth_card *) + VLAN_DEV_INFO(dev)->real_dev->priv; + + QETH_DBF_TEXT_(trace, 4, "%d", rc); + return card ; +} + +static void +qeth_tx_timeout(struct net_device *dev) +{ + struct qeth_card *card; + + card = (struct qeth_card *) dev->priv; + card->stats.tx_errors++; + qeth_schedule_recovery(card); +} + +static int +qeth_open(struct net_device *dev) +{ + struct qeth_card *card; + + QETH_DBF_TEXT(trace, 4, "qethopen"); + + card = (struct qeth_card *) dev->priv; + + if (card->state != CARD_STATE_SOFTSETUP) + return -ENODEV; + + if ( (card->options.layer2) && + (!card->info.layer2_mac_registered)) { + QETH_DBF_TEXT(trace,4,"nomacadr"); + return -EPERM; + } + 
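/* mark the device and card as up and start the transmit queue */
+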
card->dev->flags |= IFF_UP; + netif_start_queue(dev); + card->data.state = CH_STATE_UP; + card->state = CARD_STATE_UP; + + if (!card->lan_online){ + if (netif_carrier_ok(dev)) + netif_carrier_off(dev); + } + return 0; +} + +static int +qeth_stop(struct net_device *dev) +{ + struct qeth_card *card; + + QETH_DBF_TEXT(trace, 4, "qethstop"); + + card = (struct qeth_card *) dev->priv; + + netif_stop_queue(dev); + card->dev->flags &= ~IFF_UP; + if (card->state == CARD_STATE_UP) + card->state = CARD_STATE_SOFTSETUP; + return 0; +} + +static inline int +qeth_get_cast_type(struct qeth_card *card, struct sk_buff *skb) +{ + int cast_type = RTN_UNSPEC; + + if (skb->dst && skb->dst->neighbour){ + cast_type = skb->dst->neighbour->type; + if ((cast_type == RTN_BROADCAST) || + (cast_type == RTN_MULTICAST) || + (cast_type == RTN_ANYCAST)) + return cast_type; + else + return RTN_UNSPEC; + } + /* try something else */ + if (skb->protocol == ETH_P_IPV6) + return (skb->nh.raw[24] == 0xff) ? RTN_MULTICAST : 0; + else if (skb->protocol == ETH_P_IP) + return ((skb->nh.raw[16] & 0xf0) == 0xe0) ? RTN_MULTICAST : 0; + /* ... */ + if (!memcmp(skb->data, skb->dev->broadcast, 6)) + return RTN_BROADCAST; + else { + u16 hdr_mac; + + hdr_mac = *((u16 *)skb->data); + /* tr multicast? */ + switch (card->info.link_type) { + case QETH_LINK_TYPE_HSTR: + case QETH_LINK_TYPE_LANE_TR: + if ((hdr_mac == QETH_TR_MAC_NC) || + (hdr_mac == QETH_TR_MAC_C)) + return RTN_MULTICAST; + /* eth or so multicast? */ + default: + if ((hdr_mac == QETH_ETH_MAC_V4) || + (hdr_mac == QETH_ETH_MAC_V6)) + return RTN_MULTICAST; + } + } + return cast_type; +} + +static inline int +qeth_get_priority_queue(struct qeth_card *card, struct sk_buff *skb, + int ipv, int cast_type) +{ + if (!ipv && (card->info.type == QETH_CARD_TYPE_OSAE)) + return card->qdio.default_out_queue; + switch (card->qdio.no_out_queues) { + case 4: + if (cast_type && card->info.is_multicast_different) + return card->info.is_multicast_different & + (card->qdio.no_out_queues - 1); + if (card->qdio.do_prio_queueing && (ipv == 4)) { + if (card->qdio.do_prio_queueing==QETH_PRIO_Q_ING_TOS){ + if (skb->nh.iph->tos & IP_TOS_NOTIMPORTANT) + return 3; + if (skb->nh.iph->tos & IP_TOS_HIGHRELIABILITY) + return 2; + if (skb->nh.iph->tos & IP_TOS_HIGHTHROUGHPUT) + return 1; + if (skb->nh.iph->tos & IP_TOS_LOWDELAY) + return 0; + } + if (card->qdio.do_prio_queueing==QETH_PRIO_Q_ING_PREC) + return 3 - (skb->nh.iph->tos >> 6); + } else if (card->qdio.do_prio_queueing && (ipv == 6)) { + /* TODO: IPv6!!! */ + } + return card->qdio.default_out_queue; + case 1: /* fallthrough for single-out-queue 1920-device */ + default: + return card->qdio.default_out_queue; + } +} + +static inline int +qeth_get_ip_version(struct sk_buff *skb) +{ + switch (skb->protocol) { + case ETH_P_IPV6: + return 6; + case ETH_P_IP: + return 4; + default: + return 0; + } +} + +static inline int +qeth_prepare_skb(struct qeth_card *card, struct sk_buff **skb, + struct qeth_hdr **hdr, int ipv) +{ + int rc = 0; +#ifdef CONFIG_QETH_VLAN + u16 *tag; +#endif + + QETH_DBF_TEXT(trace, 6, "prepskb"); + + rc = qeth_realloc_headroom(card, skb, sizeof(struct qeth_hdr)); + if (rc) + return rc; +#ifdef CONFIG_QETH_VLAN + if (card->vlangrp && vlan_tx_tag_present(*skb) && + ((ipv == 6) || card->options.layer2) ) { + /* + * Move the mac addresses (6 bytes src, 6 bytes dest) + * to the beginning of the new header. We are using three + * memcpys instead of one memmove to save cycles. 
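+ * (the 12 mac bytes move up by 4 so the vlan tag ends up between
+ * the source mac and the original type field)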
+ */
+ skb_push(*skb, VLAN_HLEN);
+ memcpy((*skb)->data, (*skb)->data + 4, 4);
+ memcpy((*skb)->data + 4, (*skb)->data + 8, 4);
+ memcpy((*skb)->data + 8, (*skb)->data + 12, 4);
+ tag = (u16 *)((*skb)->data + 12);
+ /*
+ * first two bytes = ETH_P_8021Q (0x8100)
+ * second two bytes = VLANID
+ */
+ *tag = __constant_htons(ETH_P_8021Q);
+ *(tag + 1) = htons(vlan_tx_tag_get(*skb));
+ }
+#endif
+ *hdr = (struct qeth_hdr *)
+ qeth_push_skb(card, skb, sizeof(struct qeth_hdr));
+ if (*hdr == NULL)
+ return -EINVAL;
+ return 0;
+}
+
+static inline u8
+qeth_get_qeth_hdr_flags4(int cast_type)
+{
+ if (cast_type == RTN_MULTICAST)
+ return QETH_CAST_MULTICAST;
+ if (cast_type == RTN_BROADCAST)
+ return QETH_CAST_BROADCAST;
+ return QETH_CAST_UNICAST;
+}
+
+static inline u8
+qeth_get_qeth_hdr_flags6(int cast_type)
+{
+ u8 ct = QETH_HDR_PASSTHRU | QETH_HDR_IPV6;
+ if (cast_type == RTN_MULTICAST)
+ return ct | QETH_CAST_MULTICAST;
+ if (cast_type == RTN_ANYCAST)
+ return ct | QETH_CAST_ANYCAST;
+ if (cast_type == RTN_BROADCAST)
+ return ct | QETH_CAST_BROADCAST;
+ return ct | QETH_CAST_UNICAST;
+}
+
+static inline void
+qeth_layer2_get_packet_type(struct qeth_card *card, struct qeth_hdr *hdr,
+ struct sk_buff *skb)
+{
+ __u16 hdr_mac;
+
+ if (!memcmp(skb->data+QETH_HEADER_SIZE,
+ skb->dev->broadcast,6)) { /* broadcast? */
+ *(__u32 *)hdr->hdr.l2.flags |=
+ QETH_LAYER2_FLAG_BROADCAST << 8;
+ return;
+ }
+ hdr_mac=*((__u16*)skb->data);
+ /* tr multicast? */
+ switch (card->info.link_type) {
+ case QETH_LINK_TYPE_HSTR:
+ case QETH_LINK_TYPE_LANE_TR:
+ if ((hdr_mac == QETH_TR_MAC_NC) ||
+ (hdr_mac == QETH_TR_MAC_C) )
+ *(__u32 *)hdr->hdr.l2.flags |=
+ QETH_LAYER2_FLAG_MULTICAST << 8;
+ else
+ *(__u32 *)hdr->hdr.l2.flags |=
+ QETH_LAYER2_FLAG_UNICAST << 8;
+ break;
+ /* eth or so multicast? */
+ default:
+ if ( (hdr_mac==QETH_ETH_MAC_V4) ||
+ (hdr_mac==QETH_ETH_MAC_V6) )
+ *(__u32 *)hdr->hdr.l2.flags |=
+ QETH_LAYER2_FLAG_MULTICAST << 8;
+ else
+ *(__u32 *)hdr->hdr.l2.flags |=
+ QETH_LAYER2_FLAG_UNICAST << 8;
+ }
+}
+
+static inline void
+qeth_layer2_fill_header(struct qeth_card *card, struct qeth_hdr *hdr,
+ struct sk_buff *skb, int cast_type)
+{
+ memset(hdr, 0, sizeof(struct qeth_hdr));
+ hdr->hdr.l2.id = QETH_HEADER_TYPE_LAYER2;
+
+ /* set byte 0 to "0x02" and byte 3 to casting flags */
+ if (cast_type==RTN_MULTICAST)
+ *(__u32 *)hdr->hdr.l2.flags |= QETH_LAYER2_FLAG_MULTICAST << 8;
+ else if (cast_type==RTN_BROADCAST)
+ *(__u32 *)hdr->hdr.l2.flags |= QETH_LAYER2_FLAG_BROADCAST << 8;
+ else
+ qeth_layer2_get_packet_type(card, hdr, skb);
+
+ hdr->hdr.l2.pkt_length = skb->len-QETH_HEADER_SIZE;
+#ifdef CONFIG_QETH_VLAN
+ /* VSWITCH relies on the VLAN
+ * information to be present in
+ * the QDIO header */
+ if ((card->vlangrp != NULL) &&
+ vlan_tx_tag_present(skb)) {
+ *(__u32 *)hdr->hdr.l2.flags |= QETH_LAYER2_FLAG_VLAN << 8;
+ hdr->hdr.l2.vlan_id = vlan_tx_tag_get(skb);
+ }
+#endif
+}
+
+void
+qeth_fill_header(struct qeth_card *card, struct qeth_hdr *hdr,
+ struct sk_buff *skb, int ipv, int cast_type)
+{
+ QETH_DBF_TEXT(trace, 6, "fillhdr");
+
+ memset(hdr, 0, sizeof(struct qeth_hdr));
+ if (card->options.layer2) {
+ qeth_layer2_fill_header(card, hdr, skb, cast_type);
+ return;
+ }
+ hdr->hdr.l3.id = QETH_HEADER_TYPE_LAYER3;
+ hdr->hdr.l3.ext_flags = 0;
+#ifdef CONFIG_QETH_VLAN
+ /*
+ * before we're going to overwrite this location with next hop ip.
+ * v6 uses passthrough, v4 sets the tag in the QDIO header.
+ */
+ if (card->vlangrp && vlan_tx_tag_present(skb)) {
+ hdr->hdr.l3.ext_flags = (ipv == 4) ?
+ QETH_HDR_EXT_VLAN_FRAME : + QETH_HDR_EXT_INCLUDE_VLAN_TAG; + hdr->hdr.l3.vlan_id = vlan_tx_tag_get(skb); + } +#endif /* CONFIG_QETH_VLAN */ + hdr->hdr.l3.length = skb->len - sizeof(struct qeth_hdr); + if (ipv == 4) { /* IPv4 */ + hdr->hdr.l3.flags = qeth_get_qeth_hdr_flags4(cast_type); + memset(hdr->hdr.l3.dest_addr, 0, 12); + if ((skb->dst) && (skb->dst->neighbour)) { + *((u32 *) (&hdr->hdr.l3.dest_addr[12])) = + *((u32 *) skb->dst->neighbour->primary_key); + } else { + /* fill in destination address used in ip header */ + *((u32 *) (&hdr->hdr.l3.dest_addr[12])) = skb->nh.iph->daddr; + } + } else if (ipv == 6) { /* IPv6 or passthru */ + hdr->hdr.l3.flags = qeth_get_qeth_hdr_flags6(cast_type); + if ((skb->dst) && (skb->dst->neighbour)) { + memcpy(hdr->hdr.l3.dest_addr, + skb->dst->neighbour->primary_key, 16); + } else { + /* fill in destination address used in ip header */ + memcpy(hdr->hdr.l3.dest_addr, &skb->nh.ipv6h->daddr, 16); + } + } else { /* passthrough */ + if (!memcmp(skb->data + sizeof(struct qeth_hdr), + skb->dev->broadcast, 6)) { /* broadcast? */ + hdr->hdr.l3.flags = QETH_CAST_BROADCAST | QETH_HDR_PASSTHRU; + } else { + hdr->hdr.l3.flags = (cast_type == RTN_MULTICAST) ? + QETH_CAST_MULTICAST | QETH_HDR_PASSTHRU : + QETH_CAST_UNICAST | QETH_HDR_PASSTHRU; + } + } +} + +static inline void +__qeth_fill_buffer_frag(struct sk_buff *skb, struct qdio_buffer *buffer, + int *next_element_to_fill) +{ + int length = skb->len; + struct skb_frag_struct *frag; + int fragno; + unsigned long addr; + int element; + int first_lap = 1; + + fragno = skb_shinfo(skb)->nr_frags; /* start with last frag */ + element = *next_element_to_fill + fragno; + while (length > 0) { + if (fragno > 0) { + frag = &skb_shinfo(skb)->frags[fragno - 1]; + addr = (page_to_pfn(frag->page) << PAGE_SHIFT) + + frag->page_offset; + buffer->element[element].addr = (char *)addr; + buffer->element[element].length = frag->size; + length -= frag->size; + if (first_lap) + buffer->element[element].flags = + SBAL_FLAGS_LAST_FRAG; + else + buffer->element[element].flags = + SBAL_FLAGS_MIDDLE_FRAG; + } else { + buffer->element[element].addr = skb->data; + buffer->element[element].length = length; + length = 0; + buffer->element[element].flags = + SBAL_FLAGS_FIRST_FRAG; + } + element--; + fragno--; + first_lap = 0; + } + *next_element_to_fill += skb_shinfo(skb)->nr_frags + 1; +} + +static inline void +__qeth_fill_buffer(struct sk_buff *skb, struct qdio_buffer *buffer, + int *next_element_to_fill) +{ + int length = skb->len; + int length_here; + int element; + char *data; + int first_lap = 1; + + element = *next_element_to_fill; + data = skb->data; + while (length > 0) { + /* length_here is the remaining amount of data in this page */ + length_here = PAGE_SIZE - ((unsigned long) data % PAGE_SIZE); + if (length < length_here) + length_here = length; + buffer->element[element].addr = data; + buffer->element[element].length = length_here; + length -= length_here; + if (!length){ + if (first_lap) + buffer->element[element].flags = 0; + else + buffer->element[element].flags = + SBAL_FLAGS_LAST_FRAG; + } else { + if (first_lap) + buffer->element[element].flags = + SBAL_FLAGS_FIRST_FRAG; + else + buffer->element[element].flags = + SBAL_FLAGS_MIDDLE_FRAG; + } + data += length_here; + element++; + first_lap = 0; + } + *next_element_to_fill = element; +} + +static inline int +qeth_fill_buffer(struct qeth_qdio_out_q *queue, + struct qeth_qdio_out_buffer *buf, + struct sk_buff *skb) +{ + struct qdio_buffer *buffer; + int flush_cnt = 0; + + 
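/* fill buf with the skb's data and queue the skb on the buffer's
+ * skb_list; returns 1 if the buffer should be flushed, 0 otherwise */
+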
QETH_DBF_TEXT(trace, 6, "qdfillbf");
+ buffer = buf->buffer;
+ atomic_inc(&skb->users);
+ skb_queue_tail(&buf->skb_list, skb);
+ if (skb_shinfo(skb)->nr_frags == 0)
+ __qeth_fill_buffer(skb, buffer,
+ (int *)&buf->next_element_to_fill);
+ else
+ __qeth_fill_buffer_frag(skb, buffer,
+ (int *)&buf->next_element_to_fill);
+
+ if (!queue->do_pack) {
+ QETH_DBF_TEXT(trace, 6, "fillbfnp");
+ /* set state to PRIMED -> will be flushed */
+ atomic_set(&buf->state, QETH_QDIO_BUF_PRIMED);
+ flush_cnt = 1;
+ } else {
+ QETH_DBF_TEXT(trace, 6, "fillbfpa");
+#ifdef CONFIG_QETH_PERF_STATS
+ queue->card->perf_stats.skbs_sent_pack++;
+#endif
+ if (buf->next_element_to_fill >=
+ QETH_MAX_BUFFER_ELEMENTS(queue->card)) {
+ /*
+ * packing buffer is full -> set state PRIMED
+ * -> will be flushed
+ */
+ atomic_set(&buf->state, QETH_QDIO_BUF_PRIMED);
+ flush_cnt = 1;
+ }
+ }
+ return flush_cnt;
+}
+
+static inline int
+qeth_do_send_packet_fast(struct qeth_card *card, struct qeth_qdio_out_q *queue,
+ struct sk_buff *skb, struct qeth_hdr *hdr,
+ int elements_needed,
+ struct qeth_eddp_context *ctx)
+{
+ struct qeth_qdio_out_buffer *buffer;
+ int buffers_needed = 0;
+ int flush_cnt = 0;
+ int index;
+
+ QETH_DBF_TEXT(trace, 6, "dosndpfa");
+
+ /* spin until we get the queue ... */
+ while (atomic_compare_and_swap(QETH_OUT_Q_UNLOCKED,
+ QETH_OUT_Q_LOCKED,
+ &queue->state));
+ /* ... now we've got the queue */
+ index = queue->next_buf_to_fill;
+ buffer = &queue->bufs[queue->next_buf_to_fill];
+ /*
+ * check if buffer is empty to make sure that we do not 'overtake'
+ * ourselves and try to fill a buffer that is already primed
+ */
+ if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY) {
+ card->stats.tx_dropped++;
+ atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
+ return -EBUSY;
+ }
+ if (ctx == NULL)
+ queue->next_buf_to_fill = (queue->next_buf_to_fill + 1) %
+ QDIO_MAX_BUFFERS_PER_Q;
+ else {
+ buffers_needed = qeth_eddp_check_buffers_for_context(queue,ctx);
+ if (buffers_needed < 0) {
+ card->stats.tx_dropped++;
+ atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
+ return -EBUSY;
+ }
+ queue->next_buf_to_fill =
+ (queue->next_buf_to_fill + buffers_needed) %
+ QDIO_MAX_BUFFERS_PER_Q;
+ }
+ atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
+ if (ctx == NULL) {
+ qeth_fill_buffer(queue, buffer, skb);
+ qeth_flush_buffers(queue, 0, index, 1);
+ } else {
+ flush_cnt = qeth_eddp_fill_buffer(queue, ctx, index);
+ WARN_ON(buffers_needed != flush_cnt);
+ qeth_flush_buffers(queue, 0, index, flush_cnt);
+ }
+ return 0;
+}
+
+static inline int
+qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue,
+ struct sk_buff *skb, struct qeth_hdr *hdr,
+ int elements_needed, struct qeth_eddp_context *ctx)
+{
+ struct qeth_qdio_out_buffer *buffer;
+ int start_index;
+ int flush_count = 0;
+ int do_pack = 0;
+ int tmp;
+ int rc = 0;
+
+ QETH_DBF_TEXT(trace, 6, "dosndpkt");
+
+ /* spin until we get the queue ...
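+ * (busy-wait; the output handler may briefly hold the lock while it
+ * flushes packing buffers)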
*/ + while (atomic_compare_and_swap(QETH_OUT_Q_UNLOCKED, + QETH_OUT_Q_LOCKED, + &queue->state)); + start_index = queue->next_buf_to_fill; + buffer = &queue->bufs[queue->next_buf_to_fill]; + /* + * check if buffer is empty to make sure that we do not 'overtake' + * ourselves and try to fill a buffer that is already primed + */ + if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY){ + card->stats.tx_dropped++; + atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED); + return -EBUSY; + } + /* check if we need to switch packing state of this queue */ + qeth_switch_to_packing_if_needed(queue); + if (queue->do_pack){ + do_pack = 1; + if (ctx == NULL) { + /* does packet fit in current buffer? */ + if((QETH_MAX_BUFFER_ELEMENTS(card) - + buffer->next_element_to_fill) < elements_needed){ + /* ... no -> set state PRIMED */ + atomic_set(&buffer->state,QETH_QDIO_BUF_PRIMED); + flush_count++; + queue->next_buf_to_fill = + (queue->next_buf_to_fill + 1) % + QDIO_MAX_BUFFERS_PER_Q; + buffer = &queue->bufs[queue->next_buf_to_fill]; + /* we did a step forward, so check buffer state + * again */ + if (atomic_read(&buffer->state) != + QETH_QDIO_BUF_EMPTY){ + card->stats.tx_dropped++; + qeth_flush_buffers(queue, 0, start_index, flush_count); + atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED); + return -EBUSY; + } + } + } else { + /* check if we have enough elements (including following + * free buffers) to handle eddp context */ + if (qeth_eddp_check_buffers_for_context(queue,ctx) < 0){ + printk("eddp tx_dropped 1\n"); + card->stats.tx_dropped++; + rc = -EBUSY; + goto out; + } + } + } + if (ctx == NULL) + tmp = qeth_fill_buffer(queue, buffer, skb); + else { + tmp = qeth_eddp_fill_buffer(queue,ctx,queue->next_buf_to_fill); + if (tmp < 0) { + printk("eddp tx_dropped 2\n"); + card->stats.tx_dropped++; + rc = - EBUSY; + goto out; + } + } + queue->next_buf_to_fill = (queue->next_buf_to_fill + tmp) % + QDIO_MAX_BUFFERS_PER_Q; + flush_count += tmp; +out: + if (flush_count) + qeth_flush_buffers(queue, 0, start_index, flush_count); + /* + * queue->state will go from LOCKED -> UNLOCKED or from + * LOCKED_FLUSH -> LOCKED if output_handler wanted to 'notify' us + * (switch packing state or flush buffer to get another pci flag out). 
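+ * The handler then leaves queue->state at LOCKED_FLUSH instead of waiting.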
+ * In that case we will enter this loop + */ + while (atomic_dec_return(&queue->state)){ + flush_count = 0; + start_index = queue->next_buf_to_fill; + /* check if we can go back to non-packing state */ + flush_count += qeth_switch_to_nonpacking_if_needed(queue); + /* + * check if we need to flush a packing buffer to get a pci + * flag out on the queue + */ + if (!flush_count && !atomic_read(&queue->set_pci_flags_count)) + flush_count += qeth_flush_buffers_on_no_pci(queue); + if (flush_count) + qeth_flush_buffers(queue, 0, start_index, flush_count); + } + /* at this point the queue is UNLOCKED again */ +#ifdef CONFIG_QETH_PERF_STATS + if (do_pack) + queue->card->perf_stats.bufs_sent_pack += flush_count; +#endif /* CONFIG_QETH_PERF_STATS */ + + return rc; +} + +static inline int +qeth_send_packet(struct qeth_card *card, struct sk_buff *skb) +{ + int ipv = 0; + int cast_type; + struct qeth_qdio_out_q *queue; + struct qeth_hdr *hdr; + int elements_needed = 0; + enum qeth_large_send_types large_send = QETH_LARGE_SEND_NO; + struct qeth_eddp_context *ctx = NULL; + int rc; + + QETH_DBF_TEXT(trace, 6, "sendpkt"); + + if (!card->options.layer2) { + ipv = qeth_get_ip_version(skb); + if ((card->dev->hard_header == qeth_fake_header) && ipv) { + if ((skb = qeth_pskb_unshare(skb,GFP_ATOMIC)) == NULL) { + card->stats.tx_dropped++; + dev_kfree_skb_irq(skb); + return 0; + } + skb_pull(skb, QETH_FAKE_LL_LEN); + } + } + cast_type = qeth_get_cast_type(card, skb); + if ((cast_type == RTN_BROADCAST) && (card->info.broadcast_capable == 0)){ + card->stats.tx_dropped++; + card->stats.tx_errors++; + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + queue = card->qdio.out_qs + [qeth_get_priority_queue(card, skb, ipv, cast_type)]; + + if (skb_shinfo(skb)->tso_size) + large_send = card->options.large_send; + + if ((rc = qeth_prepare_skb(card, &skb, &hdr, ipv))){ + QETH_DBF_TEXT_(trace, 4, "pskbe%d", rc); + return rc; + } + /*are we able to do TSO ? 
If so ,prepare and send it from here */ + if ((large_send == QETH_LARGE_SEND_TSO) && + (cast_type == RTN_UNSPEC)) { + rc = qeth_tso_send_packet(card, skb, queue, + ipv, cast_type); + goto do_statistics; + } + + qeth_fill_header(card, hdr, skb, ipv, cast_type); + if (large_send == QETH_LARGE_SEND_EDDP) { + ctx = qeth_eddp_create_context(card, skb, hdr); + if (ctx == NULL) { + PRINT_WARN("could not create eddp context\n"); + return -EINVAL; + } + } else { + elements_needed = qeth_get_elements_no(card,(void*) hdr, skb); + if (!elements_needed) + return -EINVAL; + } + + if (card->info.type != QETH_CARD_TYPE_IQD) + rc = qeth_do_send_packet(card, queue, skb, hdr, + elements_needed, ctx); + else + rc = qeth_do_send_packet_fast(card, queue, skb, hdr, + elements_needed, ctx); +do_statistics: + if (!rc){ + card->stats.tx_packets++; + card->stats.tx_bytes += skb->len; +#ifdef CONFIG_QETH_PERF_STATS + if (skb_shinfo(skb)->tso_size) { + card->perf_stats.large_send_bytes += skb->len; + card->perf_stats.large_send_cnt++; + } + if (skb_shinfo(skb)->nr_frags > 0){ + card->perf_stats.sg_skbs_sent++; + /* nr_frags + skb->data */ + card->perf_stats.sg_frags_sent += + skb_shinfo(skb)->nr_frags + 1; + } +#endif /* CONFIG_QETH_PERF_STATS */ + } + if (ctx != NULL) { + /* drop creator's reference */ + qeth_eddp_put_context(ctx); + /* free skb; it's not referenced by a buffer */ + if (rc == 0) + dev_kfree_skb_any(skb); + + } + return rc; +} + +static int +qeth_mdio_read(struct net_device *dev, int phy_id, int regnum) +{ + struct qeth_card *card = (struct qeth_card *) dev->priv; + int rc = 0; + + switch(regnum){ + case MII_BMCR: /* Basic mode control register */ + rc = BMCR_FULLDPLX; + if ((card->info.link_type != QETH_LINK_TYPE_GBIT_ETH)&& + (card->info.link_type != QETH_LINK_TYPE_10GBIT_ETH)) + rc |= BMCR_SPEED100; + break; + case MII_BMSR: /* Basic mode status register */ + rc = BMSR_ERCAP | BMSR_ANEGCOMPLETE | BMSR_LSTATUS | + BMSR_10HALF | BMSR_10FULL | BMSR_100HALF | BMSR_100FULL | + BMSR_100BASE4; + break; + case MII_PHYSID1: /* PHYS ID 1 */ + rc = (dev->dev_addr[0] << 16) | (dev->dev_addr[1] << 8) | + dev->dev_addr[2]; + rc = (rc >> 5) & 0xFFFF; + break; + case MII_PHYSID2: /* PHYS ID 2 */ + rc = (dev->dev_addr[2] << 10) & 0xFFFF; + break; + case MII_ADVERTISE: /* Advertisement control reg */ + rc = ADVERTISE_ALL; + break; + case MII_LPA: /* Link partner ability reg */ + rc = LPA_10HALF | LPA_10FULL | LPA_100HALF | LPA_100FULL | + LPA_100BASE4 | LPA_LPACK; + break; + case MII_EXPANSION: /* Expansion register */ + break; + case MII_DCOUNTER: /* disconnect counter */ + break; + case MII_FCSCOUNTER: /* false carrier counter */ + break; + case MII_NWAYTEST: /* N-way auto-neg test register */ + break; + case MII_RERRCOUNTER: /* rx error counter */ + rc = card->stats.rx_errors; + break; + case MII_SREVISION: /* silicon revision */ + break; + case MII_RESV1: /* reserved 1 */ + break; + case MII_LBRERROR: /* loopback, rx, bypass error */ + break; + case MII_PHYADDR: /* physical address */ + break; + case MII_RESV2: /* reserved 2 */ + break; + case MII_TPISTATUS: /* TPI status for 10mbps */ + break; + case MII_NCONFIG: /* network interface config */ + break; + default: + rc = 0; + break; + } + return rc; +} + +static void +qeth_mdio_write(struct net_device *dev, int phy_id, int regnum, int value) +{ + switch(regnum){ + case MII_BMCR: /* Basic mode control register */ + case MII_BMSR: /* Basic mode status register */ + case MII_PHYSID1: /* PHYS ID 1 */ + case MII_PHYSID2: /* PHYS ID 2 */ + case MII_ADVERTISE: /* 
Advertisement control reg */ + case MII_LPA: /* Link partner ability reg */ + case MII_EXPANSION: /* Expansion register */ + case MII_DCOUNTER: /* disconnect counter */ + case MII_FCSCOUNTER: /* false carrier counter */ + case MII_NWAYTEST: /* N-way auto-neg test register */ + case MII_RERRCOUNTER: /* rx error counter */ + case MII_SREVISION: /* silicon revision */ + case MII_RESV1: /* reserved 1 */ + case MII_LBRERROR: /* loopback, rx, bypass error */ + case MII_PHYADDR: /* physical address */ + case MII_RESV2: /* reserved 2 */ + case MII_TPISTATUS: /* TPI status for 10mbps */ + case MII_NCONFIG: /* network interface config */ + default: + break; + } +} + +static inline const char * +qeth_arp_get_error_cause(int *rc) +{ + switch (*rc) { + case QETH_IPA_ARP_RC_FAILED: + *rc = -EIO; + return "operation failed"; + case QETH_IPA_ARP_RC_NOTSUPP: + *rc = -EOPNOTSUPP; + return "operation not supported"; + case QETH_IPA_ARP_RC_OUT_OF_RANGE: + *rc = -EINVAL; + return "argument out of range"; + case QETH_IPA_ARP_RC_Q_NOTSUPP: + *rc = -EOPNOTSUPP; + return "query operation not supported"; + case QETH_IPA_ARP_RC_Q_NO_DATA: + *rc = -ENOENT; + return "no query data available"; + default: + return "unknown error"; + } +} + +static int +qeth_send_simple_setassparms(struct qeth_card *, enum qeth_ipa_funcs, + __u16, long); + +static int +qeth_arp_set_no_entries(struct qeth_card *card, int no_entries) +{ + int tmp; + int rc; + + QETH_DBF_TEXT(trace,3,"arpstnoe"); + + /* TODO: really not supported by GuestLAN? */ + if (card->info.guestlan) + return -EOPNOTSUPP; + if (!qeth_is_supported(card,IPA_ARP_PROCESSING)) { + PRINT_WARN("ARP processing not supported " + "on %s!\n", QETH_CARD_IFNAME(card)); + return -EOPNOTSUPP; + } + rc = qeth_send_simple_setassparms(card, IPA_ARP_PROCESSING, + IPA_CMD_ASS_ARP_SET_NO_ENTRIES, + no_entries); + if (rc) { + tmp = rc; + PRINT_WARN("Could not set number of ARP entries on %s: " + "%s (0x%x/%d)\n", + QETH_CARD_IFNAME(card), qeth_arp_get_error_cause(&rc), + tmp, tmp); + } + return rc; +} + +static inline void +qeth_copy_arp_entries_stripped(struct qeth_arp_query_info *qinfo, + struct qeth_arp_query_data *qdata, + int entry_size, int uentry_size) +{ + char *entry_ptr; + char *uentry_ptr; + int i; + + entry_ptr = (char *)&qdata->data; + uentry_ptr = (char *)(qinfo->udata + qinfo->udata_offset); + for (i = 0; i < qdata->no_entries; ++i){ + /* strip off 32 bytes "media specific information" */ + memcpy(uentry_ptr, (entry_ptr + 32), entry_size - 32); + entry_ptr += entry_size; + uentry_ptr += uentry_size; + } +} + +static int +qeth_arp_query_cb(struct qeth_card *card, struct qeth_reply *reply, + unsigned long data) +{ + struct qeth_ipa_cmd *cmd; + struct qeth_arp_query_data *qdata; + struct qeth_arp_query_info *qinfo; + int entry_size; + int uentry_size; + int i; + + QETH_DBF_TEXT(trace,4,"arpquecb"); + + qinfo = (struct qeth_arp_query_info *) reply->param; + cmd = (struct qeth_ipa_cmd *) data; + if (cmd->hdr.return_code) { + QETH_DBF_TEXT_(trace,4,"qaer1%i", cmd->hdr.return_code); + return 0; + } + if (cmd->data.setassparms.hdr.return_code) { + cmd->hdr.return_code = cmd->data.setassparms.hdr.return_code; + QETH_DBF_TEXT_(trace,4,"qaer2%i", cmd->hdr.return_code); + return 0; + } + qdata = &cmd->data.setassparms.data.query_arp; + switch(qdata->reply_bits){ + case 5: + uentry_size = entry_size = sizeof(struct qeth_arp_qi_entry5); + if (qinfo->mask_bits & QETH_QARP_STRIP_ENTRIES) + uentry_size = sizeof(struct qeth_arp_qi_entry5_short); + break; + case 7: + /* fall through to default 
*/ + default: + /* tr is the same as eth -> entry7 */ + uentry_size = entry_size = sizeof(struct qeth_arp_qi_entry7); + if (qinfo->mask_bits & QETH_QARP_STRIP_ENTRIES) + uentry_size = sizeof(struct qeth_arp_qi_entry7_short); + break; + } + /* check if there is enough room in userspace */ + if ((qinfo->udata_len - qinfo->udata_offset) < + qdata->no_entries * uentry_size){ + QETH_DBF_TEXT_(trace, 4, "qaer3%i", -ENOMEM); + cmd->hdr.return_code = -ENOMEM; + PRINT_WARN("query ARP user space buffer is too small for " + "the returned number of ARP entries. " + "Aborting query!\n"); + goto out_error; + } + QETH_DBF_TEXT_(trace, 4, "anore%i", + cmd->data.setassparms.hdr.number_of_replies); + QETH_DBF_TEXT_(trace, 4, "aseqn%i", cmd->data.setassparms.hdr.seq_no); + QETH_DBF_TEXT_(trace, 4, "anoen%i", qdata->no_entries); + + if (qinfo->mask_bits & QETH_QARP_STRIP_ENTRIES) { + /* strip off "media specific information" */ + qeth_copy_arp_entries_stripped(qinfo, qdata, entry_size, + uentry_size); + } else + /*copy entries to user buffer*/ + memcpy(qinfo->udata + qinfo->udata_offset, + (char *)&qdata->data, qdata->no_entries*uentry_size); + + qinfo->no_entries += qdata->no_entries; + qinfo->udata_offset += (qdata->no_entries*uentry_size); + /* check if all replies received ... */ + if (cmd->data.setassparms.hdr.seq_no < + cmd->data.setassparms.hdr.number_of_replies) + return 1; + memcpy(qinfo->udata, &qinfo->no_entries, 4); + /* keep STRIP_ENTRIES flag so the user program can distinguish + * stripped entries from normal ones */ + if (qinfo->mask_bits & QETH_QARP_STRIP_ENTRIES) + qdata->reply_bits |= QETH_QARP_STRIP_ENTRIES; + memcpy(qinfo->udata + QETH_QARP_MASK_OFFSET,&qdata->reply_bits,2); + return 0; +out_error: + i = 0; + memcpy(qinfo->udata, &i, 4); + return 0; +} + +static int +qeth_send_ipa_arp_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob, + int len, int (*reply_cb)(struct qeth_card *, + struct qeth_reply *, + unsigned long), + void *reply_param) +{ + QETH_DBF_TEXT(trace,4,"sendarp"); + + memcpy(iob->data, IPA_PDU_HEADER, IPA_PDU_HEADER_SIZE); + memcpy(QETH_IPA_CMD_DEST_ADDR(iob->data), + &card->token.ulp_connection_r, QETH_MPC_TOKEN_LENGTH); + return qeth_send_control_data(card, IPA_PDU_HEADER_SIZE + len, iob, + reply_cb, reply_param); +} + +static int +qeth_send_ipa_snmp_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob, + int len, int (*reply_cb)(struct qeth_card *, + struct qeth_reply *, + unsigned long), + void *reply_param) +{ + u16 s1, s2; + + QETH_DBF_TEXT(trace,4,"sendsnmp"); + + memcpy(iob->data, IPA_PDU_HEADER, IPA_PDU_HEADER_SIZE); + memcpy(QETH_IPA_CMD_DEST_ADDR(iob->data), + &card->token.ulp_connection_r, QETH_MPC_TOKEN_LENGTH); + /* adjust PDU length fields in IPA_PDU_HEADER */ + s1 = (u32) IPA_PDU_HEADER_SIZE + len; + s2 = (u32) len; + memcpy(QETH_IPA_PDU_LEN_TOTAL(iob->data), &s1, 2); + memcpy(QETH_IPA_PDU_LEN_PDU1(iob->data), &s2, 2); + memcpy(QETH_IPA_PDU_LEN_PDU2(iob->data), &s2, 2); + memcpy(QETH_IPA_PDU_LEN_PDU3(iob->data), &s2, 2); + return qeth_send_control_data(card, IPA_PDU_HEADER_SIZE + len, iob, + reply_cb, reply_param); +} + +static struct qeth_cmd_buffer * +qeth_get_setassparms_cmd(struct qeth_card *, enum qeth_ipa_funcs, + __u16, __u16, enum qeth_prot_versions); +static int +qeth_arp_query(struct qeth_card *card, char *udata) +{ + struct qeth_cmd_buffer *iob; + struct qeth_arp_query_info qinfo = {0, }; + int tmp; + int rc; + + QETH_DBF_TEXT(trace,3,"arpquery"); + + /* + * currently GuestLAN does only deliver all zeros on query arp, + * even though arp 
processing is supported (according to IPA supp.
+ * funcs flags); since all zeros is no valuable information,
+ * we say EOPNOTSUPP for all ARP functions
+ */
+ /*if (card->info.guestlan)
+ return -EOPNOTSUPP; */
+ if (!qeth_is_supported(card,/*IPA_QUERY_ARP_ADDR_INFO*/
+ IPA_ARP_PROCESSING)) {
+ PRINT_WARN("ARP processing not supported "
+ "on %s!\n", QETH_CARD_IFNAME(card));
+ return -EOPNOTSUPP;
+ }
+ /* get size of userspace buffer and mask_bits -> 6 bytes */
+ if (copy_from_user(&qinfo, udata, 6))
+ return -EFAULT;
+ if (!(qinfo.udata = kmalloc(qinfo.udata_len, GFP_KERNEL)))
+ return -ENOMEM;
+ memset(qinfo.udata, 0, qinfo.udata_len);
+ qinfo.udata_offset = QETH_QARP_ENTRIES_OFFSET;
+ iob = qeth_get_setassparms_cmd(card, IPA_ARP_PROCESSING,
+ IPA_CMD_ASS_ARP_QUERY_INFO,
+ sizeof(int),QETH_PROT_IPV4);
+
+ rc = qeth_send_ipa_arp_cmd(card, iob,
+ QETH_SETASS_BASE_LEN+QETH_ARP_CMD_LEN,
+ qeth_arp_query_cb, (void *)&qinfo);
+ if (rc) {
+ tmp = rc;
+ PRINT_WARN("Error while querying ARP cache on %s: %s "
+ "(0x%x/%d)\n",
+ QETH_CARD_IFNAME(card), qeth_arp_get_error_cause(&rc),
+ tmp, tmp);
+ copy_to_user(udata, qinfo.udata, 4);
+ } else {
+ copy_to_user(udata, qinfo.udata, qinfo.udata_len);
+ }
+ kfree(qinfo.udata);
+ return rc;
+}
+
+/**
+ * SNMP command callback
+ */
+static int
+qeth_snmp_command_cb(struct qeth_card *card, struct qeth_reply *reply,
+ unsigned long sdata)
+{
+ struct qeth_ipa_cmd *cmd;
+ struct qeth_arp_query_info *qinfo;
+ struct qeth_snmp_cmd *snmp;
+ unsigned char *data;
+ __u16 data_len;
+
+ QETH_DBF_TEXT(trace,3,"snpcmdcb");
+
+ cmd = (struct qeth_ipa_cmd *) sdata;
+ data = (unsigned char *)((char *)cmd - reply->offset);
+ qinfo = (struct qeth_arp_query_info *) reply->param;
+ snmp = &cmd->data.setadapterparms.data.snmp;
+
+ if (cmd->hdr.return_code) {
+ QETH_DBF_TEXT_(trace,4,"scer1%i", cmd->hdr.return_code);
+ return 0;
+ }
+ if (cmd->data.setadapterparms.hdr.return_code) {
+ cmd->hdr.return_code = cmd->data.setadapterparms.hdr.return_code;
+ QETH_DBF_TEXT_(trace,4,"scer2%i", cmd->hdr.return_code);
+ return 0;
+ }
+ data_len = *((__u16*)QETH_IPA_PDU_LEN_PDU1(data));
+ if (cmd->data.setadapterparms.hdr.seq_no == 1)
+ data_len -= (__u16)((char *)&snmp->data - (char *)cmd);
+ else
+ data_len -= (__u16)((char*)&snmp->request - (char *)cmd);
+
+ /* check if there is enough room in userspace */
+ if ((qinfo->udata_len - qinfo->udata_offset) < data_len) {
+ QETH_DBF_TEXT_(trace, 4, "scer3%i", -ENOMEM);
+ cmd->hdr.return_code = -ENOMEM;
+ return 0;
+ }
+ QETH_DBF_TEXT_(trace, 4, "snore%i",
+ cmd->data.setadapterparms.hdr.used_total);
+ QETH_DBF_TEXT_(trace, 4, "sseqn%i", cmd->data.setadapterparms.hdr.seq_no);
+ /* copy entries to user buffer */
+ if (cmd->data.setadapterparms.hdr.seq_no == 1) {
+ memcpy(qinfo->udata + qinfo->udata_offset,
+ (char *)snmp,
+ data_len + offsetof(struct qeth_snmp_cmd,data));
+ qinfo->udata_offset += offsetof(struct qeth_snmp_cmd, data);
+ } else {
+ memcpy(qinfo->udata + qinfo->udata_offset,
+ (char *)&snmp->request, data_len);
+ }
+ qinfo->udata_offset += data_len;
+ /* check if all replies received ...
*/ + QETH_DBF_TEXT_(trace, 4, "srtot%i", + cmd->data.setadapterparms.hdr.used_total); + QETH_DBF_TEXT_(trace, 4, "srseq%i", + cmd->data.setadapterparms.hdr.seq_no); + if (cmd->data.setadapterparms.hdr.seq_no < + cmd->data.setadapterparms.hdr.used_total) + return 1; + return 0; +} + +static struct qeth_cmd_buffer * +qeth_get_ipacmd_buffer(struct qeth_card *, enum qeth_ipa_cmds, + enum qeth_prot_versions ); + +static struct qeth_cmd_buffer * +qeth_get_adapter_cmd(struct qeth_card *card, __u32 command, __u32 cmdlen) +{ + struct qeth_cmd_buffer *iob; + struct qeth_ipa_cmd *cmd; + + iob = qeth_get_ipacmd_buffer(card,IPA_CMD_SETADAPTERPARMS, + QETH_PROT_IPV4); + cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); + cmd->data.setadapterparms.hdr.cmdlength = cmdlen; + cmd->data.setadapterparms.hdr.command_code = command; + cmd->data.setadapterparms.hdr.used_total = 1; + cmd->data.setadapterparms.hdr.seq_no = 1; + + return iob; +} + +/** + * function to send SNMP commands to OSA-E card + */ +static int +qeth_snmp_command(struct qeth_card *card, char *udata) +{ + struct qeth_cmd_buffer *iob; + struct qeth_ipa_cmd *cmd; + struct qeth_snmp_ureq *ureq; + int req_len; + struct qeth_arp_query_info qinfo = {0, }; + int rc = 0; + + QETH_DBF_TEXT(trace,3,"snmpcmd"); + + if (card->info.guestlan) + return -EOPNOTSUPP; + + if ((!qeth_adp_supported(card,IPA_SETADP_SET_SNMP_CONTROL)) && + (!card->options.layer2) ) { + PRINT_WARN("SNMP Query MIBS not supported " + "on %s!\n", QETH_CARD_IFNAME(card)); + return -EOPNOTSUPP; + } + /* skip 4 bytes (data_len struct member) to get req_len */ + if (copy_from_user(&req_len, udata + sizeof(int), sizeof(int))) + return -EFAULT; + ureq = kmalloc(req_len+sizeof(struct qeth_snmp_ureq_hdr), GFP_KERNEL); + if (!ureq) { + QETH_DBF_TEXT(trace, 2, "snmpnome"); + return -ENOMEM; + } + if (copy_from_user(ureq, udata, + req_len+sizeof(struct qeth_snmp_ureq_hdr))){ + kfree(ureq); + return -EFAULT; + } + qinfo.udata_len = ureq->hdr.data_len; + if (!(qinfo.udata = kmalloc(qinfo.udata_len, GFP_KERNEL))){ + kfree(ureq); + return -ENOMEM; + } + memset(qinfo.udata, 0, qinfo.udata_len); + qinfo.udata_offset = sizeof(struct qeth_snmp_ureq_hdr); + + iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_SNMP_CONTROL, + QETH_SNMP_SETADP_CMDLENGTH + req_len); + cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); + memcpy(&cmd->data.setadapterparms.data.snmp, &ureq->cmd, req_len); + rc = qeth_send_ipa_snmp_cmd(card, iob, QETH_SETADP_BASE_LEN + req_len, + qeth_snmp_command_cb, (void *)&qinfo); + if (rc) + PRINT_WARN("SNMP command failed on %s: (0x%x)\n", + QETH_CARD_IFNAME(card), rc); + else + copy_to_user(udata, qinfo.udata, qinfo.udata_len); + + kfree(ureq); + kfree(qinfo.udata); + return rc; +} + +static int +qeth_default_setassparms_cb(struct qeth_card *, struct qeth_reply *, + unsigned long); + +static int +qeth_send_setassparms(struct qeth_card *, struct qeth_cmd_buffer *, + __u16, long, + int (*reply_cb) + (struct qeth_card *, struct qeth_reply *, unsigned long), + void *reply_param); + +static int +qeth_arp_add_entry(struct qeth_card *card, struct qeth_arp_cache_entry *entry) +{ + struct qeth_cmd_buffer *iob; + char buf[16]; + int tmp; + int rc; + + QETH_DBF_TEXT(trace,3,"arpadent"); + + /* + * currently GuestLAN does only deliver all zeros on query arp, + * even though arp processing is supported (according to IPA supp. 
+ * funcs flags); since all zeros is no valuable information,
+ * we say EOPNOTSUPP for all ARP functions
+ */
+ if (card->info.guestlan)
+ return -EOPNOTSUPP;
+ if (!qeth_is_supported(card,IPA_ARP_PROCESSING)) {
+ PRINT_WARN("ARP processing not supported "
+ "on %s!\n", QETH_CARD_IFNAME(card));
+ return -EOPNOTSUPP;
+ }
+
+ iob = qeth_get_setassparms_cmd(card, IPA_ARP_PROCESSING,
+ IPA_CMD_ASS_ARP_ADD_ENTRY,
+ sizeof(struct qeth_arp_cache_entry),
+ QETH_PROT_IPV4);
+ rc = qeth_send_setassparms(card, iob,
+ sizeof(struct qeth_arp_cache_entry),
+ (unsigned long) entry,
+ qeth_default_setassparms_cb, NULL);
+ if (rc) {
+ tmp = rc;
+ qeth_ipaddr4_to_string((u8 *)entry->ipaddr, buf);
+ PRINT_WARN("Could not add ARP entry for address %s on %s: "
+ "%s (0x%x/%d)\n",
+ buf, QETH_CARD_IFNAME(card),
+ qeth_arp_get_error_cause(&rc), tmp, tmp);
+ }
+ return rc;
+}
+
+static int
+qeth_arp_remove_entry(struct qeth_card *card, struct qeth_arp_cache_entry *entry)
+{
+ struct qeth_cmd_buffer *iob;
+ char buf[16] = {0, };
+ int tmp;
+ int rc;
+
+ QETH_DBF_TEXT(trace,3,"arprment");
+
+ /*
+ * currently GuestLAN does only deliver all zeros on query arp,
+ * even though arp processing is supported (according to IPA supp.
+ * funcs flags); since all zeros is no valuable information,
+ * we say EOPNOTSUPP for all ARP functions
+ */
+ if (card->info.guestlan)
+ return -EOPNOTSUPP;
+ if (!qeth_is_supported(card,IPA_ARP_PROCESSING)) {
+ PRINT_WARN("ARP processing not supported "
+ "on %s!\n", QETH_CARD_IFNAME(card));
+ return -EOPNOTSUPP;
+ }
+ memcpy(buf, entry, 12);
+ iob = qeth_get_setassparms_cmd(card, IPA_ARP_PROCESSING,
+ IPA_CMD_ASS_ARP_REMOVE_ENTRY,
+ 12,
+ QETH_PROT_IPV4);
+ rc = qeth_send_setassparms(card, iob,
+ 12, (unsigned long)buf,
+ qeth_default_setassparms_cb, NULL);
+ if (rc) {
+ tmp = rc;
+ memset(buf, 0, 16);
+ qeth_ipaddr4_to_string((u8 *)entry->ipaddr, buf);
+ PRINT_WARN("Could not delete ARP entry for address %s on %s: "
+ "%s (0x%x/%d)\n",
+ buf, QETH_CARD_IFNAME(card),
+ qeth_arp_get_error_cause(&rc), tmp, tmp);
+ }
+ return rc;
+}
+
+static int
+qeth_arp_flush_cache(struct qeth_card *card)
+{
+ int rc;
+ int tmp;
+
+ QETH_DBF_TEXT(trace,3,"arpflush");
+
+ /*
+ * currently GuestLAN does only deliver all zeros on query arp,
+ * even though arp processing is supported (according to IPA supp.
+ * funcs flags); since all zeros is no valueable information, + * we say EOPNOTSUPP for all ARP functions + */ + if (card->info.guestlan || (card->info.type == QETH_CARD_TYPE_IQD)) + return -EOPNOTSUPP; + if (!qeth_is_supported(card,IPA_ARP_PROCESSING)) { + PRINT_WARN("ARP processing not supported " + "on %s!\n", QETH_CARD_IFNAME(card)); + return -EOPNOTSUPP; + } + rc = qeth_send_simple_setassparms(card, IPA_ARP_PROCESSING, + IPA_CMD_ASS_ARP_FLUSH_CACHE, 0); + if (rc){ + tmp = rc; + PRINT_WARN("Could not flush ARP cache on %s: %s (0x%x/%d)\n", + QETH_CARD_IFNAME(card), qeth_arp_get_error_cause(&rc), + tmp, tmp); + } + return rc; +} + +static int +qeth_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) +{ + struct qeth_card *card = (struct qeth_card *)dev->priv; + struct qeth_arp_cache_entry arp_entry; + struct mii_ioctl_data *mii_data; + int rc = 0; + + if (!card) + return -ENODEV; + + if ((card->state != CARD_STATE_UP) && + (card->state != CARD_STATE_SOFTSETUP)) + return -ENODEV; + + switch (cmd){ + case SIOC_QETH_ARP_SET_NO_ENTRIES: + if ( !capable(CAP_NET_ADMIN) || + (card->options.layer2) ) { + rc = -EPERM; + break; + } + rc = qeth_arp_set_no_entries(card, rq->ifr_ifru.ifru_ivalue); + break; + case SIOC_QETH_ARP_QUERY_INFO: + if ( !capable(CAP_NET_ADMIN) || + (card->options.layer2) ) { + rc = -EPERM; + break; + } + rc = qeth_arp_query(card, rq->ifr_ifru.ifru_data); + break; + case SIOC_QETH_ARP_ADD_ENTRY: + if ( !capable(CAP_NET_ADMIN) || + (card->options.layer2) ) { + rc = -EPERM; + break; + } + if (copy_from_user(&arp_entry, rq->ifr_ifru.ifru_data, + sizeof(struct qeth_arp_cache_entry))) + rc = -EFAULT; + else + rc = qeth_arp_add_entry(card, &arp_entry); + break; + case SIOC_QETH_ARP_REMOVE_ENTRY: + if ( !capable(CAP_NET_ADMIN) || + (card->options.layer2) ) { + rc = -EPERM; + break; + } + if (copy_from_user(&arp_entry, rq->ifr_ifru.ifru_data, + sizeof(struct qeth_arp_cache_entry))) + rc = -EFAULT; + else + rc = qeth_arp_remove_entry(card, &arp_entry); + break; + case SIOC_QETH_ARP_FLUSH_CACHE: + if ( !capable(CAP_NET_ADMIN) || + (card->options.layer2) ) { + rc = -EPERM; + break; + } + rc = qeth_arp_flush_cache(card); + break; + case SIOC_QETH_ADP_SET_SNMP_CONTROL: + rc = qeth_snmp_command(card, rq->ifr_ifru.ifru_data); + break; + case SIOC_QETH_GET_CARD_TYPE: + if ((card->info.type == QETH_CARD_TYPE_OSAE) && + !card->info.guestlan) + return 1; + return 0; + break; + case SIOCGMIIPHY: + mii_data = if_mii(rq); + mii_data->phy_id = 0; + break; + case SIOCGMIIREG: + mii_data = if_mii(rq); + if (mii_data->phy_id != 0) + rc = -EINVAL; + else + mii_data->val_out = qeth_mdio_read(dev,mii_data->phy_id, + mii_data->reg_num); + break; + case SIOCSMIIREG: + rc = -EOPNOTSUPP; + break; + /* TODO: remove return if qeth_mdio_write does something */ + if (!capable(CAP_NET_ADMIN)){ + rc = -EPERM; + break; + } + mii_data = if_mii(rq); + if (mii_data->phy_id != 0) + rc = -EINVAL; + else + qeth_mdio_write(dev, mii_data->phy_id, mii_data->reg_num, + mii_data->val_in); + break; + default: + rc = -EOPNOTSUPP; + } + if (rc) + QETH_DBF_TEXT_(trace, 2, "ioce%d", rc); + return rc; +} + +static struct net_device_stats * +qeth_get_stats(struct net_device *dev) +{ + struct qeth_card *card; + + card = (struct qeth_card *) (dev->priv); + + QETH_DBF_TEXT(trace,5,"getstat"); + + return &card->stats; +} + +static int +qeth_change_mtu(struct net_device *dev, int new_mtu) +{ + struct qeth_card *card; + char dbf_text[15]; + + card = (struct qeth_card *) (dev->priv); + + QETH_DBF_TEXT(trace,4,"chgmtu"); + 
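+	/*
+	 * A short sketch of the validation below (assuming qeth_mtu_is_valid()
+	 * checks the card's link-type specific MTU range): the new MTU must
+	 * lie in [64, 65535], and beyond that it is accepted when either the
+	 * IP fragmentation assist is available or the value is valid for
+	 * this card; e.g. new_mtu = 32 always fails with -EINVAL.
+	 */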
sprintf(dbf_text, "%8x", new_mtu); + QETH_DBF_TEXT(trace,4,dbf_text); + + if (new_mtu < 64) + return -EINVAL; + if (new_mtu > 65535) + return -EINVAL; + if ((!qeth_is_supported(card,IPA_IP_FRAGMENTATION)) && + (!qeth_mtu_is_valid(card, new_mtu))) + return -EINVAL; + dev->mtu = new_mtu; + return 0; +} + +#ifdef CONFIG_QETH_VLAN +static void +qeth_vlan_rx_register(struct net_device *dev, struct vlan_group *grp) +{ + struct qeth_card *card; + unsigned long flags; + + QETH_DBF_TEXT(trace,4,"vlanreg"); + + card = (struct qeth_card *) dev->priv; + spin_lock_irqsave(&card->vlanlock, flags); + card->vlangrp = grp; + spin_unlock_irqrestore(&card->vlanlock, flags); +} + +static inline void +qeth_free_vlan_buffer(struct qeth_card *card, struct qeth_qdio_out_buffer *buf, + unsigned short vid) +{ + int i; + struct sk_buff *skb; + struct sk_buff_head tmp_list; + + skb_queue_head_init(&tmp_list); + for(i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i){ + while ((skb = skb_dequeue(&buf->skb_list))){ + if (vlan_tx_tag_present(skb) && + (vlan_tx_tag_get(skb) == vid)) { + atomic_dec(&skb->users); + dev_kfree_skb(skb); + } else + skb_queue_tail(&tmp_list, skb); + } + } + while ((skb = skb_dequeue(&tmp_list))) + skb_queue_tail(&buf->skb_list, skb); +} + +static void +qeth_free_vlan_skbs(struct qeth_card *card, unsigned short vid) +{ + int i, j; + + QETH_DBF_TEXT(trace, 4, "frvlskbs"); + for (i = 0; i < card->qdio.no_out_queues; ++i){ + for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) + qeth_free_vlan_buffer(card, &card->qdio. + out_qs[i]->bufs[j], vid); + } +} + +static void +qeth_free_vlan_addresses4(struct qeth_card *card, unsigned short vid) +{ + struct in_device *in_dev; + struct in_ifaddr *ifa; + struct qeth_ipaddr *addr; + + QETH_DBF_TEXT(trace, 4, "frvaddr4"); + if (!card->vlangrp) + return; + rcu_read_lock(); + in_dev = __in_dev_get(card->vlangrp->vlan_devices[vid]); + if (!in_dev) + goto out; + for (ifa = in_dev->ifa_list; ifa; ifa = ifa->ifa_next) { + addr = qeth_get_addr_buffer(QETH_PROT_IPV4); + if (addr){ + addr->u.a4.addr = ifa->ifa_address; + addr->u.a4.mask = ifa->ifa_mask; + addr->type = QETH_IP_TYPE_NORMAL; + if (!qeth_delete_ip(card, addr)) + kfree(addr); + } + } +out: + rcu_read_unlock(); +} + +static void +qeth_free_vlan_addresses6(struct qeth_card *card, unsigned short vid) +{ +#ifdef CONFIG_QETH_IPV6 + struct inet6_dev *in6_dev; + struct inet6_ifaddr *ifa; + struct qeth_ipaddr *addr; + + QETH_DBF_TEXT(trace, 4, "frvaddr6"); + if (!card->vlangrp) + return; + in6_dev = in6_dev_get(card->vlangrp->vlan_devices[vid]); + if (!in6_dev) + return; + for (ifa = in6_dev->addr_list; ifa; ifa = ifa->lst_next){ + addr = qeth_get_addr_buffer(QETH_PROT_IPV6); + if (addr){ + memcpy(&addr->u.a6.addr, &ifa->addr, + sizeof(struct in6_addr)); + addr->u.a6.pfxlen = ifa->prefix_len; + addr->type = QETH_IP_TYPE_NORMAL; + if (!qeth_delete_ip(card, addr)) + kfree(addr); + } + } + in6_dev_put(in6_dev); +#endif /* CONFIG_QETH_IPV6 */ +} + +static void +qeth_layer2_send_setdelvlan(struct qeth_card *card, __u16 i, + enum qeth_ipa_cmds ipacmd) +{ + int rc; + struct qeth_ipa_cmd *cmd; + struct qeth_cmd_buffer *iob; + + QETH_DBF_TEXT_(trace, 4, "L2sdv%x",ipacmd); + iob = qeth_get_ipacmd_buffer(card, ipacmd, QETH_PROT_IPV4); + cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); + cmd->data.setdelvlan.vlan_id = i; + + rc = qeth_send_ipa_cmd(card, iob, NULL, NULL); + if (rc) { + PRINT_ERR("Error in processing VLAN %i on %s: 0x%x. 
" + "Continuing\n",i, QETH_CARD_IFNAME(card), rc); + QETH_DBF_TEXT_(trace, 2, "L2VL%4x", ipacmd); + QETH_DBF_TEXT_(trace, 2, "L2%s", CARD_BUS_ID(card)); + QETH_DBF_TEXT_(trace, 2, "err%d", rc); + } +} + +static void +qeth_layer2_process_vlans(struct qeth_card *card, int clear) +{ + unsigned short i; + + QETH_DBF_TEXT(trace, 3, "L2prcvln"); + + if (!card->vlangrp) + return; + for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) { + if (card->vlangrp->vlan_devices[i] == NULL) + continue; + if (clear) + qeth_layer2_send_setdelvlan(card, i, IPA_CMD_DELVLAN); + else + qeth_layer2_send_setdelvlan(card, i, IPA_CMD_SETVLAN); + } +} + +/*add_vid is layer 2 used only ....*/ +static void +qeth_vlan_rx_add_vid(struct net_device *dev, unsigned short vid) +{ + struct qeth_card *card; + + QETH_DBF_TEXT_(trace, 4, "aid:%d", vid); + + card = (struct qeth_card *) dev->priv; + if (!card->options.layer2) + return; + qeth_layer2_send_setdelvlan(card, vid, IPA_CMD_SETVLAN); +} + +/*... kill_vid used for both modes*/ +static void +qeth_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid) +{ + struct qeth_card *card; + unsigned long flags; + + QETH_DBF_TEXT_(trace, 4, "kid:%d", vid); + + card = (struct qeth_card *) dev->priv; + /* free all skbs for the vlan device */ + qeth_free_vlan_skbs(card, vid); + spin_lock_irqsave(&card->vlanlock, flags); + /* unregister IP addresses of vlan device */ + qeth_free_vlan_addresses4(card, vid); + qeth_free_vlan_addresses6(card, vid); + if (card->vlangrp) + card->vlangrp->vlan_devices[vid] = NULL; + spin_unlock_irqrestore(&card->vlanlock, flags); + if (card->options.layer2) + qeth_layer2_send_setdelvlan(card, vid, IPA_CMD_DELVLAN); + qeth_set_multicast_list(card->dev); +} +#endif + +/** + * set multicast address on card + */ +static void +qeth_set_multicast_list(struct net_device *dev) +{ + struct qeth_card *card = (struct qeth_card *) dev->priv; + + QETH_DBF_TEXT(trace,3,"setmulti"); + qeth_delete_mc_addresses(card); + qeth_add_multicast_ipv4(card); +#ifdef CONFIG_QETH_IPV6 + qeth_add_multicast_ipv6(card); +#endif + if (qeth_set_thread_start_bit(card, QETH_SET_IP_THREAD) == 0) + schedule_work(&card->kernel_thread_starter); +} + +static int +qeth_neigh_setup(struct net_device *dev, struct neigh_parms *np) +{ + return 0; +} + +static void +qeth_get_mac_for_ipm(__u32 ipm, char *mac, struct net_device *dev) +{ + if (dev->type == ARPHRD_IEEE802_TR) + ip_tr_mc_map(ipm, mac); + else + ip_eth_mc_map(ipm, mac); +} + +static struct qeth_ipaddr * +qeth_get_addr_buffer(enum qeth_prot_versions prot) +{ + struct qeth_ipaddr *addr; + + addr = kmalloc(sizeof(struct qeth_ipaddr), GFP_ATOMIC); + if (addr == NULL) { + PRINT_WARN("Not enough memory to add address\n"); + return NULL; + } + memset(addr,0,sizeof(struct qeth_ipaddr)); + addr->type = QETH_IP_TYPE_NORMAL; + addr->proto = prot; + return addr; +} + +static void +qeth_delete_mc_addresses(struct qeth_card *card) +{ + struct qeth_ipaddr *iptodo; + unsigned long flags; + + QETH_DBF_TEXT(trace,4,"delmc"); + iptodo = qeth_get_addr_buffer(QETH_PROT_IPV4); + if (!iptodo) { + QETH_DBF_TEXT(trace, 2, "dmcnomem"); + return; + } + iptodo->type = QETH_IP_TYPE_DEL_ALL_MC; + spin_lock_irqsave(&card->ip_lock, flags); + if (!__qeth_insert_ip_todo(card, iptodo, 0)) + kfree(iptodo); + spin_unlock_irqrestore(&card->ip_lock, flags); +} + +static inline void +qeth_add_mc(struct qeth_card *card, struct in_device *in4_dev) +{ + struct qeth_ipaddr *ipm; + struct ip_mc_list *im4; + char buf[MAX_ADDR_LEN]; + + QETH_DBF_TEXT(trace,4,"addmc"); + for (im4 = 
in4_dev->mc_list; im4; im4 = im4->next) { + qeth_get_mac_for_ipm(im4->multiaddr, buf, in4_dev->dev); + ipm = qeth_get_addr_buffer(QETH_PROT_IPV4); + if (!ipm) + continue; + ipm->u.a4.addr = im4->multiaddr; + memcpy(ipm->mac,buf,OSA_ADDR_LEN); + ipm->is_multicast = 1; + if (!qeth_add_ip(card,ipm)) + kfree(ipm); + } +} + +static inline void +qeth_add_vlan_mc(struct qeth_card *card) +{ +#ifdef CONFIG_QETH_VLAN + struct in_device *in_dev; + struct vlan_group *vg; + int i; + + QETH_DBF_TEXT(trace,4,"addmcvl"); + if ( ((card->options.layer2 == 0) && + (!qeth_is_supported(card,IPA_FULL_VLAN))) || + (card->vlangrp == NULL) ) + return ; + + vg = card->vlangrp; + for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) { + if (vg->vlan_devices[i] == NULL || + !(vg->vlan_devices[i]->flags & IFF_UP)) + continue; + in_dev = in_dev_get(vg->vlan_devices[i]); + if (!in_dev) + continue; + read_lock(&in_dev->mc_list_lock); + qeth_add_mc(card,in_dev); + read_unlock(&in_dev->mc_list_lock); + in_dev_put(in_dev); + } +#endif +} + +static void +qeth_add_multicast_ipv4(struct qeth_card *card) +{ + struct in_device *in4_dev; + + QETH_DBF_TEXT(trace,4,"chkmcv4"); + in4_dev = in_dev_get(card->dev); + if (in4_dev == NULL) + return; + read_lock(&in4_dev->mc_list_lock); + qeth_add_mc(card, in4_dev); + qeth_add_vlan_mc(card); + read_unlock(&in4_dev->mc_list_lock); + in_dev_put(in4_dev); +} + +#ifdef CONFIG_QETH_IPV6 +static inline void +qeth_add_mc6(struct qeth_card *card, struct inet6_dev *in6_dev) +{ + struct qeth_ipaddr *ipm; + struct ifmcaddr6 *im6; + char buf[MAX_ADDR_LEN]; + + QETH_DBF_TEXT(trace,4,"addmc6"); + for (im6 = in6_dev->mc_list; im6 != NULL; im6 = im6->next) { + ndisc_mc_map(&im6->mca_addr, buf, in6_dev->dev, 0); + ipm = qeth_get_addr_buffer(QETH_PROT_IPV6); + if (!ipm) + continue; + ipm->is_multicast = 1; + memcpy(ipm->mac,buf,OSA_ADDR_LEN); + memcpy(&ipm->u.a6.addr,&im6->mca_addr.s6_addr, + sizeof(struct in6_addr)); + if (!qeth_add_ip(card,ipm)) + kfree(ipm); + } +} + +static inline void +qeth_add_vlan_mc6(struct qeth_card *card) +{ +#ifdef CONFIG_QETH_VLAN + struct inet6_dev *in_dev; + struct vlan_group *vg; + int i; + + QETH_DBF_TEXT(trace,4,"admc6vl"); + if ( ((card->options.layer2 == 0) && + (!qeth_is_supported(card,IPA_FULL_VLAN))) || + (card->vlangrp == NULL)) + return ; + + vg = card->vlangrp; + for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) { + if (vg->vlan_devices[i] == NULL || + !(vg->vlan_devices[i]->flags & IFF_UP)) + continue; + in_dev = in6_dev_get(vg->vlan_devices[i]); + if (!in_dev) + continue; + read_lock(&in_dev->lock); + qeth_add_mc6(card,in_dev); + read_unlock(&in_dev->lock); + in6_dev_put(in_dev); + } +#endif /* CONFIG_QETH_VLAN */ +} + +static void +qeth_add_multicast_ipv6(struct qeth_card *card) +{ + struct inet6_dev *in6_dev; + + QETH_DBF_TEXT(trace,4,"chkmcv6"); + if ((card->options.layer2 == 0) && + (!qeth_is_supported(card, IPA_IPV6)) ) + return ; + + in6_dev = in6_dev_get(card->dev); + if (in6_dev == NULL) + return; + read_lock(&in6_dev->lock); + qeth_add_mc6(card, in6_dev); + qeth_add_vlan_mc6(card); + read_unlock(&in6_dev->lock); + in6_dev_put(in6_dev); +} +#endif /* CONFIG_QETH_IPV6 */ + +static int +qeth_layer2_send_setdelmac(struct qeth_card *card, __u8 *mac, + enum qeth_ipa_cmds ipacmd, + int (*reply_cb) (struct qeth_card *, + struct qeth_reply*, + unsigned long)) +{ + struct qeth_ipa_cmd *cmd; + struct qeth_cmd_buffer *iob; + + QETH_DBF_TEXT(trace, 2, "L2sdmac"); + iob = qeth_get_ipacmd_buffer(card, ipacmd, QETH_PROT_IPV4); + cmd = (struct qeth_ipa_cmd 
*)(iob->data+IPA_PDU_HEADER_SIZE); + cmd->data.setdelmac.mac_length = OSA_ADDR_LEN; + memcpy(&cmd->data.setdelmac.mac, mac, OSA_ADDR_LEN); + return qeth_send_ipa_cmd(card, iob, reply_cb, NULL); +} + +static int +qeth_layer2_send_setgroupmac_cb(struct qeth_card *card, + struct qeth_reply *reply, + unsigned long data) +{ + struct qeth_ipa_cmd *cmd; + __u8 *mac; + + QETH_DBF_TEXT(trace, 2, "L2Sgmacb"); + cmd = (struct qeth_ipa_cmd *) data; + mac = &cmd->data.setdelmac.mac[0]; + /* MAC already registered, needed in couple/uncouple case */ + if (cmd->hdr.return_code == 0x2005) { + PRINT_WARN("Group MAC %02x:%02x:%02x:%02x:%02x:%02x " \ + "already existing on %s \n", + mac[0], mac[1], mac[2], mac[3], mac[4], mac[5], + QETH_CARD_IFNAME(card)); + cmd->hdr.return_code = 0; + } + if (cmd->hdr.return_code) + PRINT_ERR("Could not set group MAC " \ + "%02x:%02x:%02x:%02x:%02x:%02x on %s: %x\n", + mac[0], mac[1], mac[2], mac[3], mac[4], mac[5], + QETH_CARD_IFNAME(card),cmd->hdr.return_code); + return 0; +} + +static int +qeth_layer2_send_setgroupmac(struct qeth_card *card, __u8 *mac) +{ + QETH_DBF_TEXT(trace, 2, "L2Sgmac"); + return qeth_layer2_send_setdelmac(card, mac, IPA_CMD_SETGMAC, + qeth_layer2_send_setgroupmac_cb); +} + +static int +qeth_layer2_send_delgroupmac_cb(struct qeth_card *card, + struct qeth_reply *reply, + unsigned long data) +{ + struct qeth_ipa_cmd *cmd; + __u8 *mac; + + QETH_DBF_TEXT(trace, 2, "L2Dgmacb"); + cmd = (struct qeth_ipa_cmd *) data; + mac = &cmd->data.setdelmac.mac[0]; + if (cmd->hdr.return_code) + PRINT_ERR("Could not delete group MAC " \ + "%02x:%02x:%02x:%02x:%02x:%02x on %s: %x\n", + mac[0], mac[1], mac[2], mac[3], mac[4], mac[5], + QETH_CARD_IFNAME(card), cmd->hdr.return_code); + return 0; +} + +static int +qeth_layer2_send_delgroupmac(struct qeth_card *card, __u8 *mac) +{ + QETH_DBF_TEXT(trace, 2, "L2Dgmac"); + return qeth_layer2_send_setdelmac(card, mac, IPA_CMD_DELGMAC, + qeth_layer2_send_delgroupmac_cb); +} + +static int +qeth_layer2_send_setmac_cb(struct qeth_card *card, + struct qeth_reply *reply, + unsigned long data) +{ + struct qeth_ipa_cmd *cmd; + + QETH_DBF_TEXT(trace, 2, "L2Smaccb"); + cmd = (struct qeth_ipa_cmd *) data; + if (cmd->hdr.return_code) { + QETH_DBF_TEXT_(trace, 2, "L2er%x", cmd->hdr.return_code); + PRINT_WARN("Error in registering MAC address on " \ + "device %s: x%x\n", CARD_BUS_ID(card), + cmd->hdr.return_code); + card->info.layer2_mac_registered = 0; + cmd->hdr.return_code = -EIO; + } else { + card->info.layer2_mac_registered = 1; + memcpy(card->dev->dev_addr,cmd->data.setdelmac.mac, + OSA_ADDR_LEN); + PRINT_INFO("MAC address %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x " + "successfully registered on device %s\n", + card->dev->dev_addr[0], card->dev->dev_addr[1], + card->dev->dev_addr[2], card->dev->dev_addr[3], + card->dev->dev_addr[4], card->dev->dev_addr[5], + card->dev->name); + } + return 0; +} + +static int +qeth_layer2_send_setmac(struct qeth_card *card, __u8 *mac) +{ + QETH_DBF_TEXT(trace, 2, "L2Setmac"); + return qeth_layer2_send_setdelmac(card, mac, IPA_CMD_SETVMAC, + qeth_layer2_send_setmac_cb); +} + +static int +qeth_layer2_send_delmac_cb(struct qeth_card *card, + struct qeth_reply *reply, + unsigned long data) +{ + struct qeth_ipa_cmd *cmd; + + QETH_DBF_TEXT(trace, 2, "L2Dmaccb"); + cmd = (struct qeth_ipa_cmd *) data; + if (cmd->hdr.return_code) { + PRINT_WARN("Error in deregistering MAC address on " \ + "device %s: x%x\n", CARD_BUS_ID(card), + cmd->hdr.return_code); + QETH_DBF_TEXT_(trace, 2, "err%d", cmd->hdr.return_code); + 
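+		/*
+		 * Note on the mapping below: the adapter reports a raw IPA
+		 * return code (hardware codes of the 0x2005 style seen in the
+		 * setgroupmac callback above); it is logged first and then
+		 * replaced with -EIO so that waiting callers see a plain
+		 * kernel errno instead of an IPA code.
+		 */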
cmd->hdr.return_code = -EIO;
+		return 0;
+	}
+	card->info.layer2_mac_registered = 0;
+
+	return 0;
+}
+static int
+qeth_layer2_send_delmac(struct qeth_card *card, __u8 *mac)
+{
+	QETH_DBF_TEXT(trace, 2, "L2Delmac");
+	if (!card->info.layer2_mac_registered)
+		return 0;
+	return qeth_layer2_send_setdelmac(card, mac, IPA_CMD_DELVMAC,
+					  qeth_layer2_send_delmac_cb);
+}
+
+static int
+qeth_layer2_set_mac_address(struct net_device *dev, void *p)
+{
+	struct sockaddr *addr = p;
+	struct qeth_card *card;
+	int rc = 0;
+
+	QETH_DBF_TEXT(trace, 3, "setmac");
+
+	if (qeth_verify_dev(dev) != QETH_REAL_CARD) {
+		QETH_DBF_TEXT(trace, 3, "setmcINV");
+		return -EOPNOTSUPP;
+	}
+	card = (struct qeth_card *) dev->priv;
+
+	if (!card->options.layer2) {
+		PRINT_WARN("Setting MAC address on %s is not supported "
+			   "in Layer 3 mode.\n", dev->name);
+		QETH_DBF_TEXT(trace, 3, "setmcLY3");
+		return -EOPNOTSUPP;
+	}
+	QETH_DBF_TEXT_(trace, 3, "%s", CARD_BUS_ID(card));
+	QETH_DBF_HEX(trace, 3, addr->sa_data, OSA_ADDR_LEN);
+	rc = qeth_layer2_send_delmac(card, &card->dev->dev_addr[0]);
+	if (!rc)
+		rc = qeth_layer2_send_setmac(card, addr->sa_data);
+	return rc;
+}
+
+static void
+qeth_fill_ipacmd_header(struct qeth_card *card, struct qeth_ipa_cmd *cmd,
+			__u8 command, enum qeth_prot_versions prot)
+{
+	memset(cmd, 0, sizeof (struct qeth_ipa_cmd));
+	cmd->hdr.command = command;
+	cmd->hdr.initiator = IPA_CMD_INITIATOR_HOST;
+	cmd->hdr.seqno = card->seqno.ipa;
+	cmd->hdr.adapter_type = qeth_get_ipa_adp_type(card->info.link_type);
+	cmd->hdr.rel_adapter_no = (__u8) card->info.portno;
+	if (card->options.layer2)
+		cmd->hdr.prim_version_no = 2;
+	else
+		cmd->hdr.prim_version_no = 1;
+	cmd->hdr.param_count = 1;
+	cmd->hdr.prot_version = prot;
+	cmd->hdr.ipa_supported = 0;
+	cmd->hdr.ipa_enabled = 0;
+}
+
+static struct qeth_cmd_buffer *
+qeth_get_ipacmd_buffer(struct qeth_card *card, enum qeth_ipa_cmds ipacmd,
+		       enum qeth_prot_versions prot)
+{
+	struct qeth_cmd_buffer *iob;
+	struct qeth_ipa_cmd *cmd;
+
+	iob = qeth_wait_for_buffer(&card->write);
+	cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
+	qeth_fill_ipacmd_header(card, cmd, ipacmd, prot);
+
+	return iob;
+}
+
+static int
+qeth_send_setdelmc(struct qeth_card *card, struct qeth_ipaddr *addr, int ipacmd)
+{
+	int rc;
+	struct qeth_cmd_buffer *iob;
+	struct qeth_ipa_cmd *cmd;
+
+	QETH_DBF_TEXT(trace,4,"setdelmc");
+
+	iob = qeth_get_ipacmd_buffer(card, ipacmd, addr->proto);
+	cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
+	memcpy(&cmd->data.setdelipm.mac,addr->mac, OSA_ADDR_LEN);
+	if (addr->proto == QETH_PROT_IPV6)
+		memcpy(cmd->data.setdelipm.ip6, &addr->u.a6.addr,
+		       sizeof(struct in6_addr));
+	else
+		memcpy(&cmd->data.setdelipm.ip4, &addr->u.a4.addr,4);
+
+	rc = qeth_send_ipa_cmd(card, iob, NULL, NULL);
+
+	return rc;
+}
+static inline void
+qeth_fill_netmask(u8 *netmask, unsigned int len)
+{
+	int i,j;
+	for (i=0;i<16;i++) {
+		j=(len)-(i*8);
+		if (j >= 8)
+			netmask[i] = 0xff;
+		else if (j > 0)
+			netmask[i] = (u8)(0xFF00>>j);
+		else
+			netmask[i] = 0;
+	}
+}
+
+static int
+qeth_send_setdelip(struct qeth_card *card, struct qeth_ipaddr *addr,
+		   int ipacmd, unsigned int flags)
+{
+	int rc;
+	struct qeth_cmd_buffer *iob;
+	struct qeth_ipa_cmd *cmd;
+	__u8 netmask[16];
+
+	QETH_DBF_TEXT(trace,4,"setdelip");
+	QETH_DBF_TEXT_(trace,4,"flags%02X", flags);
+
+	iob = qeth_get_ipacmd_buffer(card, ipacmd, addr->proto);
+	cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
+	if (addr->proto == QETH_PROT_IPV6) {
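+		/*
+		 * Worked example for qeth_fill_netmask() as used below:
+		 * a prefix length of 64 expands to eight 0xff bytes followed
+		 * by eight 0x00 bytes; a prefix length of 52 expands to six
+		 * 0xff bytes, one 0xf0 byte (0xFF00 >> 4) and nine 0x00 bytes.
+		 */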
+		memcpy(cmd->data.setdelip6.ip_addr, &addr->u.a6.addr,
+		       sizeof(struct in6_addr));
+		qeth_fill_netmask(netmask,addr->u.a6.pfxlen);
+		memcpy(cmd->data.setdelip6.mask, netmask,
+		       sizeof(struct in6_addr));
+		cmd->data.setdelip6.flags = flags;
+	} else {
+		memcpy(cmd->data.setdelip4.ip_addr, &addr->u.a4.addr, 4);
+		memcpy(cmd->data.setdelip4.mask, &addr->u.a4.mask, 4);
+		cmd->data.setdelip4.flags = flags;
+	}
+
+	rc = qeth_send_ipa_cmd(card, iob, NULL, NULL);
+
+	return rc;
+}
+
+static int
+qeth_layer2_register_addr_entry(struct qeth_card *card,
+				struct qeth_ipaddr *addr)
+{
+	if (!addr->is_multicast)
+		return 0;
+	QETH_DBF_TEXT(trace, 2, "setgmac");
+	QETH_DBF_HEX(trace,3,&addr->mac[0],OSA_ADDR_LEN);
+	return qeth_layer2_send_setgroupmac(card, &addr->mac[0]);
+}
+
+static int
+qeth_layer2_deregister_addr_entry(struct qeth_card *card,
+				  struct qeth_ipaddr *addr)
+{
+	if (!addr->is_multicast)
+		return 0;
+	QETH_DBF_TEXT(trace, 2, "delgmac");
+	QETH_DBF_HEX(trace,3,&addr->mac[0],OSA_ADDR_LEN);
+	return qeth_layer2_send_delgroupmac(card, &addr->mac[0]);
+}
+
+static int
+qeth_layer3_register_addr_entry(struct qeth_card *card,
+				struct qeth_ipaddr *addr)
+{
+	char buf[50];
+	int rc;
+	int cnt = 3;
+
+	if (addr->proto == QETH_PROT_IPV4) {
+		QETH_DBF_TEXT(trace, 2,"setaddr4");
+		QETH_DBF_HEX(trace, 3, &addr->u.a4.addr, sizeof(int));
+	} else if (addr->proto == QETH_PROT_IPV6) {
+		QETH_DBF_TEXT(trace, 2, "setaddr6");
+		QETH_DBF_HEX(trace,3,&addr->u.a6.addr,8);
+		QETH_DBF_HEX(trace,3,((char *)&addr->u.a6.addr)+8,8);
+	} else {
+		QETH_DBF_TEXT(trace, 2, "setaddr?");
+		QETH_DBF_HEX(trace, 3, addr, sizeof(struct qeth_ipaddr));
+	}
+	do {
+		if (addr->is_multicast)
+			rc = qeth_send_setdelmc(card, addr, IPA_CMD_SETIPM);
+		else
+			rc = qeth_send_setdelip(card, addr, IPA_CMD_SETIP,
+						addr->set_flags);
+		if (rc)
+			QETH_DBF_TEXT(trace, 2, "failed");
+	} while ((--cnt > 0) && rc);
+	if (rc){
+		QETH_DBF_TEXT(trace, 2, "FAILED");
+		qeth_ipaddr_to_string(addr->proto, (u8 *)&addr->u, buf);
+		PRINT_WARN("Could not register IP address %s (rc=0x%x/%d)\n",
+			   buf, rc, rc);
+	}
+	return rc;
+}
+
+static int
+qeth_layer3_deregister_addr_entry(struct qeth_card *card,
+				  struct qeth_ipaddr *addr)
+{
+	//char buf[50];
+	int rc;
+
+	if (addr->proto == QETH_PROT_IPV4) {
+		QETH_DBF_TEXT(trace, 2,"deladdr4");
+		QETH_DBF_HEX(trace, 3, &addr->u.a4.addr, sizeof(int));
+	} else if (addr->proto == QETH_PROT_IPV6) {
+		QETH_DBF_TEXT(trace, 2, "deladdr6");
+		QETH_DBF_HEX(trace,3,&addr->u.a6.addr,8);
+		QETH_DBF_HEX(trace,3,((char *)&addr->u.a6.addr)+8,8);
+	} else {
+		QETH_DBF_TEXT(trace, 2, "deladdr?");
+		QETH_DBF_HEX(trace, 3, addr, sizeof(struct qeth_ipaddr));
+	}
+	if (addr->is_multicast)
+		rc = qeth_send_setdelmc(card, addr, IPA_CMD_DELIPM);
+	else
+		rc = qeth_send_setdelip(card, addr, IPA_CMD_DELIP,
+					addr->del_flags);
+	if (rc) {
+		QETH_DBF_TEXT(trace, 2, "failed");
+		/* TODO: re-activate this warning as soon as we have a
+		 * clean microcode
+		qeth_ipaddr_to_string(addr->proto, (u8 *)&addr->u, buf);
+		PRINT_WARN("Could not deregister IP address %s (rc=%x)\n",
+			   buf, rc);
+		*/
+	}
+	return rc;
+}
+
+static int
+qeth_register_addr_entry(struct qeth_card *card, struct qeth_ipaddr *addr)
+{
+	if (card->options.layer2)
+		return qeth_layer2_register_addr_entry(card, addr);
+
+	return qeth_layer3_register_addr_entry(card, addr);
+}
+
+static int
+qeth_deregister_addr_entry(struct qeth_card *card, struct qeth_ipaddr *addr)
+{
+	if (card->options.layer2)
+		return qeth_layer2_deregister_addr_entry(card, addr);
+
+	return
qeth_layer3_deregister_addr_entry(card, addr); +} + +static u32 +qeth_ethtool_get_tx_csum(struct net_device *dev) +{ + /* We may need to say that we support tx csum offload if + * we do EDDP or TSO. There are discussions going on to + * enforce rules in the stack and in ethtool that make + * SG and TSO depend on HW_CSUM. At the moment there are + * no such rules.... + * If we say yes here, we have to checksum outbound packets + * any time. */ + return 0; +} + +static int +qeth_ethtool_set_tx_csum(struct net_device *dev, u32 data) +{ + return -EINVAL; +} + +static u32 +qeth_ethtool_get_rx_csum(struct net_device *dev) +{ + struct qeth_card *card = (struct qeth_card *)dev->priv; + + return (card->options.checksum_type == HW_CHECKSUMMING); +} + +static int +qeth_ethtool_set_rx_csum(struct net_device *dev, u32 data) +{ + struct qeth_card *card = (struct qeth_card *)dev->priv; + + if ((card->state != CARD_STATE_DOWN) && + (card->state != CARD_STATE_RECOVER)) + return -EPERM; + if (data) + card->options.checksum_type = HW_CHECKSUMMING; + else + card->options.checksum_type = SW_CHECKSUMMING; + return 0; +} + +static u32 +qeth_ethtool_get_sg(struct net_device *dev) +{ + struct qeth_card *card = (struct qeth_card *)dev->priv; + + return ((card->options.large_send != QETH_LARGE_SEND_NO) && + (dev->features & NETIF_F_SG)); +} + +static int +qeth_ethtool_set_sg(struct net_device *dev, u32 data) +{ + struct qeth_card *card = (struct qeth_card *)dev->priv; + + if (data) { + if (card->options.large_send != QETH_LARGE_SEND_NO) + dev->features |= NETIF_F_SG; + else { + dev->features &= ~NETIF_F_SG; + return -EINVAL; + } + } else + dev->features &= ~NETIF_F_SG; + return 0; +} + +static u32 +qeth_ethtool_get_tso(struct net_device *dev) +{ + struct qeth_card *card = (struct qeth_card *)dev->priv; + + return ((card->options.large_send != QETH_LARGE_SEND_NO) && + (dev->features & NETIF_F_TSO)); +} + +static int +qeth_ethtool_set_tso(struct net_device *dev, u32 data) +{ + struct qeth_card *card = (struct qeth_card *)dev->priv; + + if (data) { + if (card->options.large_send != QETH_LARGE_SEND_NO) + dev->features |= NETIF_F_TSO; + else { + dev->features &= ~NETIF_F_TSO; + return -EINVAL; + } + } else + dev->features &= ~NETIF_F_TSO; + return 0; +} + +static struct ethtool_ops qeth_ethtool_ops = { + .get_tx_csum = qeth_ethtool_get_tx_csum, + .set_tx_csum = qeth_ethtool_set_tx_csum, + .get_rx_csum = qeth_ethtool_get_rx_csum, + .set_rx_csum = qeth_ethtool_set_rx_csum, + .get_sg = qeth_ethtool_get_sg, + .set_sg = qeth_ethtool_set_sg, + .get_tso = qeth_ethtool_get_tso, + .set_tso = qeth_ethtool_set_tso, +}; + +static int +qeth_netdev_init(struct net_device *dev) +{ + struct qeth_card *card; + + card = (struct qeth_card *) dev->priv; + + QETH_DBF_TEXT(trace,3,"initdev"); + + dev->tx_timeout = &qeth_tx_timeout; + dev->watchdog_timeo = QETH_TX_TIMEOUT; + dev->open = qeth_open; + dev->stop = qeth_stop; + dev->hard_start_xmit = qeth_hard_start_xmit; + dev->do_ioctl = qeth_do_ioctl; + dev->get_stats = qeth_get_stats; + dev->change_mtu = qeth_change_mtu; + dev->neigh_setup = qeth_neigh_setup; + dev->set_multicast_list = qeth_set_multicast_list; +#ifdef CONFIG_QETH_VLAN + dev->vlan_rx_register = qeth_vlan_rx_register; + dev->vlan_rx_kill_vid = qeth_vlan_rx_kill_vid; + dev->vlan_rx_add_vid = qeth_vlan_rx_add_vid; +#endif + dev->hard_header = card->orig_hard_header; + if (qeth_get_netdev_flags(card) & IFF_NOARP) { + dev->rebuild_header = NULL; + dev->hard_header = NULL; + if (card->options.fake_ll) + dev->hard_header = 
qeth_fake_header; + dev->header_cache_update = NULL; + dev->hard_header_cache = NULL; + } +#ifdef CONFIG_QETH_IPV6 + /*IPv6 address autoconfiguration stuff*/ + if (!(card->info.unique_id & UNIQUE_ID_NOT_BY_CARD)) + card->dev->dev_id = card->info.unique_id & 0xffff; +#endif + dev->hard_header_parse = NULL; + dev->set_mac_address = qeth_layer2_set_mac_address; + dev->flags |= qeth_get_netdev_flags(card); + if ((card->options.fake_broadcast) || + (card->info.broadcast_capable)) + dev->flags |= IFF_BROADCAST; + dev->hard_header_len = + qeth_get_hlen(card->info.link_type) + card->options.add_hhlen; + dev->addr_len = OSA_ADDR_LEN; + dev->mtu = card->info.initial_mtu; + + SET_ETHTOOL_OPS(dev, &qeth_ethtool_ops); + + SET_MODULE_OWNER(dev); + return 0; +} + +static void +qeth_init_func_level(struct qeth_card *card) +{ + if (card->ipato.enabled) { + if (card->info.type == QETH_CARD_TYPE_IQD) + card->info.func_level = + QETH_IDX_FUNC_LEVEL_IQD_ENA_IPAT; + else + card->info.func_level = + QETH_IDX_FUNC_LEVEL_OSAE_ENA_IPAT; + } else { + if (card->info.type == QETH_CARD_TYPE_IQD) + card->info.func_level = + QETH_IDX_FUNC_LEVEL_IQD_DIS_IPAT; + else + card->info.func_level = + QETH_IDX_FUNC_LEVEL_OSAE_DIS_IPAT; + } +} + +/** + * hardsetup card, initialize MPC and QDIO stuff + */ +static int +qeth_hardsetup_card(struct qeth_card *card) +{ + int retries = 3; + int rc; + + QETH_DBF_TEXT(setup, 2, "hrdsetup"); + +retry: + if (retries < 3){ + PRINT_WARN("Retrying to do IDX activates.\n"); + ccw_device_set_offline(CARD_DDEV(card)); + ccw_device_set_offline(CARD_WDEV(card)); + ccw_device_set_offline(CARD_RDEV(card)); + ccw_device_set_online(CARD_RDEV(card)); + ccw_device_set_online(CARD_WDEV(card)); + ccw_device_set_online(CARD_DDEV(card)); + } + rc = qeth_qdio_clear_card(card,card->info.type==QETH_CARD_TYPE_OSAE); + if (rc == -ERESTARTSYS) { + QETH_DBF_TEXT(setup, 2, "break1"); + return rc; + } else if (rc) { + QETH_DBF_TEXT_(setup, 2, "1err%d", rc); + if (--retries < 0) + goto out; + else + goto retry; + } + if ((rc = qeth_get_unitaddr(card))){ + QETH_DBF_TEXT_(setup, 2, "2err%d", rc); + return rc; + } + qeth_init_tokens(card); + qeth_init_func_level(card); + rc = qeth_idx_activate_channel(&card->read, qeth_idx_read_cb); + if (rc == -ERESTARTSYS) { + QETH_DBF_TEXT(setup, 2, "break2"); + return rc; + } else if (rc) { + QETH_DBF_TEXT_(setup, 2, "3err%d", rc); + if (--retries < 0) + goto out; + else + goto retry; + } + rc = qeth_idx_activate_channel(&card->write, qeth_idx_write_cb); + if (rc == -ERESTARTSYS) { + QETH_DBF_TEXT(setup, 2, "break3"); + return rc; + } else if (rc) { + QETH_DBF_TEXT_(setup, 2, "4err%d", rc); + if (--retries < 0) + goto out; + else + goto retry; + } + if ((rc = qeth_mpc_initialize(card))){ + QETH_DBF_TEXT_(setup, 2, "5err%d", rc); + goto out; + } + /*network device will be recovered*/ + if (card->dev) { + card->dev->hard_header = card->orig_hard_header; + return 0; + } + /* at first set_online allocate netdev */ + card->dev = qeth_get_netdevice(card->info.type, + card->info.link_type); + if (!card->dev){ + qeth_qdio_clear_card(card, card->info.type == + QETH_CARD_TYPE_OSAE); + rc = -ENODEV; + QETH_DBF_TEXT_(setup, 2, "6err%d", rc); + goto out; + } + card->dev->priv = card; + card->orig_hard_header = card->dev->hard_header; + card->dev->type = qeth_get_arphdr_type(card->info.type, + card->info.link_type); + card->dev->init = qeth_netdev_init; + return 0; +out: + PRINT_ERR("Initialization in hardsetup failed! 
rc=%d\n", rc); + return rc; +} + +static int +qeth_default_setassparms_cb(struct qeth_card *card, struct qeth_reply *reply, + unsigned long data) +{ + struct qeth_ipa_cmd *cmd; + + QETH_DBF_TEXT(trace,4,"defadpcb"); + + cmd = (struct qeth_ipa_cmd *) data; + if (cmd->hdr.return_code == 0){ + cmd->hdr.return_code = cmd->data.setassparms.hdr.return_code; + if (cmd->hdr.prot_version == QETH_PROT_IPV4) + card->options.ipa4.enabled_funcs = cmd->hdr.ipa_enabled; +#ifdef CONFIG_QETH_IPV6 + if (cmd->hdr.prot_version == QETH_PROT_IPV6) + card->options.ipa6.enabled_funcs = cmd->hdr.ipa_enabled; +#endif + } + if (cmd->data.setassparms.hdr.assist_no == IPA_INBOUND_CHECKSUM && + cmd->data.setassparms.hdr.command_code == IPA_CMD_ASS_START) { + card->info.csum_mask = cmd->data.setassparms.data.flags_32bit; + QETH_DBF_TEXT_(trace, 3, "csum:%d", card->info.csum_mask); + } + return 0; +} + +static int +qeth_default_setadapterparms_cb(struct qeth_card *card, + struct qeth_reply *reply, + unsigned long data) +{ + struct qeth_ipa_cmd *cmd; + + QETH_DBF_TEXT(trace,4,"defadpcb"); + + cmd = (struct qeth_ipa_cmd *) data; + if (cmd->hdr.return_code == 0) + cmd->hdr.return_code = cmd->data.setadapterparms.hdr.return_code; + return 0; +} + +static int +qeth_query_setadapterparms_cb(struct qeth_card *card, struct qeth_reply *reply, + unsigned long data) +{ + struct qeth_ipa_cmd *cmd; + + QETH_DBF_TEXT(trace,3,"quyadpcb"); + + cmd = (struct qeth_ipa_cmd *) data; + if (cmd->data.setadapterparms.data.query_cmds_supp.lan_type & 0x7f) + card->info.link_type = + cmd->data.setadapterparms.data.query_cmds_supp.lan_type; + card->options.adp.supported_funcs = + cmd->data.setadapterparms.data.query_cmds_supp.supported_cmds; + return qeth_default_setadapterparms_cb(card, reply, (unsigned long)cmd); +} + +static int +qeth_query_setadapterparms(struct qeth_card *card) +{ + int rc; + struct qeth_cmd_buffer *iob; + + QETH_DBF_TEXT(trace,3,"queryadp"); + iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_COMMANDS_SUPPORTED, + sizeof(struct qeth_ipacmd_setadpparms)); + rc = qeth_send_ipa_cmd(card, iob, qeth_query_setadapterparms_cb, NULL); + return rc; +} + +static int +qeth_setadpparms_change_macaddr_cb(struct qeth_card *card, + struct qeth_reply *reply, + unsigned long data) +{ + struct qeth_ipa_cmd *cmd; + + QETH_DBF_TEXT(trace,4,"chgmaccb"); + + cmd = (struct qeth_ipa_cmd *) data; + memcpy(card->dev->dev_addr, + &cmd->data.setadapterparms.data.change_addr.addr,OSA_ADDR_LEN); + qeth_default_setadapterparms_cb(card, reply, (unsigned long) cmd); + return 0; +} + +static int +qeth_setadpparms_change_macaddr(struct qeth_card *card) +{ + int rc; + struct qeth_cmd_buffer *iob; + struct qeth_ipa_cmd *cmd; + + QETH_DBF_TEXT(trace,4,"chgmac"); + + iob = qeth_get_adapter_cmd(card,IPA_SETADP_ALTER_MAC_ADDRESS, + sizeof(struct qeth_ipacmd_setadpparms)); + cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); + cmd->data.setadapterparms.data.change_addr.cmd = CHANGE_ADDR_READ_MAC; + cmd->data.setadapterparms.data.change_addr.addr_size = OSA_ADDR_LEN; + memcpy(&cmd->data.setadapterparms.data.change_addr.addr, + card->dev->dev_addr, OSA_ADDR_LEN); + rc = qeth_send_ipa_cmd(card, iob, qeth_setadpparms_change_macaddr_cb, + NULL); + return rc; +} + +static int +qeth_send_setadp_mode(struct qeth_card *card, __u32 command, __u32 mode) +{ + int rc; + struct qeth_cmd_buffer *iob; + struct qeth_ipa_cmd *cmd; + + QETH_DBF_TEXT(trace,4,"adpmode"); + + iob = qeth_get_adapter_cmd(card, command, + sizeof(struct qeth_ipacmd_setadpparms)); + cmd = (struct 
qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); + cmd->data.setadapterparms.data.mode = mode; + rc = qeth_send_ipa_cmd(card, iob, qeth_default_setadapterparms_cb, + NULL); + return rc; +} + +static inline int +qeth_setadapter_hstr(struct qeth_card *card) +{ + int rc; + + QETH_DBF_TEXT(trace,4,"adphstr"); + + if (qeth_adp_supported(card,IPA_SETADP_SET_BROADCAST_MODE)) { + rc = qeth_send_setadp_mode(card, IPA_SETADP_SET_BROADCAST_MODE, + card->options.broadcast_mode); + if (rc) + PRINT_WARN("couldn't set broadcast mode on " + "device %s: x%x\n", + CARD_BUS_ID(card), rc); + rc = qeth_send_setadp_mode(card, IPA_SETADP_ALTER_MAC_ADDRESS, + card->options.macaddr_mode); + if (rc) + PRINT_WARN("couldn't set macaddr mode on " + "device %s: x%x\n", CARD_BUS_ID(card), rc); + return rc; + } + if (card->options.broadcast_mode == QETH_TR_BROADCAST_LOCAL) + PRINT_WARN("set adapter parameters not available " + "to set broadcast mode, using ALLRINGS " + "on device %s:\n", CARD_BUS_ID(card)); + if (card->options.macaddr_mode == QETH_TR_MACADDR_CANONICAL) + PRINT_WARN("set adapter parameters not available " + "to set macaddr mode, using NONCANONICAL " + "on device %s:\n", CARD_BUS_ID(card)); + return 0; +} + +static int +qeth_setadapter_parms(struct qeth_card *card) +{ + int rc; + + QETH_DBF_TEXT(setup, 2, "setadprm"); + + if (!qeth_is_supported(card, IPA_SETADAPTERPARMS)){ + PRINT_WARN("set adapter parameters not supported " + "on device %s.\n", + CARD_BUS_ID(card)); + QETH_DBF_TEXT(setup, 2, " notsupp"); + return 0; + } + rc = qeth_query_setadapterparms(card); + if (rc) { + PRINT_WARN("couldn't set adapter parameters on device %s: " + "x%x\n", CARD_BUS_ID(card), rc); + return rc; + } + if (qeth_adp_supported(card,IPA_SETADP_ALTER_MAC_ADDRESS)) { + rc = qeth_setadpparms_change_macaddr(card); + if (rc) + PRINT_WARN("couldn't get MAC address on " + "device %s: x%x\n", + CARD_BUS_ID(card), rc); + } + + if ((card->info.link_type == QETH_LINK_TYPE_HSTR) || + (card->info.link_type == QETH_LINK_TYPE_LANE_TR)) + rc = qeth_setadapter_hstr(card); + + return rc; +} + +static int +qeth_layer2_initialize(struct qeth_card *card) +{ + int rc = 0; + + + QETH_DBF_TEXT(setup, 2, "doL2init"); + QETH_DBF_TEXT_(setup, 2, "doL2%s", CARD_BUS_ID(card)); + + rc = qeth_setadpparms_change_macaddr(card); + if (rc) { + PRINT_WARN("couldn't get MAC address on " + "device %s: x%x\n", + CARD_BUS_ID(card), rc); + QETH_DBF_TEXT_(setup, 2,"1err%d",rc); + return rc; + } + QETH_DBF_HEX(setup,2, card->dev->dev_addr, OSA_ADDR_LEN); + + rc = qeth_layer2_send_setmac(card, &card->dev->dev_addr[0]); + if (rc) + QETH_DBF_TEXT_(setup, 2,"2err%d",rc); + return 0; +} + + +static int +qeth_send_startstoplan(struct qeth_card *card, enum qeth_ipa_cmds ipacmd, + enum qeth_prot_versions prot) +{ + int rc; + struct qeth_cmd_buffer *iob; + + iob = qeth_get_ipacmd_buffer(card,ipacmd,prot); + rc = qeth_send_ipa_cmd(card, iob, NULL, NULL); + + return rc; +} + +static int +qeth_send_startlan(struct qeth_card *card, enum qeth_prot_versions prot) +{ + int rc; + + QETH_DBF_TEXT_(setup, 2, "strtlan%i", prot); + + rc = qeth_send_startstoplan(card, IPA_CMD_STARTLAN, prot); + return rc; +} + +static int +qeth_send_stoplan(struct qeth_card *card) +{ + int rc = 0; + + /* + * TODO: according to the IPA format document page 14, + * TCP/IP (we!) never issue a STOPLAN + * is this right ?!? 
+ */ + QETH_DBF_TEXT(trace, 2, "stoplan"); + + rc = qeth_send_startstoplan(card, IPA_CMD_STOPLAN, QETH_PROT_IPV4); + return rc; +} + +static int +qeth_query_ipassists_cb(struct qeth_card *card, struct qeth_reply *reply, + unsigned long data) +{ + struct qeth_ipa_cmd *cmd; + + QETH_DBF_TEXT(setup, 2, "qipasscb"); + + cmd = (struct qeth_ipa_cmd *) data; + if (cmd->hdr.prot_version == QETH_PROT_IPV4) { + card->options.ipa4.supported_funcs = cmd->hdr.ipa_supported; + card->options.ipa4.enabled_funcs = cmd->hdr.ipa_enabled; + } else { +#ifdef CONFIG_QETH_IPV6 + card->options.ipa6.supported_funcs = cmd->hdr.ipa_supported; + card->options.ipa6.enabled_funcs = cmd->hdr.ipa_enabled; +#endif + } + QETH_DBF_TEXT(setup, 2, "suppenbl"); + QETH_DBF_TEXT_(setup, 2, "%x",cmd->hdr.ipa_supported); + QETH_DBF_TEXT_(setup, 2, "%x",cmd->hdr.ipa_enabled); + return 0; +} + +static int +qeth_query_ipassists(struct qeth_card *card, enum qeth_prot_versions prot) +{ + int rc; + struct qeth_cmd_buffer *iob; + + QETH_DBF_TEXT_(setup, 2, "qipassi%i", prot); + if (card->options.layer2) { + QETH_DBF_TEXT(setup, 2, "noprmly2"); + return -EPERM; + } + + iob = qeth_get_ipacmd_buffer(card,IPA_CMD_QIPASSIST,prot); + rc = qeth_send_ipa_cmd(card, iob, qeth_query_ipassists_cb, NULL); + return rc; +} + +static struct qeth_cmd_buffer * +qeth_get_setassparms_cmd(struct qeth_card *card, enum qeth_ipa_funcs ipa_func, + __u16 cmd_code, __u16 len, + enum qeth_prot_versions prot) +{ + struct qeth_cmd_buffer *iob; + struct qeth_ipa_cmd *cmd; + + QETH_DBF_TEXT(trace,4,"getasscm"); + iob = qeth_get_ipacmd_buffer(card,IPA_CMD_SETASSPARMS,prot); + + cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); + cmd->data.setassparms.hdr.assist_no = ipa_func; + cmd->data.setassparms.hdr.length = 8 + len; + cmd->data.setassparms.hdr.command_code = cmd_code; + cmd->data.setassparms.hdr.return_code = 0; + cmd->data.setassparms.hdr.seq_no = 0; + + return iob; +} + +static int +qeth_send_setassparms(struct qeth_card *card, struct qeth_cmd_buffer *iob, + __u16 len, long data, + int (*reply_cb) + (struct qeth_card *,struct qeth_reply *,unsigned long), + void *reply_param) +{ + int rc; + struct qeth_ipa_cmd *cmd; + + QETH_DBF_TEXT(trace,4,"sendassp"); + + cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); + if (len <= sizeof(__u32)) + cmd->data.setassparms.data.flags_32bit = (__u32) data; + else if (len > sizeof(__u32)) + memcpy(&cmd->data.setassparms.data, (void *) data, len); + + rc = qeth_send_ipa_cmd(card, iob, reply_cb, reply_param); + return rc; +} + +#ifdef CONFIG_QETH_IPV6 +static int +qeth_send_simple_setassparms_ipv6(struct qeth_card *card, + enum qeth_ipa_funcs ipa_func, __u16 cmd_code) + +{ + int rc; + struct qeth_cmd_buffer *iob; + + QETH_DBF_TEXT(trace,4,"simassp6"); + iob = qeth_get_setassparms_cmd(card, ipa_func, cmd_code, + 0, QETH_PROT_IPV6); + rc = qeth_send_setassparms(card, iob, 0, 0, + qeth_default_setassparms_cb, NULL); + return rc; +} +#endif + +static int +qeth_send_simple_setassparms(struct qeth_card *card, + enum qeth_ipa_funcs ipa_func, + __u16 cmd_code, long data) +{ + int rc; + int length = 0; + struct qeth_cmd_buffer *iob; + + QETH_DBF_TEXT(trace,4,"simassp4"); + if (data) + length = sizeof(__u32); + iob = qeth_get_setassparms_cmd(card, ipa_func, cmd_code, + length, QETH_PROT_IPV4); + rc = qeth_send_setassparms(card, iob, length, data, + qeth_default_setassparms_cb, NULL); + return rc; +} + +static inline int +qeth_start_ipa_arp_processing(struct qeth_card *card) +{ + int rc; + + 
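+	/*
+	 * The qeth_start_ipa_* helpers in this block all follow one pattern,
+	 * roughly:
+	 *
+	 *	if (!qeth_is_supported(card, <assist flag>))
+	 *		return;		(or -EOPNOTSUPP)
+	 *	rc = qeth_send_simple_setassparms(card, <assist flag>,
+	 *					  IPA_CMD_ASS_START, 0);
+	 *
+	 * A failing assist is only reported; it does not abort the softsetup.
+	 */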
QETH_DBF_TEXT(trace,3,"ipaarp"); + + if (!qeth_is_supported(card,IPA_ARP_PROCESSING)) { + PRINT_WARN("ARP processing not supported " + "on %s!\n", QETH_CARD_IFNAME(card)); + return 0; + } + rc = qeth_send_simple_setassparms(card,IPA_ARP_PROCESSING, + IPA_CMD_ASS_START, 0); + if (rc) { + PRINT_WARN("Could not start ARP processing " + "assist on %s: 0x%x\n", + QETH_CARD_IFNAME(card), rc); + } + return rc; +} + +static int +qeth_start_ipa_ip_fragmentation(struct qeth_card *card) +{ + int rc; + + QETH_DBF_TEXT(trace,3,"ipaipfrg"); + + if (!qeth_is_supported(card, IPA_IP_FRAGMENTATION)) { + PRINT_INFO("Hardware IP fragmentation not supported on %s\n", + QETH_CARD_IFNAME(card)); + return -EOPNOTSUPP; + } + + rc = qeth_send_simple_setassparms(card, IPA_IP_FRAGMENTATION, + IPA_CMD_ASS_START, 0); + if (rc) { + PRINT_WARN("Could not start Hardware IP fragmentation " + "assist on %s: 0x%x\n", + QETH_CARD_IFNAME(card), rc); + } else + PRINT_INFO("Hardware IP fragmentation enabled \n"); + return rc; +} + +static int +qeth_start_ipa_source_mac(struct qeth_card *card) +{ + int rc; + + QETH_DBF_TEXT(trace,3,"stsrcmac"); + + if (!card->options.fake_ll) + return -EOPNOTSUPP; + + if (!qeth_is_supported(card, IPA_SOURCE_MAC)) { + PRINT_INFO("Inbound source address not " + "supported on %s\n", QETH_CARD_IFNAME(card)); + return -EOPNOTSUPP; + } + + rc = qeth_send_simple_setassparms(card, IPA_SOURCE_MAC, + IPA_CMD_ASS_START, 0); + if (rc) + PRINT_WARN("Could not start inbound source " + "assist on %s: 0x%x\n", + QETH_CARD_IFNAME(card), rc); + return rc; +} + +static int +qeth_start_ipa_vlan(struct qeth_card *card) +{ + int rc = 0; + + QETH_DBF_TEXT(trace,3,"strtvlan"); + +#ifdef CONFIG_QETH_VLAN + if (!qeth_is_supported(card, IPA_FULL_VLAN)) { + PRINT_WARN("VLAN not supported on %s\n", QETH_CARD_IFNAME(card)); + return -EOPNOTSUPP; + } + + rc = qeth_send_simple_setassparms(card, IPA_VLAN_PRIO, + IPA_CMD_ASS_START,0); + if (rc) { + PRINT_WARN("Could not start vlan " + "assist on %s: 0x%x\n", + QETH_CARD_IFNAME(card), rc); + } else { + PRINT_INFO("VLAN enabled \n"); + card->dev->features |= + NETIF_F_HW_VLAN_FILTER | + NETIF_F_HW_VLAN_TX | + NETIF_F_HW_VLAN_RX; + } +#endif /* QETH_VLAN */ + return rc; +} + +static int +qeth_start_ipa_multicast(struct qeth_card *card) +{ + int rc; + + QETH_DBF_TEXT(trace,3,"stmcast"); + + if (!qeth_is_supported(card, IPA_MULTICASTING)) { + PRINT_WARN("Multicast not supported on %s\n", + QETH_CARD_IFNAME(card)); + return -EOPNOTSUPP; + } + + rc = qeth_send_simple_setassparms(card, IPA_MULTICASTING, + IPA_CMD_ASS_START,0); + if (rc) { + PRINT_WARN("Could not start multicast " + "assist on %s: rc=%i\n", + QETH_CARD_IFNAME(card), rc); + } else { + PRINT_INFO("Multicast enabled\n"); + card->dev->flags |= IFF_MULTICAST; + } + return rc; +} + +#ifdef CONFIG_QETH_IPV6 +static int +qeth_softsetup_ipv6(struct qeth_card *card) +{ + int rc; + + QETH_DBF_TEXT(trace,3,"softipv6"); + + netif_stop_queue(card->dev); + rc = qeth_send_startlan(card, QETH_PROT_IPV6); + if (rc) { + PRINT_ERR("IPv6 startlan failed on %s\n", + QETH_CARD_IFNAME(card)); + return rc; + } + netif_wake_queue(card->dev); + rc = qeth_query_ipassists(card,QETH_PROT_IPV6); + if (rc) { + PRINT_ERR("IPv6 query ipassist failed on %s\n", + QETH_CARD_IFNAME(card)); + return rc; + } + rc = qeth_send_simple_setassparms(card, IPA_IPV6, + IPA_CMD_ASS_START, 3); + if (rc) { + PRINT_WARN("IPv6 start assist (version 4) failed " + "on %s: 0x%x\n", + QETH_CARD_IFNAME(card), rc); + return rc; + } + rc = qeth_send_simple_setassparms_ipv6(card, 
IPA_IPV6, + IPA_CMD_ASS_START); + if (rc) { + PRINT_WARN("IPV6 start assist (version 6) failed " + "on %s: 0x%x\n", + QETH_CARD_IFNAME(card), rc); + return rc; + } + rc = qeth_send_simple_setassparms_ipv6(card, IPA_PASSTHRU, + IPA_CMD_ASS_START); + if (rc) { + PRINT_WARN("Could not enable passthrough " + "on %s: 0x%x\n", + QETH_CARD_IFNAME(card), rc); + return rc; + } + PRINT_INFO("IPV6 enabled \n"); + return 0; +} + +#endif + +static int +qeth_start_ipa_ipv6(struct qeth_card *card) +{ + int rc = 0; +#ifdef CONFIG_QETH_IPV6 + QETH_DBF_TEXT(trace,3,"strtipv6"); + + if (!qeth_is_supported(card, IPA_IPV6)) { + PRINT_WARN("IPv6 not supported on %s\n", + QETH_CARD_IFNAME(card)); + return 0; + } + rc = qeth_softsetup_ipv6(card); +#endif + return rc ; +} + +static int +qeth_start_ipa_broadcast(struct qeth_card *card) +{ + int rc; + + QETH_DBF_TEXT(trace,3,"stbrdcst"); + card->info.broadcast_capable = 0; + if (!qeth_is_supported(card, IPA_FILTERING)) { + PRINT_WARN("Broadcast not supported on %s\n", + QETH_CARD_IFNAME(card)); + rc = -EOPNOTSUPP; + goto out; + } + rc = qeth_send_simple_setassparms(card, IPA_FILTERING, + IPA_CMD_ASS_START, 0); + if (rc) { + PRINT_WARN("Could not enable broadcasting filtering " + "on %s: 0x%x\n", + QETH_CARD_IFNAME(card), rc); + goto out; + } + + rc = qeth_send_simple_setassparms(card, IPA_FILTERING, + IPA_CMD_ASS_CONFIGURE, 1); + if (rc) { + PRINT_WARN("Could not set up broadcast filtering on %s: 0x%x\n", + QETH_CARD_IFNAME(card), rc); + goto out; + } + card->info.broadcast_capable = QETH_BROADCAST_WITH_ECHO; + PRINT_INFO("Broadcast enabled \n"); + rc = qeth_send_simple_setassparms(card, IPA_FILTERING, + IPA_CMD_ASS_ENABLE, 1); + if (rc) { + PRINT_WARN("Could not set up broadcast echo filtering on " + "%s: 0x%x\n", QETH_CARD_IFNAME(card), rc); + goto out; + } + card->info.broadcast_capable = QETH_BROADCAST_WITHOUT_ECHO; +out: + if (card->info.broadcast_capable) + card->dev->flags |= IFF_BROADCAST; + else + card->dev->flags &= ~IFF_BROADCAST; + return rc; +} + +static int +qeth_send_checksum_command(struct qeth_card *card) +{ + int rc; + + rc = qeth_send_simple_setassparms(card, IPA_INBOUND_CHECKSUM, + IPA_CMD_ASS_START, 0); + if (rc) { + PRINT_WARN("Starting Inbound HW Checksumming failed on %s: " + "0x%x,\ncontinuing using Inbound SW Checksumming\n", + QETH_CARD_IFNAME(card), rc); + return rc; + } + rc = qeth_send_simple_setassparms(card, IPA_INBOUND_CHECKSUM, + IPA_CMD_ASS_ENABLE, + card->info.csum_mask); + if (rc) { + PRINT_WARN("Enabling Inbound HW Checksumming failed on %s: " + "0x%x,\ncontinuing using Inbound SW Checksumming\n", + QETH_CARD_IFNAME(card), rc); + return rc; + } + return 0; +} + +static int +qeth_start_ipa_checksum(struct qeth_card *card) +{ + int rc = 0; + + QETH_DBF_TEXT(trace,3,"strtcsum"); + + if (card->options.checksum_type == NO_CHECKSUMMING) { + PRINT_WARN("Using no checksumming on %s.\n", + QETH_CARD_IFNAME(card)); + return 0; + } + if (card->options.checksum_type == SW_CHECKSUMMING) { + PRINT_WARN("Using SW checksumming on %s.\n", + QETH_CARD_IFNAME(card)); + return 0; + } + if (!qeth_is_supported(card, IPA_INBOUND_CHECKSUM)) { + PRINT_WARN("Inbound HW Checksumming not " + "supported on %s,\ncontinuing " + "using Inbound SW Checksumming\n", + QETH_CARD_IFNAME(card)); + card->options.checksum_type = SW_CHECKSUMMING; + return 0; + } + rc = qeth_send_checksum_command(card); + if (!rc) { + PRINT_INFO("HW Checksumming (inbound) enabled \n"); + } + return rc; +} + +static int +qeth_start_ipa_tso(struct qeth_card *card) +{ + int rc; + + 
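+	/*
+	 * Sketch of the fallback below: if IPA_OUTBOUND_TSO is missing, or
+	 * IPA_CMD_ASS_START fails while large_send was configured as TSO,
+	 * the option reverts to QETH_LARGE_SEND_NO and NETIF_F_TSO/NETIF_F_SG
+	 * are cleared from dev->features, so the stack stops handing over
+	 * oversized packets.
+	 */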
QETH_DBF_TEXT(trace,3,"sttso"); + + if (!qeth_is_supported(card, IPA_OUTBOUND_TSO)) { + PRINT_WARN("Outbound TSO not supported on %s\n", + QETH_CARD_IFNAME(card)); + rc = -EOPNOTSUPP; + } else { + rc = qeth_send_simple_setassparms(card, IPA_OUTBOUND_TSO, + IPA_CMD_ASS_START,0); + if (rc) + PRINT_WARN("Could not start outbound TSO " + "assist on %s: rc=%i\n", + QETH_CARD_IFNAME(card), rc); + else + PRINT_INFO("Outbound TSO enabled\n"); + } + if (rc && (card->options.large_send == QETH_LARGE_SEND_TSO)){ + card->options.large_send = QETH_LARGE_SEND_NO; + card->dev->features &= ~ (NETIF_F_TSO | NETIF_F_SG); + } + return rc; +} + +static int +qeth_start_ipassists(struct qeth_card *card) +{ + QETH_DBF_TEXT(trace,3,"strtipas"); + qeth_start_ipa_arp_processing(card); /* go on*/ + qeth_start_ipa_ip_fragmentation(card); /* go on*/ + qeth_start_ipa_source_mac(card); /* go on*/ + qeth_start_ipa_vlan(card); /* go on*/ + qeth_start_ipa_multicast(card); /* go on*/ + qeth_start_ipa_ipv6(card); /* go on*/ + qeth_start_ipa_broadcast(card); /* go on*/ + qeth_start_ipa_checksum(card); /* go on*/ + qeth_start_ipa_tso(card); /* go on*/ + return 0; +} + +static int +qeth_send_setrouting(struct qeth_card *card, enum qeth_routing_types type, + enum qeth_prot_versions prot) +{ + int rc; + struct qeth_ipa_cmd *cmd; + struct qeth_cmd_buffer *iob; + + QETH_DBF_TEXT(trace,4,"setroutg"); + iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETRTG, prot); + cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); + cmd->data.setrtg.type = (type); + rc = qeth_send_ipa_cmd(card, iob, NULL, NULL); + + return rc; + +} + +static void +qeth_correct_routing_type(struct qeth_card *card, enum qeth_routing_types *type, + enum qeth_prot_versions prot) +{ + if (card->info.type == QETH_CARD_TYPE_IQD) { + switch (*type) { + case NO_ROUTER: + case PRIMARY_CONNECTOR: + case SECONDARY_CONNECTOR: + case MULTICAST_ROUTER: + return; + default: + goto out_inval; + } + } else { + switch (*type) { + case NO_ROUTER: + case PRIMARY_ROUTER: + case SECONDARY_ROUTER: + return; + case MULTICAST_ROUTER: + if (qeth_is_ipafunc_supported(card, prot, + IPA_OSA_MC_ROUTER)) + return; + default: + goto out_inval; + } + } +out_inval: + PRINT_WARN("Routing type '%s' not supported for interface %s.\n" + "Router status set to 'no router'.\n", + ((*type == PRIMARY_ROUTER)? "primary router" : + (*type == SECONDARY_ROUTER)? "secondary router" : + (*type == PRIMARY_CONNECTOR)? "primary connector" : + (*type == SECONDARY_CONNECTOR)? "secondary connector" : + (*type == MULTICAST_ROUTER)? "multicast router" : + "unknown"), + card->dev->name); + *type = NO_ROUTER; +} + +int +qeth_setrouting_v4(struct qeth_card *card) +{ + int rc; + + QETH_DBF_TEXT(trace,3,"setrtg4"); + + qeth_correct_routing_type(card, &card->options.route4.type, + QETH_PROT_IPV4); + + rc = qeth_send_setrouting(card, card->options.route4.type, + QETH_PROT_IPV4); + if (rc) { + card->options.route4.type = NO_ROUTER; + PRINT_WARN("Error (0x%04x) while setting routing type on %s. 
" + "Type set to 'no router'.\n", + rc, QETH_CARD_IFNAME(card)); + } + return rc; +} + +int +qeth_setrouting_v6(struct qeth_card *card) +{ + int rc = 0; + + QETH_DBF_TEXT(trace,3,"setrtg6"); +#ifdef CONFIG_QETH_IPV6 + + qeth_correct_routing_type(card, &card->options.route6.type, + QETH_PROT_IPV6); + + if ((card->options.route6.type == NO_ROUTER) || + ((card->info.type == QETH_CARD_TYPE_OSAE) && + (card->options.route6.type == MULTICAST_ROUTER) && + !qeth_is_supported6(card,IPA_OSA_MC_ROUTER))) + return 0; + rc = qeth_send_setrouting(card, card->options.route6.type, + QETH_PROT_IPV6); + if (rc) { + card->options.route6.type = NO_ROUTER; + PRINT_WARN("Error (0x%04x) while setting routing type on %s. " + "Type set to 'no router'.\n", + rc, QETH_CARD_IFNAME(card)); + } +#endif + return rc; +} + +int +qeth_set_large_send(struct qeth_card *card) +{ + int rc = 0; + + if (card->dev == NULL) + return 0; + + netif_stop_queue(card->dev); + switch (card->options.large_send) { + case QETH_LARGE_SEND_EDDP: + card->dev->features |= NETIF_F_TSO | NETIF_F_SG; + break; + case QETH_LARGE_SEND_TSO: + if (qeth_is_supported(card, IPA_OUTBOUND_TSO)){ + card->dev->features |= NETIF_F_TSO | NETIF_F_SG; + } else { + PRINT_WARN("TSO not supported on %s. " + "large_send set to 'no'.\n", + card->dev->name); + card->dev->features &= ~(NETIF_F_TSO | NETIF_F_SG); + card->options.large_send = QETH_LARGE_SEND_NO; + rc = -EOPNOTSUPP; + } + break; + default: /* includes QETH_LARGE_SEND_NO */ + card->dev->features &= ~(NETIF_F_TSO | NETIF_F_SG); + break; + } + + netif_wake_queue(card->dev); + return rc; +} + +/* + * softsetup card: init IPA stuff + */ +static int +qeth_softsetup_card(struct qeth_card *card) +{ + int rc; + + QETH_DBF_TEXT(setup, 2, "softsetp"); + + if ((rc = qeth_send_startlan(card, QETH_PROT_IPV4))){ + QETH_DBF_TEXT_(setup, 2, "1err%d", rc); + if (rc == 0xe080){ + PRINT_WARN("LAN on card %s if offline! 
" + "Continuing softsetup.\n", + CARD_BUS_ID(card)); + card->lan_online = 0; + } else + return rc; + } else + card->lan_online = 1; + if (card->options.layer2) { + card->dev->features |= + NETIF_F_HW_VLAN_FILTER | + NETIF_F_HW_VLAN_TX | + NETIF_F_HW_VLAN_RX; + card->dev->flags|=IFF_MULTICAST|IFF_BROADCAST; + card->info.broadcast_capable=1; + if ((rc = qeth_layer2_initialize(card))) { + QETH_DBF_TEXT_(setup, 2, "L2err%d", rc); + return rc; + } +#ifdef CONFIG_QETH_VLAN + qeth_layer2_process_vlans(card, 0); +#endif + goto out; + } + if ((card->options.large_send == QETH_LARGE_SEND_EDDP) || + (card->options.large_send == QETH_LARGE_SEND_TSO)) + card->dev->features |= NETIF_F_TSO | NETIF_F_SG; + else + card->dev->features &= ~(NETIF_F_TSO | NETIF_F_SG); + + if ((rc = qeth_setadapter_parms(card))) + QETH_DBF_TEXT_(setup, 2, "2err%d", rc); + if ((rc = qeth_start_ipassists(card))) + QETH_DBF_TEXT_(setup, 2, "3err%d", rc); + if ((rc = qeth_setrouting_v4(card))) + QETH_DBF_TEXT_(setup, 2, "4err%d", rc); + if ((rc = qeth_setrouting_v6(card))) + QETH_DBF_TEXT_(setup, 2, "5err%d", rc); +out: + netif_stop_queue(card->dev); + return 0; +} + +#ifdef CONFIG_QETH_IPV6 +static int +qeth_get_unique_id_cb(struct qeth_card *card, struct qeth_reply *reply, + unsigned long data) +{ + struct qeth_ipa_cmd *cmd; + + cmd = (struct qeth_ipa_cmd *) data; + if (cmd->hdr.return_code == 0) + card->info.unique_id = *((__u16 *) + &cmd->data.create_destroy_addr.unique_id[6]); + else { + card->info.unique_id = UNIQUE_ID_IF_CREATE_ADDR_FAILED | + UNIQUE_ID_NOT_BY_CARD; + PRINT_WARN("couldn't get a unique id from the card on device " + "%s (result=x%x), using default id. ipv6 " + "autoconfig on other lpars may lead to duplicate " + "ip addresses. please use manually " + "configured ones.\n", + CARD_BUS_ID(card), cmd->hdr.return_code); + } + return 0; +} +#endif + +static int +qeth_put_unique_id(struct qeth_card *card) +{ + + int rc = 0; +#ifdef CONFIG_QETH_IPV6 + struct qeth_cmd_buffer *iob; + struct qeth_ipa_cmd *cmd; + + QETH_DBF_TEXT(trace,2,"puniqeid"); + + if ((card->info.unique_id & UNIQUE_ID_NOT_BY_CARD) == + UNIQUE_ID_NOT_BY_CARD) + return -1; + iob = qeth_get_ipacmd_buffer(card, IPA_CMD_DESTROY_ADDR, + QETH_PROT_IPV6); + cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); + *((__u16 *) &cmd->data.create_destroy_addr.unique_id[6]) = + card->info.unique_id; + memcpy(&cmd->data.create_destroy_addr.unique_id[0], + card->dev->dev_addr, OSA_ADDR_LEN); + rc = qeth_send_ipa_cmd(card, iob, NULL, NULL); +#else + card->info.unique_id = UNIQUE_ID_IF_CREATE_ADDR_FAILED | + UNIQUE_ID_NOT_BY_CARD; +#endif + return rc; +} + +/** + * Clear IP List + */ +static void +qeth_clear_ip_list(struct qeth_card *card, int clean, int recover) +{ + struct qeth_ipaddr *addr, *tmp; + unsigned long flags; + + QETH_DBF_TEXT(trace,4,"clearip"); + spin_lock_irqsave(&card->ip_lock, flags); + /* clear todo list */ + list_for_each_entry_safe(addr, tmp, card->ip_tbd_list, entry){ + list_del(&addr->entry); + kfree(addr); + } + + while (!list_empty(&card->ip_list)) { + addr = list_entry(card->ip_list.next, + struct qeth_ipaddr, entry); + list_del_init(&addr->entry); + if (clean) { + spin_unlock_irqrestore(&card->ip_lock, flags); + qeth_deregister_addr_entry(card, addr); + spin_lock_irqsave(&card->ip_lock, flags); + } + if (!recover || addr->is_multicast) { + kfree(addr); + continue; + } + list_add_tail(&addr->entry, card->ip_tbd_list); + } + spin_unlock_irqrestore(&card->ip_lock, flags); +} + +static void +qeth_set_allowed_threads(struct qeth_card 
*card, unsigned long threads, + int clear_start_mask) +{ + unsigned long flags; + + spin_lock_irqsave(&card->thread_mask_lock, flags); + card->thread_allowed_mask = threads; + if (clear_start_mask) + card->thread_start_mask &= threads; + spin_unlock_irqrestore(&card->thread_mask_lock, flags); + wake_up(&card->wait_q); +} + +static inline int +qeth_threads_running(struct qeth_card *card, unsigned long threads) +{ + unsigned long flags; + int rc = 0; + + spin_lock_irqsave(&card->thread_mask_lock, flags); + rc = (card->thread_running_mask & threads); + spin_unlock_irqrestore(&card->thread_mask_lock, flags); + return rc; +} + +static int +qeth_wait_for_threads(struct qeth_card *card, unsigned long threads) +{ + return wait_event_interruptible(card->wait_q, + qeth_threads_running(card, threads) == 0); +} + +static int +qeth_stop_card(struct qeth_card *card) +{ + int rc = 0; + + QETH_DBF_TEXT(setup ,2,"stopcard"); + QETH_DBF_HEX(setup, 2, &card, sizeof(void *)); + + qeth_set_allowed_threads(card, 0, 1); + if (qeth_wait_for_threads(card, ~QETH_RECOVER_THREAD)) + return -ERESTARTSYS; + if (card->read.state == CH_STATE_UP && + card->write.state == CH_STATE_UP && + (card->state == CARD_STATE_UP)) { + rtnl_lock(); + dev_close(card->dev); + rtnl_unlock(); + if (!card->use_hard_stop) { + __u8 *mac = &card->dev->dev_addr[0]; + rc = qeth_layer2_send_delmac(card, mac); + QETH_DBF_TEXT_(setup, 2, "Lerr%d", rc); + if ((rc = qeth_send_stoplan(card))) + QETH_DBF_TEXT_(setup, 2, "1err%d", rc); + } + card->state = CARD_STATE_SOFTSETUP; + } + if (card->state == CARD_STATE_SOFTSETUP) { +#ifdef CONFIG_QETH_VLAN + if (card->options.layer2) + qeth_layer2_process_vlans(card, 1); +#endif + qeth_clear_ip_list(card, !card->use_hard_stop, 1); + qeth_clear_ipacmd_list(card); + card->state = CARD_STATE_HARDSETUP; + } + if (card->state == CARD_STATE_HARDSETUP) { + if ((!card->use_hard_stop) && + (!card->options.layer2)) + if ((rc = qeth_put_unique_id(card))) + QETH_DBF_TEXT_(setup, 2, "2err%d", rc); + qeth_qdio_clear_card(card, 0); + qeth_clear_qdio_buffers(card); + qeth_clear_working_pool_list(card); + card->state = CARD_STATE_DOWN; + } + if (card->state == CARD_STATE_DOWN) { + qeth_clear_cmd_buffers(&card->read); + qeth_clear_cmd_buffers(&card->write); + } + card->use_hard_stop = 0; + return rc; +} + + +static int +qeth_get_unique_id(struct qeth_card *card) +{ + int rc = 0; +#ifdef CONFIG_QETH_IPV6 + struct qeth_cmd_buffer *iob; + struct qeth_ipa_cmd *cmd; + + QETH_DBF_TEXT(setup, 2, "guniqeid"); + + if (!qeth_is_supported(card,IPA_IPV6)) { + card->info.unique_id = UNIQUE_ID_IF_CREATE_ADDR_FAILED | + UNIQUE_ID_NOT_BY_CARD; + return 0; + } + + iob = qeth_get_ipacmd_buffer(card, IPA_CMD_CREATE_ADDR, + QETH_PROT_IPV6); + cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); + *((__u16 *) &cmd->data.create_destroy_addr.unique_id[6]) = + card->info.unique_id; + + rc = qeth_send_ipa_cmd(card, iob, qeth_get_unique_id_cb, NULL); +#else + card->info.unique_id = UNIQUE_ID_IF_CREATE_ADDR_FAILED | + UNIQUE_ID_NOT_BY_CARD; +#endif + return rc; +} +static void +qeth_print_status_with_portname(struct qeth_card *card) +{ + char dbf_text[15]; + int i; + + sprintf(dbf_text, "%s", card->info.portname + 1); + for (i = 0; i < 8; i++) + dbf_text[i] = + (char) _ebcasc[(__u8) dbf_text[i]]; + dbf_text[8] = 0; + printk("qeth: Device %s/%s/%s is a%s card%s%s%s\n" + "with link type %s (portname: %s)\n", + CARD_RDEV_ID(card), + CARD_WDEV_ID(card), + CARD_DDEV_ID(card), + qeth_get_cardname(card), + (card->info.mcl_level[0]) ? 
" (level: " : "", + (card->info.mcl_level[0]) ? card->info.mcl_level : "", + (card->info.mcl_level[0]) ? ")" : "", + qeth_get_cardname_short(card), + dbf_text); + +} + +static void +qeth_print_status_no_portname(struct qeth_card *card) +{ + if (card->info.portname[0]) + printk("qeth: Device %s/%s/%s is a%s " + "card%s%s%s\nwith link type %s " + "(no portname needed by interface).\n", + CARD_RDEV_ID(card), + CARD_WDEV_ID(card), + CARD_DDEV_ID(card), + qeth_get_cardname(card), + (card->info.mcl_level[0]) ? " (level: " : "", + (card->info.mcl_level[0]) ? card->info.mcl_level : "", + (card->info.mcl_level[0]) ? ")" : "", + qeth_get_cardname_short(card)); + else + printk("qeth: Device %s/%s/%s is a%s " + "card%s%s%s\nwith link type %s.\n", + CARD_RDEV_ID(card), + CARD_WDEV_ID(card), + CARD_DDEV_ID(card), + qeth_get_cardname(card), + (card->info.mcl_level[0]) ? " (level: " : "", + (card->info.mcl_level[0]) ? card->info.mcl_level : "", + (card->info.mcl_level[0]) ? ")" : "", + qeth_get_cardname_short(card)); +} + +static void +qeth_print_status_message(struct qeth_card *card) +{ + switch (card->info.type) { + case QETH_CARD_TYPE_OSAE: + /* VM will use a non-zero first character + * to indicate a HiperSockets like reporting + * of the level OSA sets the first character to zero + * */ + if (!card->info.mcl_level[0]) { + sprintf(card->info.mcl_level,"%02x%02x", + card->info.mcl_level[2], + card->info.mcl_level[3]); + + card->info.mcl_level[QETH_MCL_LENGTH] = 0; + break; + } + /* fallthrough */ + case QETH_CARD_TYPE_IQD: + card->info.mcl_level[0] = (char) _ebcasc[(__u8) + card->info.mcl_level[0]]; + card->info.mcl_level[1] = (char) _ebcasc[(__u8) + card->info.mcl_level[1]]; + card->info.mcl_level[2] = (char) _ebcasc[(__u8) + card->info.mcl_level[2]]; + card->info.mcl_level[3] = (char) _ebcasc[(__u8) + card->info.mcl_level[3]]; + card->info.mcl_level[QETH_MCL_LENGTH] = 0; + break; + default: + memset(&card->info.mcl_level[0], 0, QETH_MCL_LENGTH + 1); + } + if (card->info.portname_required) + qeth_print_status_with_portname(card); + else + qeth_print_status_no_portname(card); +} + +static int +qeth_register_netdev(struct qeth_card *card) +{ + QETH_DBF_TEXT(setup, 3, "regnetd"); + if (card->dev->reg_state != NETREG_UNINITIALIZED) { + qeth_netdev_init(card->dev); + return 0; + } + /* sysfs magic */ + SET_NETDEV_DEV(card->dev, &card->gdev->dev); + return register_netdev(card->dev); +} + +static void +qeth_start_again(struct qeth_card *card) +{ + QETH_DBF_TEXT(setup ,2, "startag"); + + rtnl_lock(); + dev_open(card->dev); + rtnl_unlock(); + /* this also sets saved unicast addresses */ + qeth_set_multicast_list(card->dev); +} + + +/* Layer 2 specific stuff */ +#define IGNORE_PARAM_EQ(option,value,reset_value,msg) \ + if (card->options.option == value) { \ + PRINT_ERR("%s not supported with layer 2 " \ + "functionality, ignoring option on read" \ + "channel device %s .\n",msg,CARD_RDEV_ID(card)); \ + card->options.option = reset_value; \ + } +#define IGNORE_PARAM_NEQ(option,value,reset_value,msg) \ + if (card->options.option != value) { \ + PRINT_ERR("%s not supported with layer 2 " \ + "functionality, ignoring option on read" \ + "channel device %s .\n",msg,CARD_RDEV_ID(card)); \ + card->options.option = reset_value; \ + } + + +static void qeth_make_parameters_consistent(struct qeth_card *card) +{ + + if (card->options.layer2) { + if (card->info.type == QETH_CARD_TYPE_IQD) { + PRINT_ERR("Device %s does not support " \ + "layer 2 functionality. 
" \ + "Ignoring layer2 option.\n",CARD_BUS_ID(card)); + } + IGNORE_PARAM_NEQ(route4.type, NO_ROUTER, NO_ROUTER, + "Routing options are"); +#ifdef CONFIG_QETH_IPV6 + IGNORE_PARAM_NEQ(route6.type, NO_ROUTER, NO_ROUTER, + "Routing options are"); +#endif + IGNORE_PARAM_EQ(checksum_type, HW_CHECKSUMMING, + QETH_CHECKSUM_DEFAULT, + "Checksumming options are"); + IGNORE_PARAM_NEQ(broadcast_mode, QETH_TR_BROADCAST_ALLRINGS, + QETH_TR_BROADCAST_ALLRINGS, + "Broadcast mode options are"); + IGNORE_PARAM_NEQ(macaddr_mode, QETH_TR_MACADDR_NONCANONICAL, + QETH_TR_MACADDR_NONCANONICAL, + "Canonical MAC addr options are"); + IGNORE_PARAM_NEQ(fake_broadcast, 0, 0, + "Broadcast faking options are"); + IGNORE_PARAM_NEQ(add_hhlen, DEFAULT_ADD_HHLEN, + DEFAULT_ADD_HHLEN,"Option add_hhlen is"); + IGNORE_PARAM_NEQ(fake_ll, 0, 0,"Option fake_ll is"); + } +} + + +static int +qeth_set_online(struct ccwgroup_device *gdev) +{ + struct qeth_card *card = gdev->dev.driver_data; + int rc = 0; + enum qeth_card_states recover_flag; + + BUG_ON(!card); + QETH_DBF_TEXT(setup ,2, "setonlin"); + QETH_DBF_HEX(setup, 2, &card, sizeof(void *)); + + qeth_set_allowed_threads(card, QETH_RECOVER_THREAD, 1); + if (qeth_wait_for_threads(card, ~QETH_RECOVER_THREAD)){ + PRINT_WARN("set_online of card %s interrupted by user!\n", + CARD_BUS_ID(card)); + return -ERESTARTSYS; + } + + recover_flag = card->state; + if ((rc = ccw_device_set_online(CARD_RDEV(card))) || + (rc = ccw_device_set_online(CARD_WDEV(card))) || + (rc = ccw_device_set_online(CARD_DDEV(card)))){ + QETH_DBF_TEXT_(setup, 2, "1err%d", rc); + return -EIO; + } + + if (card->options.layer2) + qeth_make_parameters_consistent(card); + + if ((rc = qeth_hardsetup_card(card))){ + QETH_DBF_TEXT_(setup, 2, "2err%d", rc); + goto out_remove; + } + card->state = CARD_STATE_HARDSETUP; + + if (!(rc = qeth_query_ipassists(card,QETH_PROT_IPV4))) + rc = qeth_get_unique_id(card); + + if (rc && card->options.layer2 == 0) { + QETH_DBF_TEXT_(setup, 2, "3err%d", rc); + goto out_remove; + } + qeth_print_status_message(card); + if ((rc = qeth_register_netdev(card))){ + QETH_DBF_TEXT_(setup, 2, "4err%d", rc); + goto out_remove; + } + if ((rc = qeth_softsetup_card(card))){ + QETH_DBF_TEXT_(setup, 2, "5err%d", rc); + goto out_remove; + } + card->state = CARD_STATE_SOFTSETUP; + + if ((rc = qeth_init_qdio_queues(card))){ + QETH_DBF_TEXT_(setup, 2, "6err%d", rc); + goto out_remove; + } +/*maybe it was set offline without ifconfig down + * we can also use this state for recovery purposes*/ + qeth_set_allowed_threads(card, 0xffffffff, 0); + if (recover_flag == CARD_STATE_RECOVER) + qeth_start_again(card); + qeth_notify_processes(); + return 0; +out_remove: + card->use_hard_stop = 1; + qeth_stop_card(card); + ccw_device_set_offline(CARD_DDEV(card)); + ccw_device_set_offline(CARD_WDEV(card)); + ccw_device_set_offline(CARD_RDEV(card)); + if (recover_flag == CARD_STATE_RECOVER) + card->state = CARD_STATE_RECOVER; + else + card->state = CARD_STATE_DOWN; + return -ENODEV; +} + +static struct ccw_device_id qeth_ids[] = { + {CCW_DEVICE(0x1731, 0x01), driver_info:QETH_CARD_TYPE_OSAE}, + {CCW_DEVICE(0x1731, 0x05), driver_info:QETH_CARD_TYPE_IQD}, + {}, +}; +MODULE_DEVICE_TABLE(ccw, qeth_ids); + +struct device *qeth_root_dev = NULL; + +struct ccwgroup_driver qeth_ccwgroup_driver = { + .owner = THIS_MODULE, + .name = "qeth", + .driver_id = 0xD8C5E3C8, + .probe = qeth_probe_device, + .remove = qeth_remove_device, + .set_online = qeth_set_online, + .set_offline = qeth_set_offline, +}; + +struct ccw_driver qeth_ccw_driver = 
{ + .name = "qeth", + .ids = qeth_ids, + .probe = ccwgroup_probe_ccwdev, + .remove = ccwgroup_remove_ccwdev, +}; + + +static void +qeth_unregister_dbf_views(void) +{ + if (qeth_dbf_setup) + debug_unregister(qeth_dbf_setup); + if (qeth_dbf_qerr) + debug_unregister(qeth_dbf_qerr); + if (qeth_dbf_sense) + debug_unregister(qeth_dbf_sense); + if (qeth_dbf_misc) + debug_unregister(qeth_dbf_misc); + if (qeth_dbf_data) + debug_unregister(qeth_dbf_data); + if (qeth_dbf_control) + debug_unregister(qeth_dbf_control); + if (qeth_dbf_trace) + debug_unregister(qeth_dbf_trace); +} +static int +qeth_register_dbf_views(void) +{ + qeth_dbf_setup = debug_register(QETH_DBF_SETUP_NAME, + QETH_DBF_SETUP_INDEX, + QETH_DBF_SETUP_NR_AREAS, + QETH_DBF_SETUP_LEN); + qeth_dbf_misc = debug_register(QETH_DBF_MISC_NAME, + QETH_DBF_MISC_INDEX, + QETH_DBF_MISC_NR_AREAS, + QETH_DBF_MISC_LEN); + qeth_dbf_data = debug_register(QETH_DBF_DATA_NAME, + QETH_DBF_DATA_INDEX, + QETH_DBF_DATA_NR_AREAS, + QETH_DBF_DATA_LEN); + qeth_dbf_control = debug_register(QETH_DBF_CONTROL_NAME, + QETH_DBF_CONTROL_INDEX, + QETH_DBF_CONTROL_NR_AREAS, + QETH_DBF_CONTROL_LEN); + qeth_dbf_sense = debug_register(QETH_DBF_SENSE_NAME, + QETH_DBF_SENSE_INDEX, + QETH_DBF_SENSE_NR_AREAS, + QETH_DBF_SENSE_LEN); + qeth_dbf_qerr = debug_register(QETH_DBF_QERR_NAME, + QETH_DBF_QERR_INDEX, + QETH_DBF_QERR_NR_AREAS, + QETH_DBF_QERR_LEN); + qeth_dbf_trace = debug_register(QETH_DBF_TRACE_NAME, + QETH_DBF_TRACE_INDEX, + QETH_DBF_TRACE_NR_AREAS, + QETH_DBF_TRACE_LEN); + + if ((qeth_dbf_setup == NULL) || (qeth_dbf_misc == NULL) || + (qeth_dbf_data == NULL) || (qeth_dbf_control == NULL) || + (qeth_dbf_sense == NULL) || (qeth_dbf_qerr == NULL) || + (qeth_dbf_trace == NULL)) { + qeth_unregister_dbf_views(); + return -ENOMEM; + } + debug_register_view(qeth_dbf_setup, &debug_hex_ascii_view); + debug_set_level(qeth_dbf_setup, QETH_DBF_SETUP_LEVEL); + + debug_register_view(qeth_dbf_misc, &debug_hex_ascii_view); + debug_set_level(qeth_dbf_misc, QETH_DBF_MISC_LEVEL); + + debug_register_view(qeth_dbf_data, &debug_hex_ascii_view); + debug_set_level(qeth_dbf_data, QETH_DBF_DATA_LEVEL); + + debug_register_view(qeth_dbf_control, &debug_hex_ascii_view); + debug_set_level(qeth_dbf_control, QETH_DBF_CONTROL_LEVEL); + + debug_register_view(qeth_dbf_sense, &debug_hex_ascii_view); + debug_set_level(qeth_dbf_sense, QETH_DBF_SENSE_LEVEL); + + debug_register_view(qeth_dbf_qerr, &debug_hex_ascii_view); + debug_set_level(qeth_dbf_qerr, QETH_DBF_QERR_LEVEL); + + debug_register_view(qeth_dbf_trace, &debug_hex_ascii_view); + debug_set_level(qeth_dbf_trace, QETH_DBF_TRACE_LEVEL); + + return 0; +} + +#ifdef CONFIG_QETH_IPV6 +extern struct neigh_table arp_tbl; +static struct neigh_ops *arp_direct_ops; +static int (*qeth_old_arp_constructor) (struct neighbour *); + +static struct neigh_ops arp_direct_ops_template = { + .family = AF_INET, + .destructor = NULL, + .solicit = NULL, + .error_report = NULL, + .output = dev_queue_xmit, + .connected_output = dev_queue_xmit, + .hh_output = dev_queue_xmit, + .queue_xmit = dev_queue_xmit +}; + +static int +qeth_arp_constructor(struct neighbour *neigh) +{ + struct net_device *dev = neigh->dev; + struct in_device *in_dev; + struct neigh_parms *parms; + struct qeth_card *card; + + card = qeth_get_card_from_dev(dev); + if (card == NULL) + goto out; + if((card->options.layer2) || + (card->dev->hard_header == qeth_fake_header)) + goto out; + + rcu_read_lock(); + in_dev = rcu_dereference(__in_dev_get(dev)); + if (in_dev == NULL) { + rcu_read_unlock(); + return 
-EINVAL; + } + + parms = in_dev->arp_parms; + __neigh_parms_put(neigh->parms); + neigh->parms = neigh_parms_clone(parms); + rcu_read_unlock(); + + neigh->type = inet_addr_type(*(u32 *) neigh->primary_key); + neigh->nud_state = NUD_NOARP; + neigh->ops = arp_direct_ops; + neigh->output = neigh->ops->queue_xmit; + return 0; +out: + return qeth_old_arp_constructor(neigh); +} +#endif /*CONFIG_QETH_IPV6*/ + +/* + * IP address takeover related functions + */ +static void +qeth_clear_ipato_list(struct qeth_card *card) +{ + struct qeth_ipato_entry *ipatoe, *tmp; + unsigned long flags; + + spin_lock_irqsave(&card->ip_lock, flags); + list_for_each_entry_safe(ipatoe, tmp, &card->ipato.entries, entry) { + list_del(&ipatoe->entry); + kfree(ipatoe); + } + spin_unlock_irqrestore(&card->ip_lock, flags); +} + +int +qeth_add_ipato_entry(struct qeth_card *card, struct qeth_ipato_entry *new) +{ + struct qeth_ipato_entry *ipatoe; + unsigned long flags; + int rc = 0; + + QETH_DBF_TEXT(trace, 2, "addipato"); + spin_lock_irqsave(&card->ip_lock, flags); + list_for_each_entry(ipatoe, &card->ipato.entries, entry){ + if (ipatoe->proto != new->proto) + continue; + if (!memcmp(ipatoe->addr, new->addr, + (ipatoe->proto == QETH_PROT_IPV4)? 4:16) && + (ipatoe->mask_bits == new->mask_bits)){ + PRINT_WARN("ipato entry already exists!\n"); + rc = -EEXIST; + break; + } + } + if (!rc) { + list_add_tail(&new->entry, &card->ipato.entries); + } + spin_unlock_irqrestore(&card->ip_lock, flags); + return rc; +} + +void +qeth_del_ipato_entry(struct qeth_card *card, enum qeth_prot_versions proto, + u8 *addr, int mask_bits) +{ + struct qeth_ipato_entry *ipatoe, *tmp; + unsigned long flags; + + QETH_DBF_TEXT(trace, 2, "delipato"); + spin_lock_irqsave(&card->ip_lock, flags); + list_for_each_entry_safe(ipatoe, tmp, &card->ipato.entries, entry){ + if (ipatoe->proto != proto) + continue; + if (!memcmp(ipatoe->addr, addr, + (proto == QETH_PROT_IPV4)? 4:16) && + (ipatoe->mask_bits == mask_bits)){ + list_del(&ipatoe->entry); + kfree(ipatoe); + } + } + spin_unlock_irqrestore(&card->ip_lock, flags); +} + +static inline void +qeth_convert_addr_to_bits(u8 *addr, u8 *bits, int len) +{ + int i, j; + u8 octet; + + for (i = 0; i < len; ++i){ + octet = addr[i]; + for (j = 7; j >= 0; --j){ + bits[i*8 + j] = octet & 1; + octet >>= 1; + } + } +} + +static int +qeth_is_addr_covered_by_ipato(struct qeth_card *card, struct qeth_ipaddr *addr) +{ + struct qeth_ipato_entry *ipatoe; + u8 addr_bits[128] = {0, }; + u8 ipatoe_bits[128] = {0, }; + int rc = 0; + + if (!card->ipato.enabled) + return 0; + + qeth_convert_addr_to_bits((u8 *) &addr->u, addr_bits, + (addr->proto == QETH_PROT_IPV4)? 4:16); + list_for_each_entry(ipatoe, &card->ipato.entries, entry){ + if (addr->proto != ipatoe->proto) + continue; + qeth_convert_addr_to_bits(ipatoe->addr, ipatoe_bits, + (ipatoe->proto==QETH_PROT_IPV4) ? + 4:16); + if (addr->proto == QETH_PROT_IPV4) + rc = !memcmp(addr_bits, ipatoe_bits, + min(32, ipatoe->mask_bits)); + else + rc = !memcmp(addr_bits, ipatoe_bits, + min(128, ipatoe->mask_bits)); + if (rc) + break; + } + /* invert? 
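invert4/invert6 flip the match result,
+ * turning the takeover list into a per-protocol exclusion list.
+ 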
*/ + if ((addr->proto == QETH_PROT_IPV4) && card->ipato.invert4) + rc = !rc; + else if ((addr->proto == QETH_PROT_IPV6) && card->ipato.invert6) + rc = !rc; + + return rc; +} + +/* + * VIPA related functions + */ +int +qeth_add_vipa(struct qeth_card *card, enum qeth_prot_versions proto, + const u8 *addr) +{ + struct qeth_ipaddr *ipaddr; + unsigned long flags; + int rc = 0; + + ipaddr = qeth_get_addr_buffer(proto); + if (ipaddr){ + if (proto == QETH_PROT_IPV4){ + QETH_DBF_TEXT(trace, 2, "addvipa4"); + memcpy(&ipaddr->u.a4.addr, addr, 4); + ipaddr->u.a4.mask = 0; +#ifdef CONFIG_QETH_IPV6 + } else if (proto == QETH_PROT_IPV6){ + QETH_DBF_TEXT(trace, 2, "addvipa6"); + memcpy(&ipaddr->u.a6.addr, addr, 16); + ipaddr->u.a6.pfxlen = 0; +#endif + } + ipaddr->type = QETH_IP_TYPE_VIPA; + ipaddr->set_flags = QETH_IPA_SETIP_VIPA_FLAG; + ipaddr->del_flags = QETH_IPA_DELIP_VIPA_FLAG; + } else + return -ENOMEM; + spin_lock_irqsave(&card->ip_lock, flags); + if (__qeth_address_exists_in_list(&card->ip_list, ipaddr, 0) || + __qeth_address_exists_in_list(card->ip_tbd_list, ipaddr, 0)) + rc = -EEXIST; + spin_unlock_irqrestore(&card->ip_lock, flags); + if (rc){ + PRINT_WARN("Cannot add VIPA. Address already exists!\n"); + return rc; + } + if (!qeth_add_ip(card, ipaddr)) + kfree(ipaddr); + if (qeth_set_thread_start_bit(card, QETH_SET_IP_THREAD) == 0) + schedule_work(&card->kernel_thread_starter); + return rc; +} + +void +qeth_del_vipa(struct qeth_card *card, enum qeth_prot_versions proto, + const u8 *addr) +{ + struct qeth_ipaddr *ipaddr; + + ipaddr = qeth_get_addr_buffer(proto); + if (ipaddr){ + if (proto == QETH_PROT_IPV4){ + QETH_DBF_TEXT(trace, 2, "delvipa4"); + memcpy(&ipaddr->u.a4.addr, addr, 4); + ipaddr->u.a4.mask = 0; +#ifdef CONFIG_QETH_IPV6 + } else if (proto == QETH_PROT_IPV6){ + QETH_DBF_TEXT(trace, 2, "delvipa6"); + memcpy(&ipaddr->u.a6.addr, addr, 16); + ipaddr->u.a6.pfxlen = 0; +#endif + } + ipaddr->type = QETH_IP_TYPE_VIPA; + } else + return; + if (!qeth_delete_ip(card, ipaddr)) + kfree(ipaddr); + if (qeth_set_thread_start_bit(card, QETH_SET_IP_THREAD) == 0) + schedule_work(&card->kernel_thread_starter); +} + +/* + * proxy ARP related functions + */ +int +qeth_add_rxip(struct qeth_card *card, enum qeth_prot_versions proto, + const u8 *addr) +{ + struct qeth_ipaddr *ipaddr; + unsigned long flags; + int rc = 0; + + ipaddr = qeth_get_addr_buffer(proto); + if (ipaddr){ + if (proto == QETH_PROT_IPV4){ + QETH_DBF_TEXT(trace, 2, "addrxip4"); + memcpy(&ipaddr->u.a4.addr, addr, 4); + ipaddr->u.a4.mask = 0; +#ifdef CONFIG_QETH_IPV6 + } else if (proto == QETH_PROT_IPV6){ + QETH_DBF_TEXT(trace, 2, "addrxip6"); + memcpy(&ipaddr->u.a6.addr, addr, 16); + ipaddr->u.a6.pfxlen = 0; +#endif + } + ipaddr->type = QETH_IP_TYPE_RXIP; + ipaddr->set_flags = QETH_IPA_SETIP_TAKEOVER_FLAG; + ipaddr->del_flags = 0; + } else + return -ENOMEM; + spin_lock_irqsave(&card->ip_lock, flags); + if (__qeth_address_exists_in_list(&card->ip_list, ipaddr, 0) || + __qeth_address_exists_in_list(card->ip_tbd_list, ipaddr, 0)) + rc = -EEXIST; + spin_unlock_irqrestore(&card->ip_lock, flags); + if (rc){ + PRINT_WARN("Cannot add RXIP. 
Address already exists!\n"); + return rc; + } + if (!qeth_add_ip(card, ipaddr)) + kfree(ipaddr); + if (qeth_set_thread_start_bit(card, QETH_SET_IP_THREAD) == 0) + schedule_work(&card->kernel_thread_starter); + return 0; +} + +void +qeth_del_rxip(struct qeth_card *card, enum qeth_prot_versions proto, + const u8 *addr) +{ + struct qeth_ipaddr *ipaddr; + + ipaddr = qeth_get_addr_buffer(proto); + if (ipaddr){ + if (proto == QETH_PROT_IPV4){ + QETH_DBF_TEXT(trace, 2, "addrxip4"); + memcpy(&ipaddr->u.a4.addr, addr, 4); + ipaddr->u.a4.mask = 0; +#ifdef CONFIG_QETH_IPV6 + } else if (proto == QETH_PROT_IPV6){ + QETH_DBF_TEXT(trace, 2, "addrxip6"); + memcpy(&ipaddr->u.a6.addr, addr, 16); + ipaddr->u.a6.pfxlen = 0; +#endif + } + ipaddr->type = QETH_IP_TYPE_RXIP; + } else + return; + if (!qeth_delete_ip(card, ipaddr)) + kfree(ipaddr); + if (qeth_set_thread_start_bit(card, QETH_SET_IP_THREAD) == 0) + schedule_work(&card->kernel_thread_starter); +} + +/** + * IP event handler + */ +static int +qeth_ip_event(struct notifier_block *this, + unsigned long event,void *ptr) +{ + struct in_ifaddr *ifa = (struct in_ifaddr *)ptr; + struct net_device *dev =(struct net_device *) ifa->ifa_dev->dev; + struct qeth_ipaddr *addr; + struct qeth_card *card; + + QETH_DBF_TEXT(trace,3,"ipevent"); + card = qeth_get_card_from_dev(dev); + if (!card) + return NOTIFY_DONE; + if (card->options.layer2) + return NOTIFY_DONE; + + addr = qeth_get_addr_buffer(QETH_PROT_IPV4); + if (addr != NULL) { + addr->u.a4.addr = ifa->ifa_address; + addr->u.a4.mask = ifa->ifa_mask; + addr->type = QETH_IP_TYPE_NORMAL; + } else + goto out; + + switch(event) { + case NETDEV_UP: + if (!qeth_add_ip(card, addr)) + kfree(addr); + break; + case NETDEV_DOWN: + if (!qeth_delete_ip(card, addr)) + kfree(addr); + break; + default: + break; + } + if (qeth_set_thread_start_bit(card, QETH_SET_IP_THREAD) == 0) + schedule_work(&card->kernel_thread_starter); +out: + return NOTIFY_DONE; +} + +static struct notifier_block qeth_ip_notifier = { + qeth_ip_event, + 0 +}; + +#ifdef CONFIG_QETH_IPV6 +/** + * IPv6 event handler + */ +static int +qeth_ip6_event(struct notifier_block *this, + unsigned long event,void *ptr) +{ + + struct inet6_ifaddr *ifa = (struct inet6_ifaddr *)ptr; + struct net_device *dev = (struct net_device *)ifa->idev->dev; + struct qeth_ipaddr *addr; + struct qeth_card *card; + + QETH_DBF_TEXT(trace,3,"ip6event"); + + card = qeth_get_card_from_dev(dev); + if (!card) + return NOTIFY_DONE; + if (!qeth_is_supported(card, IPA_IPV6)) + return NOTIFY_DONE; + + addr = qeth_get_addr_buffer(QETH_PROT_IPV6); + if (addr != NULL) { + memcpy(&addr->u.a6.addr, &ifa->addr, sizeof(struct in6_addr)); + addr->u.a6.pfxlen = ifa->prefix_len; + addr->type = QETH_IP_TYPE_NORMAL; + } else + goto out; + + switch(event) { + case NETDEV_UP: + if (!qeth_add_ip(card, addr)) + kfree(addr); + break; + case NETDEV_DOWN: + if (!qeth_delete_ip(card, addr)) + kfree(addr); + break; + default: + break; + } + if (qeth_set_thread_start_bit(card, QETH_SET_IP_THREAD) == 0) + schedule_work(&card->kernel_thread_starter); +out: + return NOTIFY_DONE; +} + +static struct notifier_block qeth_ip6_notifier = { + qeth_ip6_event, + 0 +}; +#endif + +static int +qeth_reboot_event(struct notifier_block *this, unsigned long event, void *ptr) +{ + + struct device *entry; + struct qeth_card *card; + + down_read(&qeth_ccwgroup_driver.driver.bus->subsys.rwsem); + list_for_each_entry(entry, &qeth_ccwgroup_driver.driver.devices, + driver_list) { + card = (struct qeth_card *) entry->driver_data; + 
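/* On reboot we only quiesce each card: drop its IP list without
+ * deregistering the addresses (clean=0, recover=0) and clear the
+ * QDIO queues, so the adapter is idle when the machine goes down. */
+ 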
qeth_clear_ip_list(card, 0, 0); + qeth_qdio_clear_card(card, 0); + } + up_read(&qeth_ccwgroup_driver.driver.bus->subsys.rwsem); + return NOTIFY_DONE; +} + + +static struct notifier_block qeth_reboot_notifier = { + qeth_reboot_event, + 0 +}; + +static int +qeth_register_notifiers(void) +{ + int r; + + QETH_DBF_TEXT(trace,5,"regnotif"); + if ((r = register_reboot_notifier(&qeth_reboot_notifier))) + return r; + if ((r = register_inetaddr_notifier(&qeth_ip_notifier))) + goto out_reboot; +#ifdef CONFIG_QETH_IPV6 + if ((r = register_inet6addr_notifier(&qeth_ip6_notifier))) + goto out_ipv4; +#endif + return 0; + +#ifdef CONFIG_QETH_IPV6 +out_ipv4: + unregister_inetaddr_notifier(&qeth_ip_notifier); +#endif +out_reboot: + unregister_reboot_notifier(&qeth_reboot_notifier); + return r; +} + +/** + * unregister all event notifiers + */ +static void +qeth_unregister_notifiers(void) +{ + + QETH_DBF_TEXT(trace,5,"unregnot"); + BUG_ON(unregister_reboot_notifier(&qeth_reboot_notifier)); + BUG_ON(unregister_inetaddr_notifier(&qeth_ip_notifier)); +#ifdef CONFIG_QETH_IPV6 + BUG_ON(unregister_inet6addr_notifier(&qeth_ip6_notifier)); +#endif /* QETH_IPV6 */ + +} + +#ifdef CONFIG_QETH_IPV6 +static int +qeth_ipv6_init(void) +{ + qeth_old_arp_constructor = arp_tbl.constructor; + write_lock(&arp_tbl.lock); + arp_tbl.constructor = qeth_arp_constructor; + write_unlock(&arp_tbl.lock); + + arp_direct_ops = (struct neigh_ops*) + kmalloc(sizeof(struct neigh_ops), GFP_KERNEL); + if (!arp_direct_ops) + return -ENOMEM; + + memcpy(arp_direct_ops, &arp_direct_ops_template, + sizeof(struct neigh_ops)); + + return 0; +} + +static void +qeth_ipv6_uninit(void) +{ + write_lock(&arp_tbl.lock); + arp_tbl.constructor = qeth_old_arp_constructor; + write_unlock(&arp_tbl.lock); + kfree(arp_direct_ops); +} +#endif /* CONFIG_QETH_IPV6 */ + +static void +qeth_sysfs_unregister(void) +{ + qeth_remove_driver_attributes(); + ccw_driver_unregister(&qeth_ccw_driver); + ccwgroup_driver_unregister(&qeth_ccwgroup_driver); + s390_root_dev_unregister(qeth_root_dev); +} +/** + * register qeth at sysfs + */ +static int +qeth_sysfs_register(void) +{ + int rc=0; + + rc = ccwgroup_driver_register(&qeth_ccwgroup_driver); + if (rc) + return rc; + rc = ccw_driver_register(&qeth_ccw_driver); + if (rc) + return rc; + rc = qeth_create_driver_attributes(); + if (rc) + return rc; + qeth_root_dev = s390_root_dev_register("qeth"); + if (IS_ERR(qeth_root_dev)) { + rc = PTR_ERR(qeth_root_dev); + return rc; + } + return 0; +} + +/*** + * init function + */ +static int __init +qeth_init(void) +{ + int rc=0; + + qeth_eyecatcher(); + PRINT_INFO("loading %s (%s/%s/%s/%s/%s/%s/%s %s %s)\n", + version, VERSION_QETH_C, VERSION_QETH_H, + VERSION_QETH_MPC_H, VERSION_QETH_MPC_C, + VERSION_QETH_FS_H, VERSION_QETH_PROC_C, + VERSION_QETH_SYS_C, QETH_VERSION_IPV6, + QETH_VERSION_VLAN); + + INIT_LIST_HEAD(&qeth_card_list.list); + INIT_LIST_HEAD(&qeth_notify_list); + spin_lock_init(&qeth_notify_lock); + rwlock_init(&qeth_card_list.rwlock); + + if (qeth_register_dbf_views()) + goto out_err; + if (qeth_sysfs_register()) + goto out_sysfs; + +#ifdef CONFIG_QETH_IPV6 + if (qeth_ipv6_init()) { + PRINT_ERR("Out of memory during ipv6 init.\n"); + goto out_sysfs; + } +#endif /* QETH_IPV6 */ + if (qeth_register_notifiers()) + goto out_ipv6; + if (qeth_create_procfs_entries()) + goto out_notifiers; + + return rc; + +out_notifiers: + qeth_unregister_notifiers(); +out_ipv6: +#ifdef CONFIG_QETH_IPV6 + qeth_ipv6_uninit(); +#endif /* QETH_IPV6 */ +out_sysfs: + qeth_sysfs_unregister(); + 
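/* Error labels unwind in reverse order of registration; the dbf
+ * views were created first, so they are torn down last. */
+ 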
qeth_unregister_dbf_views();
+out_err:
+ PRINT_ERR("Initialization failed\n");
+ return rc;
+}
+
+static void
+__exit qeth_exit(void)
+{
+ struct qeth_card *card, *tmp;
+ unsigned long flags;
+
+ QETH_DBF_TEXT(trace,1, "cleanup.");
+
+ /*
+ * We would not need to clean up our devices here, because the
+ * common device layer calls qeth_remove_device for each device
+ * as soon as we unregister our driver (done in qeth_sysfs_unregister).
+ * But we do cleanup here so we can do a "soft" shutdown of our cards.
+ * qeth_remove_device called by the common device layer would otherwise
+ * do a "hard" shutdown (card->use_hard_stop is set to one in
+ * qeth_remove_device).
+ */
+again:
+ read_lock_irqsave(&qeth_card_list.rwlock, flags);
+ list_for_each_entry_safe(card, tmp, &qeth_card_list.list, list){
+ read_unlock_irqrestore(&qeth_card_list.rwlock, flags);
+ qeth_set_offline(card->gdev);
+ qeth_remove_device(card->gdev);
+ goto again;
+ }
+ read_unlock_irqrestore(&qeth_card_list.rwlock, flags);
+#ifdef CONFIG_QETH_IPV6
+ qeth_ipv6_uninit();
+#endif
+ qeth_unregister_notifiers();
+ qeth_remove_procfs_entries();
+ qeth_sysfs_unregister();
+ qeth_unregister_dbf_views();
+ printk("qeth: removed\n");
+}
+
+EXPORT_SYMBOL(qeth_eyecatcher);
+module_init(qeth_init);
+module_exit(qeth_exit);
+MODULE_AUTHOR("Frank Pavlic <pavlic@de.ibm.com>");
+MODULE_DESCRIPTION("Linux on zSeries OSA Express and HiperSockets support\n" \
+ "Copyright 2000,2003 IBM Corporation\n");
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/s390/net/qeth_mpc.c b/drivers/s390/net/qeth_mpc.c
new file mode 100644
index 000000000000..f685ecc7da99
--- /dev/null
+++ b/drivers/s390/net/qeth_mpc.c
@@ -0,0 +1,168 @@
+/*
+ * linux/drivers/s390/net/qeth_mpc.c
+ *
+ * Linux on zSeries OSA Express and HiperSockets support
+ *
+ * Copyright 2000,2003 IBM Corporation
+ * Author(s): Frank Pavlic <pavlic@de.ibm.com>
+ * Thomas Spatzier <tspat@de.ibm.com>
+ *
+ */
+#include <asm/cio.h>
+#include "qeth_mpc.h"
+
+const char *VERSION_QETH_MPC_C = "$Revision: 1.11 $";
+
+unsigned char IDX_ACTIVATE_READ[]={
+ 0x00,0x00,0x80,0x00, 0x00,0x00,0x00,0x00,
+ 0x19,0x01,0x01,0x80, 0x00,0x00,0x00,0x00,
+ 0x00,0x00,0x00,0x00, 0x00,0x00,0xc8,0xc1,
+ 0xd3,0xd3,0xd6,0xd3, 0xc5,0x40,0x00,0x00,
+ 0x00,0x00
+};
+
+unsigned char IDX_ACTIVATE_WRITE[]={
+ 0x00,0x00,0x80,0x00, 0x00,0x00,0x00,0x00,
+ 0x15,0x01,0x01,0x80, 0x00,0x00,0x00,0x00,
+ 0xff,0xff,0x00,0x00, 0x00,0x00,0xc8,0xc1,
+ 0xd3,0xd3,0xd6,0xd3, 0xc5,0x40,0x00,0x00,
+ 0x00,0x00
+};
+
+unsigned char CM_ENABLE[]={
+ 0x00,0xe0,0x00,0x00, 0x00,0x00,0x00,0x01,
+ 0x00,0x00,0x00,0x14, 0x00,0x00,0x00,0x63,
+ 0x10,0x00,0x00,0x01,
+ 0x00,0x00,0x00,0x00,
+ 0x81,0x7e,0x00,0x01, 0x00,0x00,0x00,0x00,
+ 0x00,0x00,0x00,0x00, 0x00,0x24,0x00,0x23,
+ 0x00,0x00,0x23,0x05, 0x00,0x00,0x00,0x00,
+ 0x00,0x00,0x00,0x00, 0x00,0x00,0x00,0x00,
+ 0x01,0x00,0x00,0x23, 0x00,0x00,0x00,0x40,
+ 0x00,0x0c,0x41,0x02, 0x00,0x17,0x00,0x00,
+ 0x00,0x00,0x00,0x00,
+ 0x00,0x0b,0x04,0x01,
+ 0x7e,0x04,0x05,0x00, 0x01,0x01,0x0f,
+ 0x00,
+ 0x0c,0x04,0x02,0xff, 0xff,0xff,0xff,0xff,
+ 0xff,0xff,0xff
+};
+
+unsigned char CM_SETUP[]={
+ 0x00,0xe0,0x00,0x00, 0x00,0x00,0x00,0x02,
+ 0x00,0x00,0x00,0x14, 0x00,0x00,0x00,0x64,
+ 0x10,0x00,0x00,0x01,
+ 0x00,0x00,0x00,0x00,
+ 0x81,0x7e,0x00,0x01, 0x00,0x00,0x00,0x00,
+ 0x00,0x00,0x00,0x00, 0x00,0x24,0x00,0x24,
+ 0x00,0x00,0x24,0x05, 0x00,0x00,0x00,0x00,
+ 0x00,0x00,0x00,0x00, 0x00,0x00,0x00,0x00,
+ 0x01,0x00,0x00,0x24, 0x00,0x00,0x00,0x40,
+ 0x00,0x0c,0x41,0x04, 0x00,0x18,0x00,0x00,
+ 0x00,0x00,0x00,0x00,
+ 
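/* The remainder of the CM_SETUP template continues below. Like the
+ * other control-message templates in this file, the buffer is copied
+ * into a command area and patched at fixed offsets before it is sent
+ * (see the QETH_CM_SETUP_* accessor macros in qeth_mpc.h). */
+ 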
0x00,0x09,0x04,0x04, + 0x05,0x00,0x01,0x01, 0x11, + 0x00,0x09,0x04, + 0x05,0x05,0x00,0x00, 0x00,0x00, + 0x00,0x06, + 0x04,0x06,0xc8,0x00 +}; + +unsigned char ULP_ENABLE[]={ + 0x00,0xe0,0x00,0x00, 0x00,0x00,0x00,0x03, + 0x00,0x00,0x00,0x14, 0x00,0x00,0x00,0x6b, + 0x10,0x00,0x00,0x01, + 0x00,0x00,0x00,0x00, + 0x41,0x7e,0x00,0x01, 0x00,0x00,0x00,0x01, + 0x00,0x00,0x00,0x00, 0x00,0x24,0x00,0x2b, + 0x00,0x00,0x2b,0x05, 0x20,0x01,0x00,0x00, + 0x00,0x00,0x00,0x00, 0x00,0x00,0x00,0x00, + 0x01,0x00,0x00,0x2b, 0x00,0x00,0x00,0x40, + 0x00,0x0c,0x41,0x02, 0x00,0x1f,0x00,0x00, + 0x00,0x00,0x00,0x00, + 0x00,0x0b,0x04,0x01, + 0x03,0x04,0x05,0x00, 0x01,0x01,0x12, + 0x00, + 0x14,0x04,0x0a,0x00, 0x20,0x00,0x00,0xff, + 0xff,0x00,0x08,0xc8, 0xe8,0xc4,0xf1,0xc7, + 0xf1,0x00,0x00 +}; + +unsigned char ULP_SETUP[]={ + 0x00,0xe0,0x00,0x00, 0x00,0x00,0x00,0x04, + 0x00,0x00,0x00,0x14, 0x00,0x00,0x00,0x6c, + 0x10,0x00,0x00,0x01, + 0x00,0x00,0x00,0x00, + 0x41,0x7e,0x00,0x01, 0x00,0x00,0x00,0x02, + 0x00,0x00,0x00,0x01, 0x00,0x24,0x00,0x2c, + 0x00,0x00,0x2c,0x05, 0x20,0x01,0x00,0x00, + 0x00,0x00,0x00,0x00, 0x00,0x00,0x00,0x00, + 0x01,0x00,0x00,0x2c, 0x00,0x00,0x00,0x40, + 0x00,0x0c,0x41,0x04, 0x00,0x20,0x00,0x00, + 0x00,0x00,0x00,0x00, + 0x00,0x09,0x04,0x04, + 0x05,0x00,0x01,0x01, 0x14, + 0x00,0x09,0x04, + 0x05,0x05,0x30,0x01, 0x00,0x00, + 0x00,0x06, + 0x04,0x06,0x40,0x00, + 0x00,0x08,0x04,0x0b, + 0x00,0x00,0x00,0x00 +}; + +unsigned char DM_ACT[]={ + 0x00,0xe0,0x00,0x00, 0x00,0x00,0x00,0x05, + 0x00,0x00,0x00,0x14, 0x00,0x00,0x00,0x55, + 0x10,0x00,0x00,0x01, + 0x00,0x00,0x00,0x00, + 0x41,0x7e,0x00,0x01, 0x00,0x00,0x00,0x03, + 0x00,0x00,0x00,0x02, 0x00,0x24,0x00,0x15, + 0x00,0x00,0x2c,0x05, 0x20,0x01,0x00,0x00, + 0x00,0x00,0x00,0x00, 0x00,0x00,0x00,0x00, + 0x01,0x00,0x00,0x15, 0x00,0x00,0x00,0x40, + 0x00,0x0c,0x43,0x60, 0x00,0x09,0x00,0x00, + 0x00,0x00,0x00,0x00, + 0x00,0x09,0x04,0x04, + 0x05,0x40,0x01,0x01, 0x00 +}; + +unsigned char IPA_PDU_HEADER[]={ + 0x00,0xe0,0x00,0x00, 0x77,0x77,0x77,0x77, + 0x00,0x00,0x00,0x14, 0x00,0x00, + (IPA_PDU_HEADER_SIZE+sizeof(struct qeth_ipa_cmd))/256, + (IPA_PDU_HEADER_SIZE+sizeof(struct qeth_ipa_cmd))%256, + 0x10,0x00,0x00,0x01, 0x00,0x00,0x00,0x00, + 0xc1,0x03,0x00,0x01, 0x00,0x00,0x00,0x00, + 0x00,0x00,0x00,0x00, 0x00,0x24, + sizeof(struct qeth_ipa_cmd)/256, + sizeof(struct qeth_ipa_cmd)%256, + 0x00, + sizeof(struct qeth_ipa_cmd)/256, + sizeof(struct qeth_ipa_cmd),0x05, 0x77,0x77,0x77,0x77, + 0x00,0x00,0x00,0x00, 0x00,0x00,0x00,0x00, + 0x01,0x00, + sizeof(struct qeth_ipa_cmd)/256, + sizeof(struct qeth_ipa_cmd)%256, + 0x00,0x00,0x00,0x40, +}; + +unsigned char WRITE_CCW[]={ + 0x01,CCW_FLAG_SLI,0,0, + 0,0,0,0 +}; + +unsigned char READ_CCW[]={ + 0x02,CCW_FLAG_SLI,0,0, + 0,0,0,0 +}; + + + + + + + + + + + diff --git a/drivers/s390/net/qeth_mpc.h b/drivers/s390/net/qeth_mpc.h new file mode 100644 index 000000000000..3d916b5c5d09 --- /dev/null +++ b/drivers/s390/net/qeth_mpc.h @@ -0,0 +1,538 @@ +/* + * linux/drivers/s390/net/qeth_mpc.h + * + * Linux on zSeries OSA Express and HiperSockets support + * + * Copyright 2000,2003 IBM Corporation + * Author(s): Utz Bacher <utz.bacher@de.ibm.com> + * Thomas Spatzier <tspat@de.ibm.com> + * Frank Pavlic <pavlic@de.ibm.com> + * + */ +#ifndef __QETH_MPC_H__ +#define __QETH_MPC_H__ + +#include <asm/qeth.h> + +#define VERSION_QETH_MPC_H "$Revision: 1.43 $" + +extern const char *VERSION_QETH_MPC_C; + +#define IPA_PDU_HEADER_SIZE 0x40 +#define QETH_IPA_PDU_LEN_TOTAL(buffer) (buffer+0x0e) +#define QETH_IPA_PDU_LEN_PDU1(buffer) (buffer+0x26) +#define 
QETH_IPA_PDU_LEN_PDU2(buffer) (buffer+0x2a) +#define QETH_IPA_PDU_LEN_PDU3(buffer) (buffer+0x3a) + +extern unsigned char IPA_PDU_HEADER[]; +#define QETH_IPA_CMD_DEST_ADDR(buffer) (buffer+0x2c) + +#define IPA_CMD_LENGTH (IPA_PDU_HEADER_SIZE + sizeof(struct qeth_ipa_cmd)) + +#define QETH_SEQ_NO_LENGTH 4 +#define QETH_MPC_TOKEN_LENGTH 4 +#define QETH_MCL_LENGTH 4 +#define OSA_ADDR_LEN 6 + +#define QETH_TIMEOUT (10 * HZ) +#define QETH_IPA_TIMEOUT (45 * HZ) +#define QETH_IDX_COMMAND_SEQNO 0xffff0000 +#define SR_INFO_LEN 16 + +#define QETH_CLEAR_CHANNEL_PARM -10 +#define QETH_HALT_CHANNEL_PARM -11 + +/*****************************************************************************/ +/* IP Assist related definitions */ +/*****************************************************************************/ +#define IPA_CMD_INITIATOR_HOST 0x00 +#define IPA_CMD_INITIATOR_HYDRA 0x01 +#define IPA_CMD_PRIM_VERSION_NO 0x01 + +enum qeth_card_types { + QETH_CARD_TYPE_UNKNOWN = 0, + QETH_CARD_TYPE_OSAE = 10, + QETH_CARD_TYPE_IQD = 1234, +}; + +#define QETH_MPC_DIFINFO_LEN_INDICATES_LINK_TYPE 0x18 +/* only the first two bytes are looked at in qeth_get_cardname_short */ +enum qeth_link_types { + QETH_LINK_TYPE_FAST_ETH = 0x01, + QETH_LINK_TYPE_HSTR = 0x02, + QETH_LINK_TYPE_GBIT_ETH = 0x03, + QETH_LINK_TYPE_10GBIT_ETH = 0x10, + QETH_LINK_TYPE_LANE_ETH100 = 0x81, + QETH_LINK_TYPE_LANE_TR = 0x82, + QETH_LINK_TYPE_LANE_ETH1000 = 0x83, + QETH_LINK_TYPE_LANE = 0x88, + QETH_LINK_TYPE_ATM_NATIVE = 0x90, +}; + +enum qeth_tr_macaddr_modes { + QETH_TR_MACADDR_NONCANONICAL = 0, + QETH_TR_MACADDR_CANONICAL = 1, +}; + +enum qeth_tr_broadcast_modes { + QETH_TR_BROADCAST_ALLRINGS = 0, + QETH_TR_BROADCAST_LOCAL = 1, +}; + +/* these values match CHECKSUM_* in include/linux/skbuff.h */ +enum qeth_checksum_types { + SW_CHECKSUMMING = 0, /* TODO: set to bit flag used in IPA Command */ + HW_CHECKSUMMING = 1, + NO_CHECKSUMMING = 2, +}; +#define QETH_CHECKSUM_DEFAULT SW_CHECKSUMMING + +/* + * Routing stuff + */ +#define RESET_ROUTING_FLAG 0x10 /* indicate that routing type shall be set */ +enum qeth_routing_types { + NO_ROUTER = 0, /* TODO: set to bit flag used in IPA Command */ + PRIMARY_ROUTER = 1, + SECONDARY_ROUTER = 2, + MULTICAST_ROUTER = 3, + PRIMARY_CONNECTOR = 4, + SECONDARY_CONNECTOR = 5, +}; + + +/* IPA Commands */ +enum qeth_ipa_cmds { + IPA_CMD_STARTLAN = 0x01, + IPA_CMD_STOPLAN = 0x02, + IPA_CMD_SETVMAC = 0x21, + IPA_CMD_DELVMAC = 0x22, + IPA_CMD_SETGMAC = 0x23, + IPA_CMD_DELGMAC = 0x24, + IPA_CMD_SETVLAN = 0x25, + IPA_CMD_DELVLAN = 0x26, + IPA_CMD_SETIP = 0xb1, + IPA_CMD_DELIP = 0xb7, + IPA_CMD_QIPASSIST = 0xb2, + IPA_CMD_SETASSPARMS = 0xb3, + IPA_CMD_SETIPM = 0xb4, + IPA_CMD_DELIPM = 0xb5, + IPA_CMD_SETRTG = 0xb6, + IPA_CMD_SETADAPTERPARMS = 0xb8, + IPA_CMD_IPFRAME = 0xb9, + IPA_CMD_ADD_ADDR_ENTRY = 0xc1, + IPA_CMD_DELETE_ADDR_ENTRY = 0xc2, + IPA_CMD_CREATE_ADDR = 0xc3, + IPA_CMD_DESTROY_ADDR = 0xc4, + IPA_CMD_REGISTER_LOCAL_ADDR = 0xd1, + IPA_CMD_UNREGISTER_LOCAL_ADDR = 0xd2, +}; + +enum qeth_ip_ass_cmds { + IPA_CMD_ASS_START = 0x0001, + IPA_CMD_ASS_STOP = 0x0002, + IPA_CMD_ASS_CONFIGURE = 0x0003, + IPA_CMD_ASS_ENABLE = 0x0004, +}; + +enum qeth_arp_process_subcmds { + IPA_CMD_ASS_ARP_SET_NO_ENTRIES = 0x0003, + IPA_CMD_ASS_ARP_QUERY_CACHE = 0x0004, + IPA_CMD_ASS_ARP_ADD_ENTRY = 0x0005, + IPA_CMD_ASS_ARP_REMOVE_ENTRY = 0x0006, + IPA_CMD_ASS_ARP_FLUSH_CACHE = 0x0007, + IPA_CMD_ASS_ARP_QUERY_INFO = 0x0104, + IPA_CMD_ASS_ARP_QUERY_STATS = 0x0204, +}; + +/* Return Codes for IPA Commands */ +enum qeth_ipa_return_codes { + 
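/* The 0x000x values are generic IPA results; the 0xe0xx values are
+ * adapter-reported LAN/address errors. IPA_RC_LAN_OFFLINE (0xe080)
+ * is the code qeth_softsetup_card tolerates after STARTLAN, so a
+ * card whose LAN is offline can still finish softsetup. */
+ 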
IPA_RC_SUCCESS = 0x0000, + IPA_RC_NOTSUPP = 0x0001, + IPA_RC_NO_ACCESS = 0x0002, + IPA_RC_FAILED = 0x0003, + IPA_RC_DATA_MISMATCH = 0xe001, + IPA_RC_INVALID_LAN_TYPE = 0xe003, + IPA_RC_INVALID_LAN_NO = 0xe004, + IPA_RC_IPADDR_ALREADY_REG = 0xe005, + IPA_RC_IPADDR_TABLE_FULL = 0xe006, + IPA_RC_IPADDR_ALREADY_USED = 0xe00a, + IPA_RC_ASSNO_NOT_SUPP = 0xe00d, + IPA_RC_ASSCMD_START_FAILED = 0xe00e, + IPA_RC_ASSCMD_PART_SUCCESS = 0xe00f, + IPA_RC_IPADDR_NOT_DEFINED = 0xe010, + IPA_RC_LAN_OFFLINE = 0xe080, +}; + +/* IPA function flags; each flag marks availability of respective function */ +enum qeth_ipa_funcs { + IPA_ARP_PROCESSING = 0x00000001L, + IPA_INBOUND_CHECKSUM = 0x00000002L, + IPA_OUTBOUND_CHECKSUM = 0x00000004L, + IPA_IP_FRAGMENTATION = 0x00000008L, + IPA_FILTERING = 0x00000010L, + IPA_IPV6 = 0x00000020L, + IPA_MULTICASTING = 0x00000040L, + IPA_IP_REASSEMBLY = 0x00000080L, + IPA_QUERY_ARP_COUNTERS = 0x00000100L, + IPA_QUERY_ARP_ADDR_INFO = 0x00000200L, + IPA_SETADAPTERPARMS = 0x00000400L, + IPA_VLAN_PRIO = 0x00000800L, + IPA_PASSTHRU = 0x00001000L, + IPA_FULL_VLAN = 0x00004000L, + IPA_SOURCE_MAC = 0x00010000L, + IPA_OSA_MC_ROUTER = 0x00020000L, + IPA_QUERY_ARP_ASSIST = 0x00040000L, + IPA_INBOUND_TSO = 0x00080000L, + IPA_OUTBOUND_TSO = 0x00100000L, +}; + +/* SETIP/DELIP IPA Command: ***************************************************/ +enum qeth_ipa_setdelip_flags { + QETH_IPA_SETDELIP_DEFAULT = 0x00L, /* default */ + QETH_IPA_SETIP_VIPA_FLAG = 0x01L, /* no grat. ARP */ + QETH_IPA_SETIP_TAKEOVER_FLAG = 0x02L, /* nofail on grat. ARP */ + QETH_IPA_DELIP_ADDR_2_B_TAKEN_OVER = 0x20L, + QETH_IPA_DELIP_VIPA_FLAG = 0x40L, + QETH_IPA_DELIP_ADDR_NEEDS_SETIP = 0x80L, +}; + +/* SETADAPTER IPA Command: ****************************************************/ +enum qeth_ipa_setadp_cmd { + IPA_SETADP_QUERY_COMMANDS_SUPPORTED = 0x01, + IPA_SETADP_ALTER_MAC_ADDRESS = 0x02, + IPA_SETADP_ADD_DELETE_GROUP_ADDRESS = 0x04, + IPA_SETADP_ADD_DELETE_FUNCTIONAL_ADDR = 0x08, + IPA_SETADP_SET_ADDRESSING_MODE = 0x10, + IPA_SETADP_SET_CONFIG_PARMS = 0x20, + IPA_SETADP_SET_CONFIG_PARMS_EXTENDED = 0x40, + IPA_SETADP_SET_BROADCAST_MODE = 0x80, + IPA_SETADP_SEND_OSA_MESSAGE = 0x0100, + IPA_SETADP_SET_SNMP_CONTROL = 0x0200, + IPA_SETADP_READ_SNMP_PARMS = 0x0400, + IPA_SETADP_WRITE_SNMP_PARMS = 0x0800, + IPA_SETADP_QUERY_CARD_INFO = 0x1000, +}; +enum qeth_ipa_mac_ops { + CHANGE_ADDR_READ_MAC = 0, + CHANGE_ADDR_REPLACE_MAC = 1, + CHANGE_ADDR_ADD_MAC = 2, + CHANGE_ADDR_DEL_MAC = 4, + CHANGE_ADDR_RESET_MAC = 8, +}; +enum qeth_ipa_addr_ops { + CHANGE_ADDR_READ_ADDR = 0, + CHANGE_ADDR_ADD_ADDR = 1, + CHANGE_ADDR_DEL_ADDR = 2, + CHANGE_ADDR_FLUSH_ADDR_TABLE = 4, + + +}; +/* (SET)DELIP(M) IPA stuff ***************************************************/ +struct qeth_ipacmd_setdelip4 { + __u8 ip_addr[4]; + __u8 mask[4]; + __u32 flags; +} __attribute__ ((packed)); + +struct qeth_ipacmd_setdelip6 { + __u8 ip_addr[16]; + __u8 mask[16]; + __u32 flags; +} __attribute__ ((packed)); + +struct qeth_ipacmd_setdelipm { + __u8 mac[6]; + __u8 padding[2]; + __u8 ip6[12]; + __u8 ip4[4]; +} __attribute__ ((packed)); + +struct qeth_ipacmd_layer2setdelmac { + __u32 mac_length; + __u8 mac[6]; +} __attribute__ ((packed)); + +struct qeth_ipacmd_layer2setdelvlan { + __u16 vlan_id; +} __attribute__ ((packed)); + + +struct qeth_ipacmd_setassparms_hdr { + __u32 assist_no; + __u16 length; + __u16 command_code; + __u16 return_code; + __u8 number_of_replies; + __u8 seq_no; +} __attribute__((packed)); + +struct qeth_arp_query_data { + __u16 request_bits; + __u16 
reply_bits; + __u32 no_entries; + char data; +} __attribute__((packed)); + +/* used as parameter for arp_query reply */ +struct qeth_arp_query_info { + __u32 udata_len; + __u16 mask_bits; + __u32 udata_offset; + __u32 no_entries; + char *udata; +}; + +/* SETASSPARMS IPA Command: */ +struct qeth_ipacmd_setassparms { + struct qeth_ipacmd_setassparms_hdr hdr; + union { + __u32 flags_32bit; + struct qeth_arp_cache_entry add_arp_entry; + struct qeth_arp_query_data query_arp; + __u8 ip[16]; + } data; +} __attribute__ ((packed)); + + +/* SETRTG IPA Command: ****************************************************/ +struct qeth_set_routing { + __u8 type; +}; + +/* SETADAPTERPARMS IPA Command: *******************************************/ +struct qeth_query_cmds_supp { + __u32 no_lantypes_supp; + __u8 lan_type; + __u8 reserved1[3]; + __u32 supported_cmds; + __u8 reserved2[8]; +} __attribute__ ((packed)); + +struct qeth_change_addr { + __u32 cmd; + __u32 addr_size; + __u32 no_macs; + __u8 addr[OSA_ADDR_LEN]; +} __attribute__ ((packed)); + + +struct qeth_snmp_cmd { + __u8 token[16]; + __u32 request; + __u32 interface; + __u32 returncode; + __u32 firmwarelevel; + __u32 seqno; + __u8 data; +} __attribute__ ((packed)); + +struct qeth_snmp_ureq_hdr { + __u32 data_len; + __u32 req_len; + __u32 reserved1; + __u32 reserved2; +} __attribute__ ((packed)); + +struct qeth_snmp_ureq { + struct qeth_snmp_ureq_hdr hdr; + struct qeth_snmp_cmd cmd; +} __attribute__((packed)); + +struct qeth_ipacmd_setadpparms_hdr { + __u32 supp_hw_cmds; + __u32 reserved1; + __u16 cmdlength; + __u16 reserved2; + __u32 command_code; + __u16 return_code; + __u8 used_total; + __u8 seq_no; + __u32 reserved3; +} __attribute__ ((packed)); + +struct qeth_ipacmd_setadpparms { + struct qeth_ipacmd_setadpparms_hdr hdr; + union { + struct qeth_query_cmds_supp query_cmds_supp; + struct qeth_change_addr change_addr; + struct qeth_snmp_cmd snmp; + __u32 mode; + } data; +} __attribute__ ((packed)); + +/* IPFRAME IPA Command: ***************************************************/ +/* TODO: define in analogy to commands define above */ + +/* ADD_ADDR_ENTRY IPA Command: ********************************************/ +/* TODO: define in analogy to commands define above */ + +/* DELETE_ADDR_ENTRY IPA Command: *****************************************/ +/* TODO: define in analogy to commands define above */ + +/* CREATE_ADDR IPA Command: ***********************************************/ +struct qeth_create_destroy_address { + __u8 unique_id[8]; +} __attribute__ ((packed)); + +/* REGISTER_LOCAL_ADDR IPA Command: ***************************************/ +/* TODO: define in analogy to commands define above */ + +/* UNREGISTER_LOCAL_ADDR IPA Command: *************************************/ +/* TODO: define in analogy to commands define above */ + +/* Header for each IPA command */ +struct qeth_ipacmd_hdr { + __u8 command; + __u8 initiator; + __u16 seqno; + __u16 return_code; + __u8 adapter_type; + __u8 rel_adapter_no; + __u8 prim_version_no; + __u8 param_count; + __u16 prot_version; + __u32 ipa_supported; + __u32 ipa_enabled; +} __attribute__ ((packed)); + +/* The IPA command itself */ +struct qeth_ipa_cmd { + struct qeth_ipacmd_hdr hdr; + union { + struct qeth_ipacmd_setdelip4 setdelip4; + struct qeth_ipacmd_setdelip6 setdelip6; + struct qeth_ipacmd_setdelipm setdelipm; + struct qeth_ipacmd_setassparms setassparms; + struct qeth_ipacmd_layer2setdelmac setdelmac; + struct qeth_ipacmd_layer2setdelvlan setdelvlan; + struct qeth_create_destroy_address 
create_destroy_addr;
+ struct qeth_ipacmd_setadpparms setadapterparms;
+ struct qeth_set_routing setrtg;
+ } data;
+} __attribute__ ((packed));
+
+/*
+ * special command for ARP processing.
+ * this is not included in the setassparms command above, because we
+ * would get a problem with the size of struct qeth_ipacmd_setassparms
+ * otherwise
+ */
+enum qeth_ipa_arp_return_codes {
+ QETH_IPA_ARP_RC_SUCCESS = 0x0000,
+ QETH_IPA_ARP_RC_FAILED = 0x0001,
+ QETH_IPA_ARP_RC_NOTSUPP = 0x0002,
+ QETH_IPA_ARP_RC_OUT_OF_RANGE = 0x0003,
+ QETH_IPA_ARP_RC_Q_NOTSUPP = 0x0004,
+ QETH_IPA_ARP_RC_Q_NO_DATA = 0x0008,
+};
+
+#define QETH_SETASS_BASE_LEN (sizeof(struct qeth_ipacmd_hdr) + \
+ sizeof(struct qeth_ipacmd_setassparms_hdr))
+#define QETH_IPA_ARP_DATA_POS(buffer) (buffer + IPA_PDU_HEADER_SIZE + \
+ QETH_SETASS_BASE_LEN)
+#define QETH_SETADP_BASE_LEN (sizeof(struct qeth_ipacmd_hdr) + \
+ sizeof(struct qeth_ipacmd_setadpparms_hdr))
+#define QETH_SNMP_SETADP_CMDLENGTH 16
+
+#define QETH_ARP_DATA_SIZE 3968
+#define QETH_ARP_CMD_LEN (QETH_ARP_DATA_SIZE + 8)
+/* Helper macros */
+#define IS_IPA_REPLY(cmd) (cmd->hdr.initiator == IPA_CMD_INITIATOR_HOST)
+
+/*****************************************************************************/
+/* END OF IP Assist related definitions */
+/*****************************************************************************/
+
+
+extern unsigned char WRITE_CCW[];
+extern unsigned char READ_CCW[];
+
+extern unsigned char CM_ENABLE[];
+#define CM_ENABLE_SIZE 0x63
+#define QETH_CM_ENABLE_ISSUER_RM_TOKEN(buffer) (buffer+0x2c)
+#define QETH_CM_ENABLE_FILTER_TOKEN(buffer) (buffer+0x53)
+#define QETH_CM_ENABLE_USER_DATA(buffer) (buffer+0x5b)
+
+#define QETH_CM_ENABLE_RESP_FILTER_TOKEN(buffer) \
+ (PDU_ENCAPSULATION(buffer)+ 0x13)
+
+
+extern unsigned char CM_SETUP[];
+#define CM_SETUP_SIZE 0x64
+#define QETH_CM_SETUP_DEST_ADDR(buffer) (buffer+0x2c)
+#define QETH_CM_SETUP_CONNECTION_TOKEN(buffer) (buffer+0x51)
+#define QETH_CM_SETUP_FILTER_TOKEN(buffer) (buffer+0x5a)
+
+#define QETH_CM_SETUP_RESP_DEST_ADDR(buffer) \
+ (PDU_ENCAPSULATION(buffer) + 0x1a)
+
+extern unsigned char ULP_ENABLE[];
+#define ULP_ENABLE_SIZE 0x6b
+#define QETH_ULP_ENABLE_LINKNUM(buffer) (buffer+0x61)
+#define QETH_ULP_ENABLE_DEST_ADDR(buffer) (buffer+0x2c)
+#define QETH_ULP_ENABLE_FILTER_TOKEN(buffer) (buffer+0x53)
+#define QETH_ULP_ENABLE_PORTNAME_AND_LL(buffer) (buffer+0x62)
+#define QETH_ULP_ENABLE_RESP_FILTER_TOKEN(buffer) \
+ (PDU_ENCAPSULATION(buffer) + 0x13)
+#define QETH_ULP_ENABLE_RESP_MAX_MTU(buffer) \
+ (PDU_ENCAPSULATION(buffer)+ 0x1f)
+#define QETH_ULP_ENABLE_RESP_DIFINFO_LEN(buffer) \
+ (PDU_ENCAPSULATION(buffer) + 0x17)
+#define QETH_ULP_ENABLE_RESP_LINK_TYPE(buffer) \
+ (PDU_ENCAPSULATION(buffer)+ 0x2b)
+/* Layer 2 definitions */
+#define QETH_PROT_LAYER2 0x08
+#define QETH_PROT_TCPIP 0x03
+#define QETH_ULP_ENABLE_PROT_TYPE(buffer) (buffer+0x50)
+#define QETH_IPA_CMD_PROT_TYPE(buffer) (buffer+0x19)
+
+extern unsigned char ULP_SETUP[];
+#define ULP_SETUP_SIZE 0x6c
+#define QETH_ULP_SETUP_DEST_ADDR(buffer) (buffer+0x2c)
+#define QETH_ULP_SETUP_CONNECTION_TOKEN(buffer) (buffer+0x51)
+#define QETH_ULP_SETUP_FILTER_TOKEN(buffer) (buffer+0x5a)
+#define QETH_ULP_SETUP_CUA(buffer) (buffer+0x68)
+#define QETH_ULP_SETUP_REAL_DEVADDR(buffer) (buffer+0x6a)
+
+#define QETH_ULP_SETUP_RESP_CONNECTION_TOKEN(buffer) \
+ (PDU_ENCAPSULATION(buffer)+0x1a)
+
+
+extern unsigned char DM_ACT[];
+#define DM_ACT_SIZE 0x55
+#define QETH_DM_ACT_DEST_ADDR(buffer) (buffer+0x2c)
+#define QETH_DM_ACT_CONNECTION_TOKEN(buffer) 
(buffer+0x51) + + + +#define QETH_TRANSPORT_HEADER_SEQ_NO(buffer) (buffer+4) +#define QETH_PDU_HEADER_SEQ_NO(buffer) (buffer+0x1c) +#define QETH_PDU_HEADER_ACK_SEQ_NO(buffer) (buffer+0x20) + +extern unsigned char IDX_ACTIVATE_READ[]; +extern unsigned char IDX_ACTIVATE_WRITE[]; + +#define IDX_ACTIVATE_SIZE 0x22 +#define QETH_IDX_ACT_ISSUER_RM_TOKEN(buffer) (buffer+0x0c) +#define QETH_IDX_NO_PORTNAME_REQUIRED(buffer) ((buffer)[0x0b]&0x80) +#define QETH_IDX_ACT_FUNC_LEVEL(buffer) (buffer+0x10) +#define QETH_IDX_ACT_DATASET_NAME(buffer) (buffer+0x16) +#define QETH_IDX_ACT_QDIO_DEV_CUA(buffer) (buffer+0x1e) +#define QETH_IDX_ACT_QDIO_DEV_REALADDR(buffer) (buffer+0x20) +#define QETH_IS_IDX_ACT_POS_REPLY(buffer) (((buffer)[0x08]&3)==2) +#define QETH_IDX_REPLY_LEVEL(buffer) (buffer+0x12) + +#define PDU_ENCAPSULATION(buffer) \ + (buffer + *(buffer + (*(buffer+0x0b)) + \ + *(buffer + *(buffer+0x0b)+0x11) +0x07)) + +#define IS_IPA(buffer) \ + ((buffer) && \ + ( *(buffer + ((*(buffer+0x0b))+4) )==0xc1) ) + +#define ADDR_FRAME_TYPE_DIX 1 +#define ADDR_FRAME_TYPE_802_3 2 +#define ADDR_FRAME_TYPE_TR_WITHOUT_SR 0x10 +#define ADDR_FRAME_TYPE_TR_WITH_SR 0x20 + +#endif diff --git a/drivers/s390/net/qeth_proc.c b/drivers/s390/net/qeth_proc.c new file mode 100644 index 000000000000..04719196fd20 --- /dev/null +++ b/drivers/s390/net/qeth_proc.c @@ -0,0 +1,495 @@ +/* + * + * linux/drivers/s390/net/qeth_fs.c ($Revision: 1.13 $) + * + * Linux on zSeries OSA Express and HiperSockets support + * This file contains code related to procfs. + * + * Copyright 2000,2003 IBM Corporation + * + * Author(s): Thomas Spatzier <tspat@de.ibm.com> + * + */ +#include <linux/module.h> +#include <linux/init.h> +#include <linux/proc_fs.h> +#include <linux/seq_file.h> +#include <linux/list.h> +#include <linux/rwsem.h> + +#include "qeth.h" +#include "qeth_mpc.h" +#include "qeth_fs.h" + +const char *VERSION_QETH_PROC_C = "$Revision: 1.13 $"; + +/***** /proc/qeth *****/ +#define QETH_PROCFILE_NAME "qeth" +static struct proc_dir_entry *qeth_procfile; + +static void * +qeth_procfile_seq_start(struct seq_file *s, loff_t *offset) +{ + struct list_head *next_card = NULL; + int i = 0; + + down_read(&qeth_ccwgroup_driver.driver.bus->subsys.rwsem); + + if (*offset == 0) + return SEQ_START_TOKEN; + + /* get card at pos *offset */ + list_for_each(next_card, &qeth_ccwgroup_driver.driver.devices) + if (++i == *offset) + return next_card; + + return NULL; +} + +static void +qeth_procfile_seq_stop(struct seq_file *s, void* it) +{ + up_read(&qeth_ccwgroup_driver.driver.bus->subsys.rwsem); +} + +static void * +qeth_procfile_seq_next(struct seq_file *s, void *it, loff_t *offset) +{ + struct list_head *next_card = NULL; + struct list_head *current_card; + + if (it == SEQ_START_TOKEN) { + next_card = qeth_ccwgroup_driver.driver.devices.next; + if (next_card->next == next_card) /* list empty */ + return NULL; + (*offset)++; + } else { + current_card = (struct list_head *)it; + if (current_card->next == &qeth_ccwgroup_driver.driver.devices) + return NULL; /* end of list reached */ + next_card = current_card->next; + (*offset)++; + } + + return next_card; +} + +static inline const char * +qeth_get_router_str(struct qeth_card *card, int ipv) +{ + int routing_type = 0; + + if (ipv == 4){ + routing_type = card->options.route4.type; + } else { +#ifdef CONFIG_QETH_IPV6 + routing_type = card->options.route6.type; +#else + return "n/a"; +#endif /* CONFIG_QETH_IPV6 */ + } + + if (routing_type == PRIMARY_ROUTER) + return "pri"; + else if (routing_type == 
SECONDARY_ROUTER) + return "sec"; + else if (routing_type == MULTICAST_ROUTER) { + if (card->info.broadcast_capable == QETH_BROADCAST_WITHOUT_ECHO) + return "mc+"; + return "mc"; + } else if (routing_type == PRIMARY_CONNECTOR) { + if (card->info.broadcast_capable == QETH_BROADCAST_WITHOUT_ECHO) + return "p+c"; + return "p.c"; + } else if (routing_type == SECONDARY_CONNECTOR) { + if (card->info.broadcast_capable == QETH_BROADCAST_WITHOUT_ECHO) + return "s+c"; + return "s.c"; + } else if (routing_type == NO_ROUTER) + return "no"; + else + return "unk"; +} + +static int +qeth_procfile_seq_show(struct seq_file *s, void *it) +{ + struct device *device; + struct qeth_card *card; + char tmp[12]; /* for qeth_get_prioq_str */ + + if (it == SEQ_START_TOKEN){ + seq_printf(s, "devices CHPID interface " + "cardtype port chksum prio-q'ing rtr4 " + "rtr6 fsz cnt\n"); + seq_printf(s, "-------------------------- ----- ---------- " + "-------------- ---- ------ ---------- ---- " + "---- ----- -----\n"); + } else { + device = list_entry(it, struct device, driver_list); + card = device->driver_data; + seq_printf(s, "%s/%s/%s x%02X %-10s %-14s %-4i ", + CARD_RDEV_ID(card), + CARD_WDEV_ID(card), + CARD_DDEV_ID(card), + card->info.chpid, + QETH_CARD_IFNAME(card), + qeth_get_cardname_short(card), + card->info.portno); + if (card->lan_online) + seq_printf(s, "%-6s %-10s %-4s %-4s %-5s %-5i\n", + qeth_get_checksum_str(card), + qeth_get_prioq_str(card, tmp), + qeth_get_router_str(card, 4), + qeth_get_router_str(card, 6), + qeth_get_bufsize_str(card), + card->qdio.in_buf_pool.buf_count); + else + seq_printf(s, " +++ LAN OFFLINE +++\n"); + } + return 0; +} + +static struct seq_operations qeth_procfile_seq_ops = { + .start = qeth_procfile_seq_start, + .stop = qeth_procfile_seq_stop, + .next = qeth_procfile_seq_next, + .show = qeth_procfile_seq_show, +}; + +static int +qeth_procfile_open(struct inode *inode, struct file *file) +{ + return seq_open(file, &qeth_procfile_seq_ops); +} + +static struct file_operations qeth_procfile_fops = { + .owner = THIS_MODULE, + .open = qeth_procfile_open, + .read = seq_read, + .llseek = seq_lseek, + .release = seq_release, +}; + +/***** /proc/qeth_perf *****/ +#define QETH_PERF_PROCFILE_NAME "qeth_perf" +static struct proc_dir_entry *qeth_perf_procfile; + +#ifdef CONFIG_QETH_PERF_STATS + +static void * +qeth_perf_procfile_seq_start(struct seq_file *s, loff_t *offset) +{ + struct list_head *next_card = NULL; + int i = 0; + + down_read(&qeth_ccwgroup_driver.driver.bus->subsys.rwsem); + /* get card at pos *offset */ + list_for_each(next_card, &qeth_ccwgroup_driver.driver.devices){ + if (i == *offset) + return next_card; + i++; + } + return NULL; +} + +static void +qeth_perf_procfile_seq_stop(struct seq_file *s, void* it) +{ + up_read(&qeth_ccwgroup_driver.driver.bus->subsys.rwsem); +} + +static void * +qeth_perf_procfile_seq_next(struct seq_file *s, void *it, loff_t *offset) +{ + struct list_head *current_card = (struct list_head *)it; + + if (current_card->next == &qeth_ccwgroup_driver.driver.devices) + return NULL; /* end of list reached */ + (*offset)++; + return current_card->next; +} + +static int +qeth_perf_procfile_seq_show(struct seq_file *s, void *it) +{ + struct device *device; + struct qeth_card *card; + + device = list_entry(it, struct device, driver_list); + card = device->driver_data; + seq_printf(s, "For card with devnos %s/%s/%s (%s):\n", + CARD_RDEV_ID(card), + CARD_WDEV_ID(card), + CARD_DDEV_ID(card), + QETH_CARD_IFNAME(card) + ); + seq_printf(s, " Skb's/buffers received 
: %li/%i\n"
+ " Skb's/buffers sent : %li/%i\n\n",
+ card->stats.rx_packets, card->perf_stats.bufs_rec,
+ card->stats.tx_packets, card->perf_stats.bufs_sent
+ );
+ seq_printf(s, " Skb's/buffers sent without packing : %li/%i\n"
+ " Skb's/buffers sent with packing : %i/%i\n\n",
+ card->stats.tx_packets - card->perf_stats.skbs_sent_pack,
+ card->perf_stats.bufs_sent - card->perf_stats.bufs_sent_pack,
+ card->perf_stats.skbs_sent_pack,
+ card->perf_stats.bufs_sent_pack
+ );
+ seq_printf(s, " Skbs sent in SG mode : %i\n"
+ " Skb fragments sent in SG mode : %i\n\n",
+ card->perf_stats.sg_skbs_sent,
+ card->perf_stats.sg_frags_sent);
+ seq_printf(s, " large_send tx (in Kbytes) : %i\n"
+ " large_send count : %i\n\n",
+ card->perf_stats.large_send_bytes >> 10,
+ card->perf_stats.large_send_cnt);
+ seq_printf(s, " Packing state changes no pkg.->packing : %i/%i\n"
+ " Watermarks L/H : %i/%i\n"
+ " Current buffer usage (outbound q's) : "
+ "%i/%i/%i/%i\n\n",
+ card->perf_stats.sc_dp_p, card->perf_stats.sc_p_dp,
+ QETH_LOW_WATERMARK_PACK, QETH_HIGH_WATERMARK_PACK,
+ atomic_read(&card->qdio.out_qs[0]->used_buffers),
+ (card->qdio.no_out_queues > 1)?
+ atomic_read(&card->qdio.out_qs[1]->used_buffers)
+ : 0,
+ (card->qdio.no_out_queues > 2)?
+ atomic_read(&card->qdio.out_qs[2]->used_buffers)
+ : 0,
+ (card->qdio.no_out_queues > 3)?
+ atomic_read(&card->qdio.out_qs[3]->used_buffers)
+ : 0
+ );
+ seq_printf(s, " Inbound handler time (in us) : %i\n"
+ " Inbound handler count : %i\n"
+ " Inbound do_QDIO time (in us) : %i\n"
+ " Inbound do_QDIO count : %i\n\n"
+ " Outbound handler time (in us) : %i\n"
+ " Outbound handler count : %i\n\n"
+ " Outbound time (in us, incl QDIO) : %i\n"
+ " Outbound count : %i\n"
+ " Outbound do_QDIO time (in us) : %i\n"
+ " Outbound do_QDIO count : %i\n\n",
+ card->perf_stats.inbound_time,
+ card->perf_stats.inbound_cnt,
+ card->perf_stats.inbound_do_qdio_time,
+ card->perf_stats.inbound_do_qdio_cnt,
+ card->perf_stats.outbound_handler_time,
+ card->perf_stats.outbound_handler_cnt,
+ card->perf_stats.outbound_time,
+ card->perf_stats.outbound_cnt,
+ card->perf_stats.outbound_do_qdio_time,
+ card->perf_stats.outbound_do_qdio_cnt
+ );
+ return 0;
+}
+
+static struct seq_operations qeth_perf_procfile_seq_ops = {
+ .start = qeth_perf_procfile_seq_start,
+ .stop = qeth_perf_procfile_seq_stop,
+ .next = qeth_perf_procfile_seq_next,
+ .show = qeth_perf_procfile_seq_show,
+};
+
+static int
+qeth_perf_procfile_open(struct inode *inode, struct file *file)
+{
+ return seq_open(file, &qeth_perf_procfile_seq_ops);
+}
+
+static struct file_operations qeth_perf_procfile_fops = {
+ .owner = THIS_MODULE,
+ .open = qeth_perf_procfile_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
+
+#define qeth_perf_procfile_created qeth_perf_procfile
+#else
+#define qeth_perf_procfile_created 1
+#endif /* CONFIG_QETH_PERF_STATS */
+
+/***** /proc/qeth_ipa_takeover *****/
+#define QETH_IPATO_PROCFILE_NAME "qeth_ipa_takeover"
+static struct proc_dir_entry *qeth_ipato_procfile;
+
+static void *
+qeth_ipato_procfile_seq_start(struct seq_file *s, loff_t *offset)
+{
+ struct list_head *next_card = NULL;
+ int i = 0;
+
+ down_read(&qeth_ccwgroup_driver.driver.bus->subsys.rwsem);
+ /* TODO: finish this */
+ /*
+ * maybe SEQ_START_TOKEN can be returned for offset 0
+ * to output driver settings then;
+ * else output the settings for the respective card
+ */
+ /* get card at pos *offset */
+ list_for_each(next_card, &qeth_ccwgroup_driver.driver.devices){
+ if (i == *offset)
+ return next_card;
+ i++;
+ }
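+ /* No card found at *offset; returning NULL ends the sequence. */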
+ return NULL; +} + +static void +qeth_ipato_procfile_seq_stop(struct seq_file *s, void* it) +{ + up_read(&qeth_ccwgroup_driver.driver.bus->subsys.rwsem); +} + +static void * +qeth_ipato_procfile_seq_next(struct seq_file *s, void *it, loff_t *offset) +{ + struct list_head *current_card = (struct list_head *)it; + + /* TODO: finish this */ + /* + * maybe SEQ_START_TOKEN can be returned for offset 0; + * output the driver settings then, + * else output the settings for the respective card + */ + if (current_card->next == &qeth_ccwgroup_driver.driver.devices) + return NULL; /* end of list reached */ + (*offset)++; + return current_card->next; +} + +static int +qeth_ipato_procfile_seq_show(struct seq_file *s, void *it) +{ + struct device *device; + struct qeth_card *card; + + /* TODO: finish this */ + /* + * maybe SEQ_START_TOKEN can be returned for offset 0; + * output the driver settings then, + * else output the settings for the respective card + */ + device = list_entry(it, struct device, driver_list); + card = device->driver_data; + + return 0; +} + +static struct seq_operations qeth_ipato_procfile_seq_ops = { + .start = qeth_ipato_procfile_seq_start, + .stop = qeth_ipato_procfile_seq_stop, + .next = qeth_ipato_procfile_seq_next, + .show = qeth_ipato_procfile_seq_show, +}; + +static int +qeth_ipato_procfile_open(struct inode *inode, struct file *file) +{ + return seq_open(file, &qeth_ipato_procfile_seq_ops); +} + +static struct file_operations qeth_ipato_procfile_fops = { + .owner = THIS_MODULE, + .open = qeth_ipato_procfile_open, + .read = seq_read, + .llseek = seq_lseek, + .release = seq_release, +}; + +int __init +qeth_create_procfs_entries(void) +{ + qeth_procfile = create_proc_entry(QETH_PROCFILE_NAME, + S_IFREG | 0444, NULL); + if (qeth_procfile) + qeth_procfile->proc_fops = &qeth_procfile_fops; + +#ifdef CONFIG_QETH_PERF_STATS + qeth_perf_procfile = create_proc_entry(QETH_PERF_PROCFILE_NAME, + S_IFREG | 0444, NULL); + if (qeth_perf_procfile) + qeth_perf_procfile->proc_fops = &qeth_perf_procfile_fops; +#endif /* CONFIG_QETH_PERF_STATS */ + + qeth_ipato_procfile = create_proc_entry(QETH_IPATO_PROCFILE_NAME, + S_IFREG | 0444, NULL); + if (qeth_ipato_procfile) + qeth_ipato_procfile->proc_fops = &qeth_ipato_procfile_fops; + + if (qeth_procfile && + qeth_ipato_procfile && + qeth_perf_procfile_created) + return 0; + else + return -ENOMEM; +} + +void __exit +qeth_remove_procfs_entries(void) +{ + if (qeth_procfile) + remove_proc_entry(QETH_PROCFILE_NAME, NULL); + if (qeth_perf_procfile) + remove_proc_entry(QETH_PERF_PROCFILE_NAME, NULL); + if (qeth_ipato_procfile) + remove_proc_entry(QETH_IPATO_PROCFILE_NAME, NULL); +} + + +/* ONLY FOR DEVELOPMENT!
-> build it as a separate module */ +/* +static void +qeth_create_sysfs_entries(void) +{ + struct device *dev; + + down_read(&qeth_ccwgroup_driver.driver.bus->subsys.rwsem); + + list_for_each_entry(dev, &qeth_ccwgroup_driver.driver.devices, + driver_list) + qeth_create_device_attributes(dev); + + up_read(&qeth_ccwgroup_driver.driver.bus->subsys.rwsem); +} + +static void +qeth_remove_sysfs_entries(void) +{ + struct device *dev; + + down_read(&qeth_ccwgroup_driver.driver.bus->subsys.rwsem); + + list_for_each_entry(dev, &qeth_ccwgroup_driver.driver.devices, + driver_list) + qeth_remove_device_attributes(dev); + + up_read(&qeth_ccwgroup_driver.driver.bus->subsys.rwsem); +} + +static int __init +qeth_fs_init(void) +{ + printk(KERN_INFO "qeth_fs_init\n"); + qeth_create_procfs_entries(); + qeth_create_sysfs_entries(); + + return 0; +} + +static void __exit +qeth_fs_exit(void) +{ + printk(KERN_INFO "qeth_fs_exit\n"); + qeth_remove_procfs_entries(); + qeth_remove_sysfs_entries(); +} + + +module_init(qeth_fs_init); +module_exit(qeth_fs_exit); + +MODULE_LICENSE("GPL"); +*/ diff --git a/drivers/s390/net/qeth_sys.c b/drivers/s390/net/qeth_sys.c new file mode 100644 index 000000000000..240348398211 --- /dev/null +++ b/drivers/s390/net/qeth_sys.c @@ -0,0 +1,1788 @@ +/* + * + * linux/drivers/s390/net/qeth_sys.c ($Revision: 1.51 $) + * + * Linux on zSeries OSA Express and HiperSockets support + * This file contains code related to sysfs. + * + * Copyright 2000,2003 IBM Corporation + * + * Author(s): Thomas Spatzier <tspat@de.ibm.com> + * Frank Pavlic <pavlic@de.ibm.com> + * + */ +#include <linux/list.h> +#include <linux/rwsem.h> + +#include <asm/ebcdic.h> + +#include "qeth.h" +#include "qeth_mpc.h" +#include "qeth_fs.h" + +const char *VERSION_QETH_SYS_C = "$Revision: 1.51 $"; + +/*****************************************************************************/ +/* */ +/* /sys-fs stuff UNDER DEVELOPMENT !!!
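+   Example usage from a shell, once the attributes below exist (the bus ID
+   0.0.f500 and the exact sysfs paths are illustrative assumptions, not
+   taken from this file):
+     echo primary_router > /sys/bus/ccwgroup/drivers/qeth/0.0.f500/route4
+     cat /sys/bus/ccwgroup/drivers/qeth/0.0.f500/state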
*/ +/* */ +/*****************************************************************************/ +//low/high watermark + +static ssize_t +qeth_dev_state_show(struct device *dev, char *buf) +{ + struct qeth_card *card = dev->driver_data; + if (!card) + return -EINVAL; + + switch (card->state) { + case CARD_STATE_DOWN: + return sprintf(buf, "DOWN\n"); + case CARD_STATE_HARDSETUP: + return sprintf(buf, "HARDSETUP\n"); + case CARD_STATE_SOFTSETUP: + return sprintf(buf, "SOFTSETUP\n"); + case CARD_STATE_UP: + if (card->lan_online) + return sprintf(buf, "UP (LAN ONLINE)\n"); + else + return sprintf(buf, "UP (LAN OFFLINE)\n"); + case CARD_STATE_RECOVER: + return sprintf(buf, "RECOVER\n"); + default: + return sprintf(buf, "UNKNOWN\n"); + } +} + +static DEVICE_ATTR(state, 0444, qeth_dev_state_show, NULL); + +static ssize_t +qeth_dev_chpid_show(struct device *dev, char *buf) +{ + struct qeth_card *card = dev->driver_data; + if (!card) + return -EINVAL; + + return sprintf(buf, "%02X\n", card->info.chpid); +} + +static DEVICE_ATTR(chpid, 0444, qeth_dev_chpid_show, NULL); + +static ssize_t +qeth_dev_if_name_show(struct device *dev, char *buf) +{ + struct qeth_card *card = dev->driver_data; + if (!card) + return -EINVAL; + return sprintf(buf, "%s\n", QETH_CARD_IFNAME(card)); +} + +static DEVICE_ATTR(if_name, 0444, qeth_dev_if_name_show, NULL); + +static ssize_t +qeth_dev_card_type_show(struct device *dev, char *buf) +{ + struct qeth_card *card = dev->driver_data; + if (!card) + return -EINVAL; + + return sprintf(buf, "%s\n", qeth_get_cardname_short(card)); +} + +static DEVICE_ATTR(card_type, 0444, qeth_dev_card_type_show, NULL); + +static ssize_t +qeth_dev_portno_show(struct device *dev, char *buf) +{ + struct qeth_card *card = dev->driver_data; + if (!card) + return -EINVAL; + + return sprintf(buf, "%i\n", card->info.portno); +} + +static ssize_t +qeth_dev_portno_store(struct device *dev, const char *buf, size_t count) +{ + struct qeth_card *card = dev->driver_data; + char *tmp; + unsigned int portno; + + if (!card) + return -EINVAL; + + if ((card->state != CARD_STATE_DOWN) && + (card->state != CARD_STATE_RECOVER)) + return -EPERM; + + portno = simple_strtoul(buf, &tmp, 16); + if (portno > MAX_PORTNO){ /* portno is unsigned; only the upper bound needs checking */ + PRINT_WARN("portno 0x%X is out of range\n", portno); + return -EINVAL; + } + + card->info.portno = portno; + return count; +} + +static DEVICE_ATTR(portno, 0644, qeth_dev_portno_show, qeth_dev_portno_store); + +static ssize_t +qeth_dev_portname_show(struct device *dev, char *buf) +{ + struct qeth_card *card = dev->driver_data; + char portname[9] = {0, }; + + if (!card) + return -EINVAL; + + if (card->info.portname_required) { + memcpy(portname, card->info.portname + 1, 8); + EBCASC(portname, 8); + return sprintf(buf, "%s\n", portname); + } else + return sprintf(buf, "no portname required\n"); +} + +static ssize_t +qeth_dev_portname_store(struct device *dev, const char *buf, size_t count) +{ + struct qeth_card *card = dev->driver_data; + char *tmp; + int i; + + if (!card) + return -EINVAL; + + if ((card->state != CARD_STATE_DOWN) && + (card->state != CARD_STATE_RECOVER)) + return -EPERM; + + tmp = strsep((char **) &buf, "\n"); + if ((strlen(tmp) > 8) || (strlen(tmp) < 2)) + return -EINVAL; + + card->info.portname[0] = strlen(tmp); + /* for beauty reasons */ + for (i = 1; i < 9; i++) + card->info.portname[i] = ' '; + strcpy(card->info.portname + 1, tmp); + ASCEBC(card->info.portname + 1, 8); + + return count; +} + +static DEVICE_ATTR(portname, 0644, qeth_dev_portname_show, +
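/* the store handler above expects 2..8 characters, blank-padded and converted to EBCDIC */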
qeth_dev_portname_store); + +static ssize_t +qeth_dev_checksum_show(struct device *dev, char *buf) +{ + struct qeth_card *card = dev->driver_data; + + if (!card) + return -EINVAL; + + return sprintf(buf, "%s checksumming\n", qeth_get_checksum_str(card)); +} + +static ssize_t +qeth_dev_checksum_store(struct device *dev, const char *buf, size_t count) +{ + struct qeth_card *card = dev->driver_data; + char *tmp; + + if (!card) + return -EINVAL; + + if ((card->state != CARD_STATE_DOWN) && + (card->state != CARD_STATE_RECOVER)) + return -EPERM; + + tmp = strsep((char **) &buf, "\n"); + if (!strcmp(tmp, "sw_checksumming")) + card->options.checksum_type = SW_CHECKSUMMING; + else if (!strcmp(tmp, "hw_checksumming")) + card->options.checksum_type = HW_CHECKSUMMING; + else if (!strcmp(tmp, "no_checksumming")) + card->options.checksum_type = NO_CHECKSUMMING; + else { + PRINT_WARN("Unknown checksumming type '%s'\n", tmp); + return -EINVAL; + } + return count; +} + +static DEVICE_ATTR(checksumming, 0644, qeth_dev_checksum_show, + qeth_dev_checksum_store); + +static ssize_t +qeth_dev_prioqing_show(struct device *dev, char *buf) +{ + struct qeth_card *card = dev->driver_data; + + if (!card) + return -EINVAL; + + switch (card->qdio.do_prio_queueing) { + case QETH_PRIO_Q_ING_PREC: + return sprintf(buf, "%s\n", "by precedence"); + case QETH_PRIO_Q_ING_TOS: + return sprintf(buf, "%s\n", "by type of service"); + default: + return sprintf(buf, "always queue %i\n", + card->qdio.default_out_queue); + } +} + +static ssize_t +qeth_dev_prioqing_store(struct device *dev, const char *buf, size_t count) +{ + struct qeth_card *card = dev->driver_data; + char *tmp; + + if (!card) + return -EINVAL; + + if ((card->state != CARD_STATE_DOWN) && + (card->state != CARD_STATE_RECOVER)) + return -EPERM; + + /* cards that provide only one outbound queue (e.g. 1920 devices) + * cannot do priority queueing + */ + if (card->qdio.no_out_queues == 1) { + PRINT_WARN("Priority queueing disabled due " + "to hardware limitations!\n"); + card->qdio.do_prio_queueing = QETH_PRIOQ_DEFAULT; + return -EPERM; + } + + tmp = strsep((char **) &buf, "\n"); + if (!strcmp(tmp, "prio_queueing_prec")) + card->qdio.do_prio_queueing = QETH_PRIO_Q_ING_PREC; + else if (!strcmp(tmp, "prio_queueing_tos")) + card->qdio.do_prio_queueing = QETH_PRIO_Q_ING_TOS; + else if (!strcmp(tmp, "no_prio_queueing:0")) { + card->qdio.do_prio_queueing = QETH_NO_PRIO_QUEUEING; + card->qdio.default_out_queue = 0; + } else if (!strcmp(tmp, "no_prio_queueing:1")) { + card->qdio.do_prio_queueing = QETH_NO_PRIO_QUEUEING; + card->qdio.default_out_queue = 1; + } else if (!strcmp(tmp, "no_prio_queueing:2")) { + card->qdio.do_prio_queueing = QETH_NO_PRIO_QUEUEING; + card->qdio.default_out_queue = 2; + } else if (!strcmp(tmp, "no_prio_queueing:3")) { + card->qdio.do_prio_queueing = QETH_NO_PRIO_QUEUEING; + card->qdio.default_out_queue = 3; + } else if (!strcmp(tmp, "no_prio_queueing")) { + card->qdio.do_prio_queueing = QETH_NO_PRIO_QUEUEING; + card->qdio.default_out_queue = QETH_DEFAULT_QUEUE; + } else { + PRINT_WARN("Unknown queueing type '%s'\n", tmp); + return -EINVAL; + } + return count; +} + +static DEVICE_ATTR(priority_queueing, 0644, qeth_dev_prioqing_show, + qeth_dev_prioqing_store); + +static ssize_t +qeth_dev_bufcnt_show(struct device *dev, char *buf) +{ + struct qeth_card *card = dev->driver_data; + + if (!card) + return -EINVAL; + + return sprintf(buf, "%i\n", card->qdio.in_buf_pool.buf_count); +} + +static ssize_t +qeth_dev_bufcnt_store(struct device *dev, const char *buf, size_t
count) +{ + struct qeth_card *card = dev->driver_data; + char *tmp; + int cnt, old_cnt; + int rc; + + if (!card) + return -EINVAL; + + if ((card->state != CARD_STATE_DOWN) && + (card->state != CARD_STATE_RECOVER)) + return -EPERM; + + old_cnt = card->qdio.in_buf_pool.buf_count; + cnt = simple_strtoul(buf, &tmp, 10); + cnt = (cnt < QETH_IN_BUF_COUNT_MIN) ? QETH_IN_BUF_COUNT_MIN : + ((cnt > QETH_IN_BUF_COUNT_MAX) ? QETH_IN_BUF_COUNT_MAX : cnt); + if (old_cnt != cnt) { + if ((rc = qeth_realloc_buffer_pool(card, cnt))) + PRINT_WARN("Error (%d) while setting " + "buffer count.\n", rc); + } + return count; +} + +static DEVICE_ATTR(buffer_count, 0644, qeth_dev_bufcnt_show, + qeth_dev_bufcnt_store); + +static inline ssize_t +qeth_dev_route_show(struct qeth_card *card, struct qeth_routing_info *route, + char *buf) +{ + switch (route->type) { + case PRIMARY_ROUTER: + return sprintf(buf, "%s\n", "primary router"); + case SECONDARY_ROUTER: + return sprintf(buf, "%s\n", "secondary router"); + case MULTICAST_ROUTER: + if (card->info.broadcast_capable == QETH_BROADCAST_WITHOUT_ECHO) + return sprintf(buf, "%s\n", "multicast router+"); + else + return sprintf(buf, "%s\n", "multicast router"); + case PRIMARY_CONNECTOR: + if (card->info.broadcast_capable == QETH_BROADCAST_WITHOUT_ECHO) + return sprintf(buf, "%s\n", "primary connector+"); + else + return sprintf(buf, "%s\n", "primary connector"); + case SECONDARY_CONNECTOR: + if (card->info.broadcast_capable == QETH_BROADCAST_WITHOUT_ECHO) + return sprintf(buf, "%s\n", "secondary connector+"); + else + return sprintf(buf, "%s\n", "secondary connector"); + default: + return sprintf(buf, "%s\n", "no"); + } +} + +static ssize_t +qeth_dev_route4_show(struct device *dev, char *buf) +{ + struct qeth_card *card = dev->driver_data; + + if (!card) + return -EINVAL; + + return qeth_dev_route_show(card, &card->options.route4, buf); +} + +static inline ssize_t +qeth_dev_route_store(struct qeth_card *card, struct qeth_routing_info *route, + enum qeth_prot_versions prot, const char *buf, size_t count) +{ + enum qeth_routing_types old_route_type = route->type; + char *tmp; + int rc; + + tmp = strsep((char **) &buf, "\n"); + + if (!strcmp(tmp, "no_router")){ + route->type = NO_ROUTER; + } else if (!strcmp(tmp, "primary_connector")) { + route->type = PRIMARY_CONNECTOR; + } else if (!strcmp(tmp, "secondary_connector")) { + route->type = SECONDARY_CONNECTOR; + } else if (!strcmp(tmp, "multicast_router")) { + route->type = MULTICAST_ROUTER; + } else if (!strcmp(tmp, "primary_router")) { + route->type = PRIMARY_ROUTER; + } else if (!strcmp(tmp, "secondary_router")) { + route->type = SECONDARY_ROUTER; + } else { + PRINT_WARN("Invalid routing type '%s'.\n", tmp); + return -EINVAL; + } + if (((card->state == CARD_STATE_SOFTSETUP) || + (card->state == CARD_STATE_UP)) && + (old_route_type != route->type)){ + if (prot == QETH_PROT_IPV4) + rc = qeth_setrouting_v4(card); + else if (prot == QETH_PROT_IPV6) + rc = qeth_setrouting_v6(card); + } + return count; +} + +static ssize_t +qeth_dev_route4_store(struct device *dev, const char *buf, size_t count) +{ + struct qeth_card *card = dev->driver_data; + + if (!card) + return -EINVAL; + + return qeth_dev_route_store(card, &card->options.route4, + QETH_PROT_IPV4, buf, count); +} + +static DEVICE_ATTR(route4, 0644, qeth_dev_route4_show, qeth_dev_route4_store); + +#ifdef CONFIG_QETH_IPV6 +static ssize_t +qeth_dev_route6_show(struct device *dev, char *buf) +{ +
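/* reports "n/a" unless the adapter advertises IPA_IPV6 */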
struct qeth_card *card = dev->driver_data; + + if (!card) + return -EINVAL; + + if (!qeth_is_supported(card, IPA_IPV6)) + return sprintf(buf, "%s\n", "n/a"); + + return qeth_dev_route_show(card, &card->options.route6, buf); +} + +static ssize_t +qeth_dev_route6_store(struct device *dev, const char *buf, size_t count) +{ + struct qeth_card *card = dev->driver_data; + + if (!card) + return -EINVAL; + + if (!qeth_is_supported(card, IPA_IPV6)){ + PRINT_WARN("IPv6 not supported for interface %s.\n" + "Routing status not changed.\n", + QETH_CARD_IFNAME(card)); + return -ENOTSUPP; + } + + return qeth_dev_route_store(card, &card->options.route6, + QETH_PROT_IPV6, buf, count); +} + +static DEVICE_ATTR(route6, 0644, qeth_dev_route6_show, qeth_dev_route6_store); +#endif + +static ssize_t +qeth_dev_add_hhlen_show(struct device *dev, char *buf) +{ + struct qeth_card *card = dev->driver_data; + + if (!card) + return -EINVAL; + + return sprintf(buf, "%i\n", card->options.add_hhlen); +} + +static ssize_t +qeth_dev_add_hhlen_store(struct device *dev, const char *buf, size_t count) +{ + struct qeth_card *card = dev->driver_data; + char *tmp; + int i; + + if (!card) + return -EINVAL; + + if ((card->state != CARD_STATE_DOWN) && + (card->state != CARD_STATE_RECOVER)) + return -EPERM; + + i = simple_strtoul(buf, &tmp, 10); + if ((i < 0) || (i > MAX_ADD_HHLEN)) { + PRINT_WARN("add_hhlen out of range\n"); + return -EINVAL; + } + card->options.add_hhlen = i; + + return count; +} + +static DEVICE_ATTR(add_hhlen, 0644, qeth_dev_add_hhlen_show, + qeth_dev_add_hhlen_store); + +static ssize_t +qeth_dev_fake_ll_show(struct device *dev, char *buf) +{ + struct qeth_card *card = dev->driver_data; + + if (!card) + return -EINVAL; + + return sprintf(buf, "%i\n", card->options.fake_ll? 1:0); +} + +static ssize_t +qeth_dev_fake_ll_store(struct device *dev, const char *buf, size_t count) +{ + struct qeth_card *card = dev->driver_data; + char *tmp; + int i; + + if (!card) + return -EINVAL; + + if ((card->state != CARD_STATE_DOWN) && + (card->state != CARD_STATE_RECOVER)) + return -EPERM; + + i = simple_strtoul(buf, &tmp, 16); + if ((i != 0) && (i != 1)) { + PRINT_WARN("fake_ll: write 0 or 1 to this file!\n"); + return -EINVAL; + } + card->options.fake_ll = i; + return count; +} + +static DEVICE_ATTR(fake_ll, 0644, qeth_dev_fake_ll_show, + qeth_dev_fake_ll_store); + +static ssize_t +qeth_dev_fake_broadcast_show(struct device *dev, char *buf) +{ + struct qeth_card *card = dev->driver_data; + + if (!card) + return -EINVAL; + + return sprintf(buf, "%i\n", card->options.fake_broadcast?
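/* the option is boolean; shown as 0 or 1 */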
1:0); +} + +static ssize_t +qeth_dev_fake_broadcast_store(struct device *dev, const char *buf, size_t count) +{ + struct qeth_card *card = dev->driver_data; + char *tmp; + int i; + + if (!card) + return -EINVAL; + + if ((card->state != CARD_STATE_DOWN) && + (card->state != CARD_STATE_RECOVER)) + return -EPERM; + + i = simple_strtoul(buf, &tmp, 16); + if ((i == 0) || (i == 1)) + card->options.fake_broadcast = i; + else { + PRINT_WARN("fake_broadcast: write 0 or 1 to this file!\n"); + return -EINVAL; + } + return count; +} + +static DEVICE_ATTR(fake_broadcast, 0644, qeth_dev_fake_broadcast_show, + qeth_dev_fake_broadcast_store); + +static ssize_t +qeth_dev_recover_store(struct device *dev, const char *buf, size_t count) +{ + struct qeth_card *card = dev->driver_data; + char *tmp; + int i; + + if (!card) + return -EINVAL; + + if (card->state != CARD_STATE_UP) + return -EPERM; + + i = simple_strtoul(buf, &tmp, 16); + if (i == 1) + qeth_schedule_recovery(card); + + return count; +} + +static DEVICE_ATTR(recover, 0200, NULL, qeth_dev_recover_store); + +static ssize_t +qeth_dev_broadcast_mode_show(struct device *dev, char *buf) +{ + struct qeth_card *card = dev->driver_data; + + if (!card) + return -EINVAL; + + if (!((card->info.link_type == QETH_LINK_TYPE_HSTR) || + (card->info.link_type == QETH_LINK_TYPE_LANE_TR))) + return sprintf(buf, "n/a\n"); + + return sprintf(buf, "%s\n", (card->options.broadcast_mode == + QETH_TR_BROADCAST_ALLRINGS)? + "all rings":"local"); +} + +static ssize_t +qeth_dev_broadcast_mode_store(struct device *dev, const char *buf, size_t count) +{ + struct qeth_card *card = dev->driver_data; + char *tmp; + + if (!card) + return -EINVAL; + + if ((card->state != CARD_STATE_DOWN) && + (card->state != CARD_STATE_RECOVER)) + return -EPERM; + + if (!((card->info.link_type == QETH_LINK_TYPE_HSTR) || + (card->info.link_type == QETH_LINK_TYPE_LANE_TR))){ + PRINT_WARN("Device is not a tokenring device!\n"); + return -EINVAL; + } + + tmp = strsep((char **) &buf, "\n"); + + if (!strcmp(tmp, "local")){ + card->options.broadcast_mode = QETH_TR_BROADCAST_LOCAL; + return count; + } else if (!strcmp(tmp, "all_rings")) { + card->options.broadcast_mode = QETH_TR_BROADCAST_ALLRINGS; + return count; + } else { + PRINT_WARN("broadcast_mode: invalid mode %s!\n", + tmp); + return -EINVAL; + } + return count; +} + +static DEVICE_ATTR(broadcast_mode, 0644, qeth_dev_broadcast_mode_show, + qeth_dev_broadcast_mode_store); + +static ssize_t +qeth_dev_canonical_macaddr_show(struct device *dev, char *buf) +{ + struct qeth_card *card = dev->driver_data; + + if (!card) + return -EINVAL; + + if (!((card->info.link_type == QETH_LINK_TYPE_HSTR) || + (card->info.link_type == QETH_LINK_TYPE_LANE_TR))) + return sprintf(buf, "n/a\n"); + + return sprintf(buf, "%i\n", (card->options.macaddr_mode == + QETH_TR_MACADDR_CANONICAL)? 1:0); +} + +static ssize_t +qeth_dev_canonical_macaddr_store(struct device *dev, const char *buf, + size_t count) +{ + struct qeth_card *card = dev->driver_data; + char *tmp; + int i; + + if (!card) + return -EINVAL; + + if ((card->state != CARD_STATE_DOWN) && + (card->state != CARD_STATE_RECOVER)) + return -EPERM; + + if (!((card->info.link_type == QETH_LINK_TYPE_HSTR) || + (card->info.link_type == QETH_LINK_TYPE_LANE_TR))){ + PRINT_WARN("Device is not a tokenring device!\n"); + return -EINVAL; + } + + i = simple_strtoul(buf, &tmp, 16); + if ((i == 0) || (i == 1)) + card->options.macaddr_mode = i? 
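+ /* 1 selects the canonical token ring MAC address format, 0 the non-canonical one */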
+ QETH_TR_MACADDR_CANONICAL : + QETH_TR_MACADDR_NONCANONICAL; + else { + PRINT_WARN("canonical_macaddr: write 0 or 1 to this file!\n"); + return -EINVAL; + } + return count; +} + +static DEVICE_ATTR(canonical_macaddr, 0644, qeth_dev_canonical_macaddr_show, + qeth_dev_canonical_macaddr_store); + +static ssize_t +qeth_dev_layer2_show(struct device *dev, char *buf) +{ + struct qeth_card *card = dev->driver_data; + + if (!card) + return -EINVAL; + + return sprintf(buf, "%i\n", card->options.layer2 ? 1:0); +} + +static ssize_t +qeth_dev_layer2_store(struct device *dev, const char *buf, size_t count) +{ + struct qeth_card *card = dev->driver_data; + char *tmp; + int i; + + if (!card) + return -EINVAL; + + if (((card->state != CARD_STATE_DOWN) && + (card->state != CARD_STATE_RECOVER)) || + (card->info.type != QETH_CARD_TYPE_OSAE)) + return -EPERM; + + i = simple_strtoul(buf, &tmp, 16); + if ((i == 0) || (i == 1)) + card->options.layer2 = i; + else { + PRINT_WARN("layer2: write 0 or 1 to this file!\n"); + return -EINVAL; + } + return count; +} + +static DEVICE_ATTR(layer2, 0644, qeth_dev_layer2_show, + qeth_dev_layer2_store); + +static ssize_t +qeth_dev_large_send_show(struct device *dev, char *buf) +{ + struct qeth_card *card = dev->driver_data; + + if (!card) + return -EINVAL; + + switch (card->options.large_send) { + case QETH_LARGE_SEND_NO: + return sprintf(buf, "%s\n", "no"); + case QETH_LARGE_SEND_EDDP: + return sprintf(buf, "%s\n", "EDDP"); + case QETH_LARGE_SEND_TSO: + return sprintf(buf, "%s\n", "TSO"); + default: + return sprintf(buf, "%s\n", "N/A"); + } +} + +static ssize_t +qeth_dev_large_send_store(struct device *dev, const char *buf, size_t count) +{ + struct qeth_card *card = dev->driver_data; + enum qeth_large_send_types type; + int rc = 0; + char *tmp; + + if (!card) + return -EINVAL; + + tmp = strsep((char **) &buf, "\n"); + + if (!strcmp(tmp, "no")){ + type = QETH_LARGE_SEND_NO; + } else if (!strcmp(tmp, "EDDP")) { + type = QETH_LARGE_SEND_EDDP; + } else if (!strcmp(tmp, "TSO")) { + type = QETH_LARGE_SEND_TSO; + } else { + PRINT_WARN("large_send: invalid mode %s!\n", tmp); + return -EINVAL; + } + if (card->options.large_send == type) + return count; + card->options.large_send = type; + if ((rc = qeth_set_large_send(card))) + return rc; + + return count; +} + +static DEVICE_ATTR(large_send, 0644, qeth_dev_large_send_show, + qeth_dev_large_send_store); + +static ssize_t +qeth_dev_blkt_show(char *buf, struct qeth_card *card, int value ) +{ + + if (!card) + return -EINVAL; + + return sprintf(buf, "%i\n", value); +} + +static ssize_t +qeth_dev_blkt_store(struct qeth_card *card, const char *buf, size_t count, + int *value, int max_value) +{ + char *tmp; + int i; + + if (!card) + return -EINVAL; + + if ((card->state != CARD_STATE_DOWN) && + (card->state != CARD_STATE_RECOVER)) + return -EPERM; + + i = simple_strtoul(buf, &tmp, 10); + if (i <= max_value) { + *value = i; + } else { + PRINT_WARN("blkt total time: write values between" + " 0 and %d to this file!\n", max_value); + return -EINVAL; + } + return count; +} + +static ssize_t +qeth_dev_blkt_total_show(struct device *dev, char *buf) +{ + struct qeth_card *card = dev->driver_data; + + return qeth_dev_blkt_show(buf, card, card->info.blkt.time_total); +} + + +static ssize_t +qeth_dev_blkt_total_store(struct device *dev, const char *buf, size_t count) +{ + struct qeth_card *card = dev->driver_data; + + return qeth_dev_blkt_store(card, buf, count, + &card->info.blkt.time_total,1000); +} + + + +static DEVICE_ATTR(total, 0644, 
qeth_dev_blkt_total_show, + qeth_dev_blkt_total_store); + +static ssize_t +qeth_dev_blkt_inter_show(struct device *dev, char *buf) +{ + struct qeth_card *card = dev->driver_data; + + return qeth_dev_blkt_show(buf, card, card->info.blkt.inter_packet); +} + + +static ssize_t +qeth_dev_blkt_inter_store(struct device *dev, const char *buf, size_t count) +{ + struct qeth_card *card = dev->driver_data; + + return qeth_dev_blkt_store(card, buf, count, + &card->info.blkt.inter_packet,100); +} + +static DEVICE_ATTR(inter, 0644, qeth_dev_blkt_inter_show, + qeth_dev_blkt_inter_store); + +static ssize_t +qeth_dev_blkt_inter_jumbo_show(struct device *dev, char *buf) +{ + struct qeth_card *card = dev->driver_data; + + return qeth_dev_blkt_show(buf, card, + card->info.blkt.inter_packet_jumbo); +} + + +static ssize_t +qeth_dev_blkt_inter_jumbo_store(struct device *dev, const char *buf, size_t count) +{ + struct qeth_card *card = dev->driver_data; + + return qeth_dev_blkt_store(card, buf, count, + &card->info.blkt.inter_packet_jumbo,100); +} + +static DEVICE_ATTR(inter_jumbo, 0644, qeth_dev_blkt_inter_jumbo_show, + qeth_dev_blkt_inter_jumbo_store); + +static struct device_attribute * qeth_blkt_device_attrs[] = { + &dev_attr_total, + &dev_attr_inter, + &dev_attr_inter_jumbo, + NULL, +}; + +static struct attribute_group qeth_device_blkt_group = { + .name = "blkt", + .attrs = (struct attribute **)qeth_blkt_device_attrs, +}; + +static struct device_attribute * qeth_device_attrs[] = { + &dev_attr_state, + &dev_attr_chpid, + &dev_attr_if_name, + &dev_attr_card_type, + &dev_attr_portno, + &dev_attr_portname, + &dev_attr_checksumming, + &dev_attr_priority_queueing, + &dev_attr_buffer_count, + &dev_attr_route4, +#ifdef CONFIG_QETH_IPV6 + &dev_attr_route6, +#endif + &dev_attr_add_hhlen, + &dev_attr_fake_ll, + &dev_attr_fake_broadcast, + &dev_attr_recover, + &dev_attr_broadcast_mode, + &dev_attr_canonical_macaddr, + &dev_attr_layer2, + &dev_attr_large_send, + NULL, +}; + +static struct attribute_group qeth_device_attr_group = { + .attrs = (struct attribute **)qeth_device_attrs, +}; + + +#define QETH_DEVICE_ATTR(_id,_name,_mode,_show,_store) \ +struct device_attribute dev_attr_##_id = { \ + .attr = {.name=__stringify(_name), .mode=_mode, .owner=THIS_MODULE },\ + .show = _show, \ + .store = _store, \ +}; + +int +qeth_check_layer2(struct qeth_card *card) +{ + if (card->options.layer2) + return -EPERM; + return 0; +} + + +static ssize_t +qeth_dev_ipato_enable_show(struct device *dev, char *buf) +{ + struct qeth_card *card = dev->driver_data; + + if (!card) + return -EINVAL; + + if (qeth_check_layer2(card)) + return -EPERM; + return sprintf(buf, "%i\n", card->ipato.enabled? 1:0); +} + +static ssize_t +qeth_dev_ipato_enable_store(struct device *dev, const char *buf, size_t count) +{ + struct qeth_card *card = dev->driver_data; + char *tmp; + + if (!card) + return -EINVAL; + + if ((card->state != CARD_STATE_DOWN) && + (card->state != CARD_STATE_RECOVER)) + return -EPERM; + + if (qeth_check_layer2(card)) + return -EPERM; + + tmp = strsep((char **) &buf, "\n"); + if (!strcmp(tmp, "toggle")){ + card->ipato.enabled = (card->ipato.enabled)? 
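/* "toggle" flips the current setting */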
0 : 1; + } else if (!strcmp(tmp, "1")){ + card->ipato.enabled = 1; + } else if (!strcmp(tmp, "0")){ + card->ipato.enabled = 0; + } else { + PRINT_WARN("ipato_enable: write 0, 1 or 'toggle' to " + "this file\n"); + return -EINVAL; + } + return count; +} + +static QETH_DEVICE_ATTR(ipato_enable, enable, 0644, + qeth_dev_ipato_enable_show, + qeth_dev_ipato_enable_store); + +static ssize_t +qeth_dev_ipato_invert4_show(struct device *dev, char *buf) +{ + struct qeth_card *card = dev->driver_data; + + if (!card) + return -EINVAL; + + if (qeth_check_layer2(card)) + return -EPERM; + + return sprintf(buf, "%i\n", card->ipato.invert4? 1:0); +} + +static ssize_t +qeth_dev_ipato_invert4_store(struct device *dev, const char *buf, size_t count) +{ + struct qeth_card *card = dev->driver_data; + char *tmp; + + if (!card) + return -EINVAL; + + if (qeth_check_layer2(card)) + return -EPERM; + + tmp = strsep((char **) &buf, "\n"); + if (!strcmp(tmp, "toggle")){ + card->ipato.invert4 = (card->ipato.invert4)? 0 : 1; + } else if (!strcmp(tmp, "1")){ + card->ipato.invert4 = 1; + } else if (!strcmp(tmp, "0")){ + card->ipato.invert4 = 0; + } else { + PRINT_WARN("ipato_invert4: write 0, 1 or 'toggle' to " + "this file\n"); + return -EINVAL; + } + return count; +} + +static QETH_DEVICE_ATTR(ipato_invert4, invert4, 0644, + qeth_dev_ipato_invert4_show, + qeth_dev_ipato_invert4_store); + +static inline ssize_t +qeth_dev_ipato_add_show(char *buf, struct qeth_card *card, + enum qeth_prot_versions proto) +{ + struct qeth_ipato_entry *ipatoe; + unsigned long flags; + char addr_str[40]; + int entry_len; /* length of 1 entry string, differs between v4 and v6 */ + int i = 0; + + if (qeth_check_layer2(card)) + return -EPERM; + + entry_len = (proto == QETH_PROT_IPV4)? 12 : 40; + /* add strlen for "/<mask>\n" */ + entry_len += (proto == QETH_PROT_IPV4)? 5 : 6; + spin_lock_irqsave(&card->ip_lock, flags); + list_for_each_entry(ipatoe, &card->ipato.entries, entry){ + if (ipatoe->proto != proto) + continue; + /* String must not be longer than PAGE_SIZE. So we check if + * string length gets near PAGE_SIZE. Then we can safely display + * the next IPv6 address (worst case, compared to IPv4) */ + if ((PAGE_SIZE - i) <= entry_len) + break; + qeth_ipaddr_to_string(proto, ipatoe->addr, addr_str); + i += snprintf(buf + i, PAGE_SIZE - i, + "%s/%i\n", addr_str, ipatoe->mask_bits); + } + spin_unlock_irqrestore(&card->ip_lock, flags); + i += snprintf(buf + i, PAGE_SIZE - i, "\n"); + + return i; +} + +static ssize_t +qeth_dev_ipato_add4_show(struct device *dev, char *buf) +{ + struct qeth_card *card = dev->driver_data; + + if (!card) + return -EINVAL; + + return qeth_dev_ipato_add_show(buf, card, QETH_PROT_IPV4); +} + +static inline int +qeth_parse_ipatoe(const char* buf, enum qeth_prot_versions proto, + u8 *addr, int *mask_bits) +{ + const char *start, *end; + char *tmp; + char buffer[49] = {0, }; + + start = buf; + /* get address string */ + end = strchr(start, '/'); + if (!end){ + PRINT_WARN("Invalid format for ipato_addx/delx.
" + "Use <ip addr>/<mask bits>\n"); + return -EINVAL; + } + strncpy(buffer, start, end - start); + if (qeth_string_to_ipaddr(buffer, proto, addr)){ + PRINT_WARN("Invalid IP address format!\n"); + return -EINVAL; + } + start = end + 1; + *mask_bits = simple_strtoul(start, &tmp, 10); + + return 0; +} + +static inline ssize_t +qeth_dev_ipato_add_store(const char *buf, size_t count, + struct qeth_card *card, enum qeth_prot_versions proto) +{ + struct qeth_ipato_entry *ipatoe; + u8 addr[16]; + int mask_bits; + int rc; + + if (qeth_check_layer2(card)) + return -EPERM; + if ((rc = qeth_parse_ipatoe(buf, proto, addr, &mask_bits))) + return rc; + + if (!(ipatoe = kmalloc(sizeof(struct qeth_ipato_entry), GFP_KERNEL))){ + PRINT_WARN("No memory to allocate ipato entry\n"); + return -ENOMEM; + } + memset(ipatoe, 0, sizeof(struct qeth_ipato_entry)); + ipatoe->proto = proto; + memcpy(ipatoe->addr, addr, (proto == QETH_PROT_IPV4)? 4:16); + ipatoe->mask_bits = mask_bits; + + if ((rc = qeth_add_ipato_entry(card, ipatoe))){ + kfree(ipatoe); + return rc; + } + + return count; +} + +static ssize_t +qeth_dev_ipato_add4_store(struct device *dev, const char *buf, size_t count) +{ + struct qeth_card *card = dev->driver_data; + + if (!card) + return -EINVAL; + + return qeth_dev_ipato_add_store(buf, count, card, QETH_PROT_IPV4); +} + +static QETH_DEVICE_ATTR(ipato_add4, add4, 0644, + qeth_dev_ipato_add4_show, + qeth_dev_ipato_add4_store); + +static inline ssize_t +qeth_dev_ipato_del_store(const char *buf, size_t count, + struct qeth_card *card, enum qeth_prot_versions proto) +{ + u8 addr[16]; + int mask_bits; + int rc; + + if (qeth_check_layer2(card)) + return -EPERM; + if ((rc = qeth_parse_ipatoe(buf, proto, addr, &mask_bits))) + return rc; + + qeth_del_ipato_entry(card, proto, addr, mask_bits); + + return count; +} + +static ssize_t +qeth_dev_ipato_del4_store(struct device *dev, const char *buf, size_t count) +{ + struct qeth_card *card = dev->driver_data; + + if (!card) + return -EINVAL; + + return qeth_dev_ipato_del_store(buf, count, card, QETH_PROT_IPV4); +} + +static QETH_DEVICE_ATTR(ipato_del4, del4, 0200, NULL, + qeth_dev_ipato_del4_store); + +#ifdef CONFIG_QETH_IPV6 +static ssize_t +qeth_dev_ipato_invert6_show(struct device *dev, char *buf) +{ + struct qeth_card *card = dev->driver_data; + + if (!card) + return -EINVAL; + + if (qeth_check_layer2(card)) + return -EPERM; + + return sprintf(buf, "%i\n", card->ipato.invert6? 1:0); +} + +static ssize_t +qeth_dev_ipato_invert6_store(struct device *dev, const char *buf, size_t count) +{ + struct qeth_card *card = dev->driver_data; + char *tmp; + + if (!card) + return -EINVAL; + + if (qeth_check_layer2(card)) + return -EPERM; + + tmp = strsep((char **) &buf, "\n"); + if (!strcmp(tmp, "toggle")){ + card->ipato.invert6 = (card->ipato.invert6)? 
0 : 1; + } else if (!strcmp(tmp, "1")){ + card->ipato.invert6 = 1; + } else if (!strcmp(tmp, "0")){ + card->ipato.invert6 = 0; + } else { + PRINT_WARN("ipato_invert6: write 0, 1 or 'toggle' to " + "this file\n"); + return -EINVAL; + } + return count; +} + +static QETH_DEVICE_ATTR(ipato_invert6, invert6, 0644, + qeth_dev_ipato_invert6_show, + qeth_dev_ipato_invert6_store); + + +static ssize_t +qeth_dev_ipato_add6_show(struct device *dev, char *buf) +{ + struct qeth_card *card = dev->driver_data; + + if (!card) + return -EINVAL; + + return qeth_dev_ipato_add_show(buf, card, QETH_PROT_IPV6); +} + +static ssize_t +qeth_dev_ipato_add6_store(struct device *dev, const char *buf, size_t count) +{ + struct qeth_card *card = dev->driver_data; + + if (!card) + return -EINVAL; + + return qeth_dev_ipato_add_store(buf, count, card, QETH_PROT_IPV6); +} + +static QETH_DEVICE_ATTR(ipato_add6, add6, 0644, + qeth_dev_ipato_add6_show, + qeth_dev_ipato_add6_store); + +static ssize_t +qeth_dev_ipato_del6_store(struct device *dev, const char *buf, size_t count) +{ + struct qeth_card *card = dev->driver_data; + + if (!card) + return -EINVAL; + + return qeth_dev_ipato_del_store(buf, count, card, QETH_PROT_IPV6); +} + +static QETH_DEVICE_ATTR(ipato_del6, del6, 0200, NULL, + qeth_dev_ipato_del6_store); +#endif /* CONFIG_QETH_IPV6 */ + +static struct device_attribute * qeth_ipato_device_attrs[] = { + &dev_attr_ipato_enable, + &dev_attr_ipato_invert4, + &dev_attr_ipato_add4, + &dev_attr_ipato_del4, +#ifdef CONFIG_QETH_IPV6 + &dev_attr_ipato_invert6, + &dev_attr_ipato_add6, + &dev_attr_ipato_del6, +#endif + NULL, +}; + +static struct attribute_group qeth_device_ipato_group = { + .name = "ipa_takeover", + .attrs = (struct attribute **)qeth_ipato_device_attrs, +}; + +static inline ssize_t +qeth_dev_vipa_add_show(char *buf, struct qeth_card *card, + enum qeth_prot_versions proto) +{ + struct qeth_ipaddr *ipaddr; + char addr_str[40]; + int entry_len; /* length of 1 entry string, differs between v4 and v6 */ + unsigned long flags; + int i = 0; + + if (qeth_check_layer2(card)) + return -EPERM; + + entry_len = (proto == QETH_PROT_IPV4)? 12 : 40; + entry_len += 2; /* \n + terminator */ + spin_lock_irqsave(&card->ip_lock, flags); + list_for_each_entry(ipaddr, &card->ip_list, entry){ + if (ipaddr->proto != proto) + continue; + if (ipaddr->type != QETH_IP_TYPE_VIPA) + continue; + /* String must not be longer than PAGE_SIZE. So we check if + * string length gets near PAGE_SIZE. 
Then we can safely display + * the next IPv6 address (worst case, compared to IPv4) */ + if ((PAGE_SIZE - i) <= entry_len) + break; + qeth_ipaddr_to_string(proto, (const u8 *)&ipaddr->u, addr_str); + i += snprintf(buf + i, PAGE_SIZE - i, "%s\n", addr_str); + } + spin_unlock_irqrestore(&card->ip_lock, flags); + i += snprintf(buf + i, PAGE_SIZE - i, "\n"); + + return i; +} + +static ssize_t +qeth_dev_vipa_add4_show(struct device *dev, char *buf) +{ + struct qeth_card *card = dev->driver_data; + + if (!card) + return -EINVAL; + + return qeth_dev_vipa_add_show(buf, card, QETH_PROT_IPV4); +} + +static inline int +qeth_parse_vipae(const char* buf, enum qeth_prot_versions proto, + u8 *addr) +{ + if (qeth_string_to_ipaddr(buf, proto, addr)){ + PRINT_WARN("Invalid IP address format!\n"); + return -EINVAL; + } + return 0; +} + +static inline ssize_t +qeth_dev_vipa_add_store(const char *buf, size_t count, + struct qeth_card *card, enum qeth_prot_versions proto) +{ + u8 addr[16] = {0, }; + int rc; + + if (qeth_check_layer2(card)) + return -EPERM; + if ((rc = qeth_parse_vipae(buf, proto, addr))) + return rc; + + if ((rc = qeth_add_vipa(card, proto, addr))) + return rc; + + return count; +} + +static ssize_t +qeth_dev_vipa_add4_store(struct device *dev, const char *buf, size_t count) +{ + struct qeth_card *card = dev->driver_data; + + if (!card) + return -EINVAL; + + return qeth_dev_vipa_add_store(buf, count, card, QETH_PROT_IPV4); +} + +static QETH_DEVICE_ATTR(vipa_add4, add4, 0644, + qeth_dev_vipa_add4_show, + qeth_dev_vipa_add4_store); + +static inline ssize_t +qeth_dev_vipa_del_store(const char *buf, size_t count, + struct qeth_card *card, enum qeth_prot_versions proto) +{ + u8 addr[16]; + int rc; + + if (qeth_check_layer2(card)) + return -EPERM; + if ((rc = qeth_parse_vipae(buf, proto, addr))) + return rc; + + qeth_del_vipa(card, proto, addr); + + return count; +} + +static ssize_t +qeth_dev_vipa_del4_store(struct device *dev, const char *buf, size_t count) +{ + struct qeth_card *card = dev->driver_data; + + if (!card) + return -EINVAL; + + return qeth_dev_vipa_del_store(buf, count, card, QETH_PROT_IPV4); +} + +static QETH_DEVICE_ATTR(vipa_del4, del4, 0200, NULL, + qeth_dev_vipa_del4_store); + +#ifdef CONFIG_QETH_IPV6 +static ssize_t +qeth_dev_vipa_add6_show(struct device *dev, char *buf) +{ + struct qeth_card *card = dev->driver_data; + + if (!card) + return -EINVAL; + + return qeth_dev_vipa_add_show(buf, card, QETH_PROT_IPV6); +} + +static ssize_t +qeth_dev_vipa_add6_store(struct device *dev, const char *buf, size_t count) +{ + struct qeth_card *card = dev->driver_data; + + if (!card) + return -EINVAL; + + return qeth_dev_vipa_add_store(buf, count, card, QETH_PROT_IPV6); +} + +static QETH_DEVICE_ATTR(vipa_add6, add6, 0644, + qeth_dev_vipa_add6_show, + qeth_dev_vipa_add6_store); + +static ssize_t +qeth_dev_vipa_del6_store(struct device *dev, const char *buf, size_t count) +{ + struct qeth_card *card = dev->driver_data; + + if (!card) + return -EINVAL; + + if (qeth_check_layer2(card)) + return -EPERM; + + return qeth_dev_vipa_del_store(buf, count, card, QETH_PROT_IPV6); +} + +static QETH_DEVICE_ATTR(vipa_del6, del6, 0200, NULL, + qeth_dev_vipa_del6_store); +#endif /* CONFIG_QETH_IPV6 */ + +static struct device_attribute * qeth_vipa_device_attrs[] = { + &dev_attr_vipa_add4, + &dev_attr_vipa_del4, +#ifdef CONFIG_QETH_IPV6 + &dev_attr_vipa_add6, + &dev_attr_vipa_del6, +#endif + NULL, +}; + +static struct attribute_group qeth_device_vipa_group = { + .name = "vipa", + .attrs = (struct attribute
**)qeth_vipa_device_attrs, +}; + +static inline ssize_t +qeth_dev_rxip_add_show(char *buf, struct qeth_card *card, + enum qeth_prot_versions proto) +{ + struct qeth_ipaddr *ipaddr; + char addr_str[40]; + int entry_len; /* length of 1 entry string, differs between v4 and v6 */ + unsigned long flags; + int i = 0; + + if (qeth_check_layer2(card)) + return -EPERM; + + entry_len = (proto == QETH_PROT_IPV4)? 12 : 40; + entry_len += 2; /* \n + terminator */ + spin_lock_irqsave(&card->ip_lock, flags); + list_for_each_entry(ipaddr, &card->ip_list, entry){ + if (ipaddr->proto != proto) + continue; + if (ipaddr->type != QETH_IP_TYPE_RXIP) + continue; + /* String must not be longer than PAGE_SIZE. So we check if + * string length gets near PAGE_SIZE. Then we can safely display + * the next IPv6 address (worst case, compared to IPv4) */ + if ((PAGE_SIZE - i) <= entry_len) + break; + qeth_ipaddr_to_string(proto, (const u8 *)&ipaddr->u, addr_str); + i += snprintf(buf + i, PAGE_SIZE - i, "%s\n", addr_str); + } + spin_unlock_irqrestore(&card->ip_lock, flags); + i += snprintf(buf + i, PAGE_SIZE - i, "\n"); + + return i; +} + +static ssize_t +qeth_dev_rxip_add4_show(struct device *dev, char *buf) +{ + struct qeth_card *card = dev->driver_data; + + if (!card) + return -EINVAL; + + return qeth_dev_rxip_add_show(buf, card, QETH_PROT_IPV4); +} + +static inline int +qeth_parse_rxipe(const char* buf, enum qeth_prot_versions proto, + u8 *addr) +{ + if (qeth_string_to_ipaddr(buf, proto, addr)){ + PRINT_WARN("Invalid IP address format!\n"); + return -EINVAL; + } + return 0; +} + +static inline ssize_t +qeth_dev_rxip_add_store(const char *buf, size_t count, + struct qeth_card *card, enum qeth_prot_versions proto) +{ + u8 addr[16] = {0, }; + int rc; + + if (qeth_check_layer2(card)) + return -EPERM; + if ((rc = qeth_parse_rxipe(buf, proto, addr))) + return rc; + + if ((rc = qeth_add_rxip(card, proto, addr))) + return rc; + + return count; +} + +static ssize_t +qeth_dev_rxip_add4_store(struct device *dev, const char *buf, size_t count) +{ + struct qeth_card *card = dev->driver_data; + + if (!card) + return -EINVAL; + + return qeth_dev_rxip_add_store(buf, count, card, QETH_PROT_IPV4); +} + +static QETH_DEVICE_ATTR(rxip_add4, add4, 0644, + qeth_dev_rxip_add4_show, + qeth_dev_rxip_add4_store); + +static inline ssize_t +qeth_dev_rxip_del_store(const char *buf, size_t count, + struct qeth_card *card, enum qeth_prot_versions proto) +{ + u8 addr[16]; + int rc; + + if (qeth_check_layer2(card)) + return -EPERM; + if ((rc = qeth_parse_rxipe(buf, proto, addr))) + return rc; + + qeth_del_rxip(card, proto, addr); + + return count; +} + +static ssize_t +qeth_dev_rxip_del4_store(struct device *dev, const char *buf, size_t count) +{ + struct qeth_card *card = dev->driver_data; + + if (!card) + return -EINVAL; + + return qeth_dev_rxip_del_store(buf, count, card, QETH_PROT_IPV4); +} + +static QETH_DEVICE_ATTR(rxip_del4, del4, 0200, NULL, + qeth_dev_rxip_del4_store); + +#ifdef CONFIG_QETH_IPV6 +static ssize_t +qeth_dev_rxip_add6_show(struct device *dev, char *buf) +{ + struct qeth_card *card = dev->driver_data; + + if (!card) + return -EINVAL; + + return qeth_dev_rxip_add_show(buf, card, QETH_PROT_IPV6); +} + +static ssize_t +qeth_dev_rxip_add6_store(struct device *dev, const char *buf, size_t count) +{ + struct qeth_card *card = dev->driver_data; + + if (!card) + return -EINVAL; + + return qeth_dev_rxip_add_store(buf, count, card, QETH_PROT_IPV6); +} + +static QETH_DEVICE_ATTR(rxip_add6, add6, 0644, + qeth_dev_rxip_add6_show, +
qeth_dev_rxip_add6_store); + +static ssize_t +qeth_dev_rxip_del6_store(struct device *dev, const char *buf, size_t count) +{ + struct qeth_card *card = dev->driver_data; + + if (!card) + return -EINVAL; + + return qeth_dev_rxip_del_store(buf, count, card, QETH_PROT_IPV6); +} + +static QETH_DEVICE_ATTR(rxip_del6, del6, 0200, NULL, + qeth_dev_rxip_del6_store); +#endif /* CONFIG_QETH_IPV6 */ + +static struct device_attribute * qeth_rxip_device_attrs[] = { + &dev_attr_rxip_add4, + &dev_attr_rxip_del4, +#ifdef CONFIG_QETH_IPV6 + &dev_attr_rxip_add6, + &dev_attr_rxip_del6, +#endif + NULL, +}; + +static struct attribute_group qeth_device_rxip_group = { + .name = "rxip", + .attrs = (struct attribute **)qeth_rxip_device_attrs, +}; + +int +qeth_create_device_attributes(struct device *dev) +{ + int ret; + + if ((ret = sysfs_create_group(&dev->kobj, &qeth_device_attr_group))) + return ret; + if ((ret = sysfs_create_group(&dev->kobj, &qeth_device_ipato_group))){ + sysfs_remove_group(&dev->kobj, &qeth_device_attr_group); + return ret; + } + if ((ret = sysfs_create_group(&dev->kobj, &qeth_device_vipa_group))){ + sysfs_remove_group(&dev->kobj, &qeth_device_attr_group); + sysfs_remove_group(&dev->kobj, &qeth_device_ipato_group); + return ret; + } + if ((ret = sysfs_create_group(&dev->kobj, &qeth_device_rxip_group))){ + sysfs_remove_group(&dev->kobj, &qeth_device_attr_group); + sysfs_remove_group(&dev->kobj, &qeth_device_ipato_group); + sysfs_remove_group(&dev->kobj, &qeth_device_vipa_group); + return ret; + } + if ((ret = sysfs_create_group(&dev->kobj, &qeth_device_blkt_group))) + return ret; + + return ret; +} + +void +qeth_remove_device_attributes(struct device *dev) +{ + sysfs_remove_group(&dev->kobj, &qeth_device_attr_group); + sysfs_remove_group(&dev->kobj, &qeth_device_ipato_group); + sysfs_remove_group(&dev->kobj, &qeth_device_vipa_group); + sysfs_remove_group(&dev->kobj, &qeth_device_rxip_group); + sysfs_remove_group(&dev->kobj, &qeth_device_blkt_group); +} + +/**********************/ +/* DRIVER ATTRIBUTES */ +/**********************/ +static ssize_t +qeth_driver_group_store(struct device_driver *ddrv, const char *buf, + size_t count) +{ + const char *start, *end; + char bus_ids[3][BUS_ID_SIZE], *argv[3]; + int i; + int err; + + start = buf; + for (i = 0; i < 3; i++) { + static const char delim[] = { ',', ',', '\n' }; + int len; + + if (!(end = strchr(start, delim[i]))) + return -EINVAL; + len = min_t(ptrdiff_t, BUS_ID_SIZE - 1, end - start); + strncpy(bus_ids[i], start, len); + bus_ids[i][len] = '\0'; + start = end + 1; + argv[i] = bus_ids[i]; + } + err = ccwgroup_create(qeth_root_dev, qeth_ccwgroup_driver.driver_id, + &qeth_ccw_driver, 3, argv); + if (err) + return err; + else + return count; +} + + +static DRIVER_ATTR(group, 0200, 0, qeth_driver_group_store); + +static ssize_t +qeth_driver_notifier_register_store(struct device_driver *ddrv, const char *buf, + size_t count) +{ + int rc; + int signum; + char *tmp, *tmp2; + + tmp = strsep((char **) &buf, "\n"); + if (!strncmp(tmp, "unregister", 10)){ + if ((rc = qeth_notifier_unregister(current))) + return rc; + return count; + } + + signum = simple_strtoul(tmp, &tmp2, 10); + if ((signum < 0) || (signum > 32)){ + PRINT_WARN("Signal number %d is out of range\n", signum); + return -EINVAL; + } + if ((rc = qeth_notifier_register(current, signum))) + return rc; + + return count; +} + +static DRIVER_ATTR(notifier_register, 0200, 0, + qeth_driver_notifier_register_store); + +int +qeth_create_driver_attributes(void) +{ + int rc; + + if ((rc =
driver_create_file(&qeth_ccwgroup_driver.driver, + &driver_attr_group))) + return rc; + return driver_create_file(&qeth_ccwgroup_driver.driver, + &driver_attr_notifier_register); +} + +void +qeth_remove_driver_attributes(void) +{ + driver_remove_file(&qeth_ccwgroup_driver.driver, + &driver_attr_group); + driver_remove_file(&qeth_ccwgroup_driver.driver, + &driver_attr_notifier_register); +} diff --git a/drivers/s390/net/qeth_tso.c b/drivers/s390/net/qeth_tso.c new file mode 100644 index 000000000000..c91976274e7b --- /dev/null +++ b/drivers/s390/net/qeth_tso.c @@ -0,0 +1,285 @@ +/* + * linux/drivers/s390/net/qeth_tso.c ($Revision: 1.6 $) + * + * qeth TCP Segmentation Offload support. + * + * Copyright 2004 IBM Corporation + * + * Author(s): Frank Pavlic <pavlic@de.ibm.com> + * + * $Revision: 1.6 $ $Date: 2005/03/24 09:04:18 $ + * + */ + +#include <linux/skbuff.h> +#include <linux/tcp.h> +#include <linux/ip.h> +#include <linux/ipv6.h> +#include <net/ip6_checksum.h> +#include "qeth.h" +#include "qeth_mpc.h" +#include "qeth_tso.h" + +/** + * the skb is already partially prepared: the classic qdio header + * is already at skb->data + * */ +static inline struct qeth_hdr_tso * +qeth_tso_prepare_skb(struct qeth_card *card, struct sk_buff **skb) +{ + int rc = 0; + + QETH_DBF_TEXT(trace, 5, "tsoprsk"); + rc = qeth_realloc_headroom(card, skb,sizeof(struct qeth_hdr_ext_tso)); + if (rc) + return NULL; + + return qeth_push_skb(card, skb, sizeof(struct qeth_hdr_ext_tso)); +} + +/** + * fill header for a TSO packet + */ +static inline void +qeth_tso_fill_header(struct qeth_card *card, struct sk_buff *skb) +{ + struct qeth_hdr_tso *hdr; + struct tcphdr *tcph; + struct iphdr *iph; + + QETH_DBF_TEXT(trace, 5, "tsofhdr"); + + hdr = (struct qeth_hdr_tso *) skb->data; + iph = skb->nh.iph; + tcph = skb->h.th; + /*fix header to TSO values ...*/ + hdr->hdr.hdr.l3.id = QETH_HEADER_TYPE_TSO; + /*set values which are fix for the first approach ...*/ + hdr->ext.hdr_tot_len = (__u16) sizeof(struct qeth_hdr_ext_tso); + hdr->ext.imb_hdr_no = 1; + hdr->ext.hdr_type = 1; + hdr->ext.hdr_version = 1; + hdr->ext.hdr_len = 28; + /*insert non-fix values */ + hdr->ext.mss = skb_shinfo(skb)->tso_size; + hdr->ext.dg_hdr_len = (__u16)(iph->ihl*4 + tcph->doff*4); + hdr->ext.payload_len = (__u16)(skb->len - hdr->ext.dg_hdr_len - + sizeof(struct qeth_hdr_tso)); +} + +/** + * change some header values as requested by hardware + */ +static inline void +qeth_tso_set_tcpip_header(struct qeth_card *card, struct sk_buff *skb) +{ + struct iphdr *iph; + struct ipv6hdr *ip6h; + struct tcphdr *tcph; + + iph = skb->nh.iph; + ip6h = skb->nh.ipv6h; + tcph = skb->h.th; + + tcph->check = 0; + if (skb->protocol == ETH_P_IPV6) { + ip6h->payload_len = 0; + tcph->check = ~csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr, + 0, IPPROTO_TCP, 0); + return; + } + /*OSA wants us to set these values ...*/ + tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, + 0, IPPROTO_TCP, 0); + iph->tot_len = 0; + iph->check = 0; +} + +static inline struct qeth_hdr_tso * +qeth_tso_prepare_packet(struct qeth_card *card, struct sk_buff *skb, + int ipv, int cast_type) +{ + struct qeth_hdr_tso *hdr; + int rc = 0; + + QETH_DBF_TEXT(trace, 5, "tsoprep"); + + /*get headroom for tso qdio header */ + hdr = (struct qeth_hdr_tso *) qeth_tso_prepare_skb(card, &skb); + if (hdr == NULL) { + QETH_DBF_TEXT_(trace, 4, "2err%d", rc); + return NULL; + } + memset(hdr, 0, sizeof(struct qeth_hdr_tso)); + /*fill first 32 bytes of qdio header as used + *FIXME: TSO has two struct members + * with different
names but same size + * */ + qeth_fill_header(card, &hdr->hdr, skb, ipv, cast_type); + qeth_tso_fill_header(card, skb); + qeth_tso_set_tcpip_header(card, skb); + return hdr; +} + +static inline int +qeth_tso_get_queue_buffer(struct qeth_qdio_out_q *queue) +{ + struct qeth_qdio_out_buffer *buffer; + int flush_cnt = 0; + + QETH_DBF_TEXT(trace, 5, "tsobuf"); + + /* force to non-packing*/ + if (queue->do_pack) + queue->do_pack = 0; + buffer = &queue->bufs[queue->next_buf_to_fill]; + /* get a new buffer if current is already in use*/ + if ((atomic_read(&buffer->state) == QETH_QDIO_BUF_EMPTY) && + (buffer->next_element_to_fill > 0)) { + atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED); + queue->next_buf_to_fill = (queue->next_buf_to_fill + 1) % + QDIO_MAX_BUFFERS_PER_Q; + flush_cnt++; + } + return flush_cnt; +} + +static inline void +__qeth_tso_fill_buffer_frag(struct qeth_qdio_out_buffer *buf, + struct sk_buff *skb) +{ + struct skb_frag_struct *frag; + struct qdio_buffer *buffer; + int fragno, cnt, element; + unsigned long addr; + + QETH_DBF_TEXT(trace, 6, "tsfilfrg"); + + /*initialize variables ...*/ + fragno = skb_shinfo(skb)->nr_frags; + buffer = buf->buffer; + element = buf->next_element_to_fill; + /*fill buffer elements .....*/ + for (cnt = 0; cnt < fragno; cnt++) { + frag = &skb_shinfo(skb)->frags[cnt]; + addr = (page_to_pfn(frag->page) << PAGE_SHIFT) + + frag->page_offset; + buffer->element[element].addr = (char *)addr; + buffer->element[element].length = frag->size; + if (cnt < (fragno - 1)) + buffer->element[element].flags = + SBAL_FLAGS_MIDDLE_FRAG; + else + buffer->element[element].flags = + SBAL_FLAGS_LAST_FRAG; + element++; + } + buf->next_element_to_fill = element; +} + +static inline int +qeth_tso_fill_buffer(struct qeth_qdio_out_buffer *buf, + struct sk_buff *skb) +{ + int length, length_here, element; + int hdr_len; + struct qdio_buffer *buffer; + struct qeth_hdr_tso *hdr; + char *data; + + QETH_DBF_TEXT(trace, 3, "tsfilbuf"); + + /*increment user count and queue skb ...*/ + atomic_inc(&skb->users); + skb_queue_tail(&buf->skb_list, skb); + + /*initialize all variables...*/ + buffer = buf->buffer; + hdr = (struct qeth_hdr_tso *)skb->data; + hdr_len = sizeof(struct qeth_hdr_tso) + hdr->ext.dg_hdr_len; + data = skb->data + hdr_len; + length = skb->len - hdr_len; + element = buf->next_element_to_fill; + /*fill first buffer entry only with header information */ + buffer->element[element].addr = skb->data; + buffer->element[element].length = hdr_len; + buffer->element[element].flags = SBAL_FLAGS_FIRST_FRAG; + buf->next_element_to_fill++; + + if (skb_shinfo(skb)->nr_frags > 0) { + __qeth_tso_fill_buffer_frag(buf, skb); + goto out; + } + + /*start filling buffer entries ...*/ + element++; + while (length > 0) { + /* length_here is the remaining amount of data in this page */ + length_here = PAGE_SIZE - ((unsigned long) data % PAGE_SIZE); + if (length < length_here) + length_here = length; + buffer->element[element].addr = data; + buffer->element[element].length = length_here; + length -= length_here; + if (!length) + buffer->element[element].flags = + SBAL_FLAGS_LAST_FRAG; + else + buffer->element[element].flags = + SBAL_FLAGS_MIDDLE_FRAG; + data += length_here; + element++; + } + /*set the buffer to primed ...*/ + buf->next_element_to_fill = element; +out: + atomic_set(&buf->state, QETH_QDIO_BUF_PRIMED); + return 1; +} + +int +qeth_tso_send_packet(struct qeth_card *card, struct sk_buff *skb, + struct qeth_qdio_out_q *queue, int ipv, int cast_type) +{ + int flush_cnt = 0; + struct 
qeth_hdr_tso *hdr; + struct qeth_qdio_out_buffer *buffer; + int start_index; + + QETH_DBF_TEXT(trace, 3, "tsosend"); + + if (!(hdr = qeth_tso_prepare_packet(card, skb, ipv, cast_type))) + return -ENOMEM; + /*check if skb fits in one SBAL ...*/ + if (!(qeth_get_elements_no(card, (void*)hdr, skb))) + return -EINVAL; + /*lock queue, force switching to non-packing and send it ...*/ + while (atomic_compare_and_swap(QETH_OUT_Q_UNLOCKED, + QETH_OUT_Q_LOCKED, + &queue->state)); + start_index = queue->next_buf_to_fill; + buffer = &queue->bufs[queue->next_buf_to_fill]; + /*check if card is too busy ...*/ + if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY){ + card->stats.tx_dropped++; + goto out; + } + /*let's force to non-packing and get a new SBAL*/ + flush_cnt += qeth_tso_get_queue_buffer(queue); + buffer = &queue->bufs[queue->next_buf_to_fill]; + if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY) { + card->stats.tx_dropped++; + goto out; + } + flush_cnt += qeth_tso_fill_buffer(buffer, skb); + queue->next_buf_to_fill = (queue->next_buf_to_fill + 1) % + QDIO_MAX_BUFFERS_PER_Q; +out: + atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED); + if (flush_cnt) + qeth_flush_buffers(queue, 0, start_index, flush_cnt); + /*do some statistics */ + card->stats.tx_packets++; + card->stats.tx_bytes += skb->len; + return 0; +} diff --git a/drivers/s390/net/qeth_tso.h b/drivers/s390/net/qeth_tso.h new file mode 100644 index 000000000000..83504dee3f57 --- /dev/null +++ b/drivers/s390/net/qeth_tso.h @@ -0,0 +1,58 @@ +/* + * linux/drivers/s390/net/qeth_tso.h ($Revision: 1.4 $) + * + * Header file for qeth TCP Segmentation Offload support. + * + * Copyright 2004 IBM Corporation + * + * Author(s): Frank Pavlic <pavlic@de.ibm.com> + * + * $Revision: 1.4 $ $Date: 2005/03/24 09:04:18 $ + * + */ +#ifndef __QETH_TSO_H__ +#define __QETH_TSO_H__ + + +extern int +qeth_tso_send_packet(struct qeth_card *, struct sk_buff *, + struct qeth_qdio_out_q *, int , int); + +struct qeth_hdr_ext_tso { + __u16 hdr_tot_len; + __u8 imb_hdr_no; + __u8 reserved; + __u8 hdr_type; + __u8 hdr_version; + __u16 hdr_len; + __u32 payload_len; + __u16 mss; + __u16 dg_hdr_len; + __u8 padding[16]; +} __attribute__ ((packed)); + +struct qeth_hdr_tso { + struct qeth_hdr hdr; /*hdr->hdr.l3.xxx*/ + struct qeth_hdr_ext_tso ext; +} __attribute__ ((packed)); + +/*some helper functions*/ + +static inline int +qeth_get_elements_no(struct qeth_card *card, void *hdr, struct sk_buff *skb) +{ + int elements_needed = 0; + + if (skb_shinfo(skb)->nr_frags > 0) + elements_needed = (skb_shinfo(skb)->nr_frags + 1); + if (elements_needed == 0 ) + elements_needed = 1 + (((((unsigned long) hdr) % PAGE_SIZE) + + skb->len) >> PAGE_SHIFT); + if (elements_needed > QETH_MAX_BUFFER_ELEMENTS(card)){ + PRINT_ERR("qeth_do_send_packet: invalid size of " + "IP packet. Discarded."); + return 0; + } + return elements_needed; +} +#endif /* __QETH_TSO_H__ */ diff --git a/drivers/s390/net/smsgiucv.c b/drivers/s390/net/smsgiucv.c new file mode 100644 index 000000000000..a3d285859564 --- /dev/null +++ b/drivers/s390/net/smsgiucv.c @@ -0,0 +1,180 @@ +/* + * IUCV special message driver + * + * Copyright (C) 2003 IBM Deutschland Entwicklung GmbH, IBM Corporation + * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com) + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2, or (at your option) + * any later version. 
diff --git a/drivers/s390/net/smsgiucv.c b/drivers/s390/net/smsgiucv.c
new file mode 100644
index 000000000000..a3d285859564
--- /dev/null
+++ b/drivers/s390/net/smsgiucv.c
@@ -0,0 +1,180 @@
+/*
+ * IUCV special message driver
+ *
+ * Copyright (C) 2003 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/device.h>
+#include <asm/cpcmd.h>
+#include <asm/ebcdic.h>
+
+#include "iucv.h"
+
+struct smsg_callback {
+	struct list_head list;
+	char *prefix;
+	int len;
+	void (*callback)(char *str);
+};
+
+MODULE_AUTHOR
+   ("(C) 2003 IBM Corporation by Martin Schwidefsky (schwidefsky@de.ibm.com)");
+MODULE_DESCRIPTION ("Linux for S/390 IUCV special message driver");
+
+static iucv_handle_t smsg_handle;
+static unsigned short smsg_pathid;
+static DEFINE_SPINLOCK(smsg_list_lock);
+static struct list_head smsg_list = LIST_HEAD_INIT(smsg_list);
+
+static void
+smsg_connection_complete(iucv_ConnectionComplete *eib, void *pgm_data)
+{
+}
+
+
+static void
+smsg_message_pending(iucv_MessagePending *eib, void *pgm_data)
+{
+	struct smsg_callback *cb;
+	unsigned char *msg;
+	unsigned short len;
+	int rc;
+
+	len = eib->ln1msg2.ipbfln1f;
+	msg = kmalloc(len + 1, GFP_ATOMIC|GFP_DMA);
+	if (!msg) {
+		iucv_reject(eib->ippathid, eib->ipmsgid, eib->iptrgcls);
+		return;
+	}
+	rc = iucv_receive(eib->ippathid, eib->ipmsgid, eib->iptrgcls,
+			  msg, len, 0, 0, 0);
+	if (rc == 0) {
+		msg[len] = 0;
+		EBCASC(msg, len);
+		spin_lock(&smsg_list_lock);
+		list_for_each_entry(cb, &smsg_list, list)
+			if (strncmp(msg + 8, cb->prefix, cb->len) == 0) {
+				cb->callback(msg + 8);
+				break;
+			}
+		spin_unlock(&smsg_list_lock);
+	}
+	kfree(msg);
+}
+
+static iucv_interrupt_ops_t smsg_ops = {
+	.ConnectionComplete = smsg_connection_complete,
+	.MessagePending     = smsg_message_pending,
+};
+
+static struct device_driver smsg_driver = {
+	.name = "SMSGIUCV",
+	.bus  = &iucv_bus,
+};
+
+int
+smsg_register_callback(char *prefix, void (*callback)(char *str))
+{
+	struct smsg_callback *cb;
+
+	cb = kmalloc(sizeof(struct smsg_callback), GFP_KERNEL);
+	if (!cb)
+		return -ENOMEM;
+	cb->prefix = prefix;
+	cb->len = strlen(prefix);
+	cb->callback = callback;
+	spin_lock(&smsg_list_lock);
+	list_add_tail(&cb->list, &smsg_list);
+	spin_unlock(&smsg_list_lock);
+	return 0;
+}
+
+void
+smsg_unregister_callback(char *prefix, void (*callback)(char *str))
+{
+	struct smsg_callback *cb, *tmp;
+
+	spin_lock(&smsg_list_lock);
+	cb = 0;
+	list_for_each_entry(tmp, &smsg_list, list)
+		if (tmp->callback == callback &&
+		    strcmp(tmp->prefix, prefix) == 0) {
+			cb = tmp;
+			list_del(&cb->list);
+			break;
+		}
+	spin_unlock(&smsg_list_lock);
+	kfree(cb);
+}
+
+static void __exit
+smsg_exit(void)
+{
+	if (smsg_handle > 0) {
+		cpcmd("SET SMSG OFF", 0, 0);
+		iucv_sever(smsg_pathid, 0);
+		iucv_unregister_program(smsg_handle);
+		driver_unregister(&smsg_driver);
+	}
+	return;
+}
+
+static int __init
+smsg_init(void)
+{
+	static unsigned char pgmmask[24] = {
+		0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+		0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+		0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
+	};
+	int rc;
+
+	rc = driver_register(&smsg_driver);
+	if (rc != 0) {
+		printk(KERN_ERR "SMSGIUCV: failed to register driver.\n");
+		return rc;
+	}
+	smsg_handle = iucv_register_program("SMSGIUCV        ", "*MSG    ",
+					    pgmmask, &smsg_ops, 0);
+	if (!smsg_handle) {
+		printk(KERN_ERR "SMSGIUCV: failed to register to iucv");
+		driver_unregister(&smsg_driver);
+		return -EIO;	/* better errno ? */
+	}
+	rc = iucv_connect(&smsg_pathid, 1, 0, "*MSG    ", 0, 0, 0, 0,
+			  smsg_handle, 0);
+	if (rc) {
+		printk(KERN_ERR "SMSGIUCV: failed to connect to *MSG");
+		iucv_unregister_program(smsg_handle);
+		driver_unregister(&smsg_driver);
+		smsg_handle = 0;
+		return -EIO;
+	}
+	cpcmd("SET SMSG IUCV", 0, 0);
+	return 0;
+}
+
+module_init(smsg_init);
+module_exit(smsg_exit);
+MODULE_LICENSE("GPL");
+
+EXPORT_SYMBOL(smsg_register_callback);
+EXPORT_SYMBOL(smsg_unregister_callback);
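smsg_message_pending() above implements a simple prefix-routing scheme: the received message is converted from EBCDIC to ASCII, then compared, past its first eight bytes (which carry the sending guest's user id), against every registered prefix, and the first match receives the callback. A user-space sketch of that dispatch with plain arrays; all names and the sample message are illustrative stand-ins, not part of the driver.

/* Stand-alone sketch of the prefix dispatch in smsg_message_pending(). */
#include <stdio.h>
#include <string.h>

struct handler {
	const char *prefix;
	void (*callback)(const char *str);
};

static void on_demo(const char *s) { printf("DEMO handler: %s\n", s); }
static void on_stat(const char *s) { printf("STAT handler: %s\n", s); }

static void dispatch(const struct handler *h, int n, const char *msg)
{
	/* the first 8 bytes of a real SMSG hold the sender's user id */
	const char *text = msg + 8;

	for (int i = 0; i < n; i++)
		if (strncmp(text, h[i].prefix, strlen(h[i].prefix)) == 0) {
			h[i].callback(text);
			return;
		}
}

int main(void)
{
	const struct handler handlers[] = {
		{ "DEMO", on_demo },
		{ "STAT", on_stat },
	};

	dispatch(handlers, 2, "LINUX01 DEMO hello there");
	return 0;
}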
diff --git a/drivers/s390/net/smsgiucv.h b/drivers/s390/net/smsgiucv.h
new file mode 100644
index 000000000000..04cd87152964
--- /dev/null
+++ b/drivers/s390/net/smsgiucv.h
@@ -0,0 +1,10 @@
+/*
+ * IUCV special message driver
+ *
+ * Copyright (C) 2003 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
+ */
+
+int smsg_register_callback(char *, void (*)(char *));
+void smsg_unregister_callback(char *, void (*)(char *));
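To round out the picture, a hypothetical client of this two-function interface could hook one prefix roughly as follows; the module, the "DEMO" prefix and the printk text are invented for illustration, and error handling is trimmed. Another VM guest would then trigger the callback with something like "CP SMSG <guest> DEMO hello".

/* Hypothetical client module of smsgiucv (sketch only). */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include "smsgiucv.h"

static void demo_callback(char *msg)
{
	/* msg points past the 8-byte sender id, at the "DEMO..." text */
	printk(KERN_INFO "demo: got smsg '%s'\n", msg);
}

static int __init demo_init(void)
{
	return smsg_register_callback("DEMO", demo_callback);
}

static void __exit demo_exit(void)
{
	smsg_unregister_callback("DEMO", demo_callback);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");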