author    David Woodhouse <dwmw2@shinybook.infradead.org>  2005-06-20 15:51:24 +0100
committer David Woodhouse <dwmw2@shinybook.infradead.org>  2005-06-20 15:51:24 +0100
commit df5179854bca84ac5be500849b12dd33ce03f03f
tree   78cf16415489e70f34c58f2c7f5c2e63696e9761
parent 0f45aa18e65cf3d768082d7d86054a0d2a20bb18
parent 8b22c249e7de453961e4d253b19fc2a0bdd65d53
Merge with master.kernel.org:/pub/scm/linux/kernel/git/torvalds/linux-2.6.git
Diffstat (limited to 'include')
-rw-r--r--  include/asm-arm/arch-integrator/smp.h | 19
-rw-r--r--  include/asm-arm/smp.h | 14
-rw-r--r--  include/asm-arm/system.h | 1
-rw-r--r--  include/linux/ata.h | 1
-rw-r--r--  include/linux/blkdev.h | 2
-rw-r--r--  include/linux/chio.h | 168
-rw-r--r--  include/linux/dm9000.h | 36
-rw-r--r--  include/linux/dma-mapping.h | 5
-rw-r--r--  include/linux/hdlc.h | 4
-rw-r--r--  include/linux/if.h | 2
-rw-r--r--  include/linux/ip.h | 21
-rw-r--r--  include/linux/ipv6.h | 13
-rw-r--r--  include/linux/libata.h | 1
-rw-r--r--  include/linux/major.h | 1
-rw-r--r--  include/linux/netlink.h | 24
-rw-r--r--  include/linux/rtnetlink.h | 176
-rw-r--r--  include/linux/slab.h | 1
-rw-r--r--  include/linux/tcp.h | 28
-rw-r--r--  include/linux/wireless.h | 283
-rw-r--r--  include/linux/xfrm.h | 4
-rw-r--r--  include/net/neighbour.h | 7
-rw-r--r--  include/net/request_sock.h | 255
-rw-r--r--  include/net/sch_generic.h | 122
-rw-r--r--  include/net/sock.h | 4
-rw-r--r--  include/net/tcp.h | 160
-rw-r--r--  include/net/tcp_ecn.h | 13
-rw-r--r--  include/net/xfrm.h | 24
-rw-r--r--  include/scsi/scsi.h | 4
-rw-r--r--  include/scsi/scsi_device.h | 4
-rw-r--r--  include/scsi/scsi_host.h | 25
-rw-r--r--  include/scsi/scsi_transport.h | 38
31 files changed, 1292 insertions, 168 deletions
diff --git a/include/asm-arm/arch-integrator/smp.h b/include/asm-arm/arch-integrator/smp.h
new file mode 100644
index 000000000000..0ec7093f7c37
--- /dev/null
+++ b/include/asm-arm/arch-integrator/smp.h
@@ -0,0 +1,19 @@
+#ifndef ASMARM_ARCH_SMP_H
+#define ASMARM_ARCH_SMP_H
+
+#include <linux/config.h>
+
+#include <asm/arch/hardware.h>
+#include <asm/io.h>
+
+#define hard_smp_processor_id() \
+ ({ \
+ unsigned int cpunum; \
+ __asm__("mrc p15, 0, %0, c0, c0, 5" \
+ : "=r" (cpunum)); \
+ cpunum &= 0x0F; \
+ })
+
+extern void secondary_scan_irqs(void);
+
+#endif
diff --git a/include/asm-arm/smp.h b/include/asm-arm/smp.h
index f21fd8f6bcdd..bd44f894690f 100644
--- a/include/asm-arm/smp.h
+++ b/include/asm-arm/smp.h
@@ -55,4 +55,18 @@ extern void smp_cross_call(cpumask_t callmap);
*/
extern int boot_secondary(unsigned int cpu, struct task_struct *);
+/*
+ * Perform platform specific initialisation of the specified CPU.
+ */
+extern void platform_secondary_init(unsigned int cpu);
+
+/*
+ * Initial data for bringing up a secondary CPU.
+ */
+struct secondary_data {
+ unsigned long pgdir;
+ void *stack;
+};
+extern struct secondary_data secondary_data;
+
#endif /* ifndef __ASM_ARM_SMP_H */
diff --git a/include/asm-arm/system.h b/include/asm-arm/system.h
index b13a8da4847b..8405eb6558ed 100644
--- a/include/asm-arm/system.h
+++ b/include/asm-arm/system.h
@@ -104,6 +104,7 @@ extern void show_pte(struct mm_struct *mm, unsigned long addr);
extern void __show_regs(struct pt_regs *);
extern int cpu_architecture(void);
+extern void cpu_init(void);
#define set_cr(x) \
__asm__ __volatile__( \
diff --git a/include/linux/ata.h b/include/linux/ata.h
index f178894edd04..ca5fcadf9981 100644
--- a/include/linux/ata.h
+++ b/include/linux/ata.h
@@ -224,6 +224,7 @@ struct ata_taskfile {
};
#define ata_id_is_ata(id) (((id)[0] & (1 << 15)) == 0)
+#define ata_id_is_sata(id) ((id)[93] == 0)
#define ata_id_rahead_enabled(id) ((id)[85] & (1 << 6))
#define ata_id_wcache_enabled(id) ((id)[85] & (1 << 5))
#define ata_id_has_flush(id) ((id)[83] & (1 << 12))
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index ef1afc178c0a..4a99b76c5a33 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -544,7 +544,7 @@ extern void blk_end_sync_rq(struct request *rq);
extern void blk_attempt_remerge(request_queue_t *, struct request *);
extern void __blk_attempt_remerge(request_queue_t *, struct request *);
extern struct request *blk_get_request(request_queue_t *, int, int);
-extern void blk_insert_request(request_queue_t *, struct request *, int, void *, int);
+extern void blk_insert_request(request_queue_t *, struct request *, int, void *);
extern void blk_requeue_request(request_queue_t *, struct request *);
extern void blk_plug_device(request_queue_t *);
extern int blk_remove_plug(request_queue_t *);
diff --git a/include/linux/chio.h b/include/linux/chio.h
new file mode 100644
index 000000000000..63035ae67e63
--- /dev/null
+++ b/include/linux/chio.h
@@ -0,0 +1,168 @@
+/*
+ * ioctl interface for the scsi media changer driver
+ */
+
+/* changer element types */
+#define CHET_MT 0 /* media transport element (robot) */
+#define CHET_ST 1 /* storage element (media slots) */
+#define CHET_IE 2 /* import/export element */
+#define CHET_DT 3 /* data transfer element (tape/cdrom/whatever) */
+#define CHET_V1 4 /* vendor specific #1 */
+#define CHET_V2 5 /* vendor specific #2 */
+#define CHET_V3 6 /* vendor specific #3 */
+#define CHET_V4 7 /* vendor specific #4 */
+
+
+/*
+ * CHIOGPARAMS
+ * query changer properties
+ *
+ * CHIOVGPARAMS
+ * query vendor-specific element types
+ *
+ * accessing elements works by specifying type and unit of the element.
+ * for example, storage elements are addressed with type = CHET_ST and
+ * unit = 0 .. cp_nslots-1
+ *
+ */
+struct changer_params {
+ int cp_curpicker; /* current transport element */
+ int cp_npickers; /* number of transport elements (CHET_MT) */
+ int cp_nslots; /* number of storage elements (CHET_ST) */
+ int cp_nportals; /* number of import/export elements (CHET_IE) */
+ int cp_ndrives; /* number of data transfer elements (CHET_DT) */
+};
+struct changer_vendor_params {
+ int cvp_n1; /* number of vendor specific elems (CHET_V1) */
+ char cvp_label1[16];
+ int cvp_n2; /* number of vendor specific elems (CHET_V2) */
+ char cvp_label2[16];
+ int cvp_n3; /* number of vendor specific elems (CHET_V3) */
+ char cvp_label3[16];
+ int cvp_n4; /* number of vendor specific elems (CHET_V4) */
+ char cvp_label4[16];
+ int reserved[8];
+};
+
+
+/*
+ * CHIOMOVE
+ * move a medium from one element to another
+ */
+struct changer_move {
+ int cm_fromtype; /* type/unit of source element */
+ int cm_fromunit;
+ int cm_totype; /* type/unit of destination element */
+ int cm_tounit;
+ int cm_flags;
+};
+#define CM_INVERT 1 /* flag: rotate media (for double-sided like MOD) */
+
+
+/*
+ * CHIOEXCHANGE
+ * move one medium from element #1 to element #2,
+ * and another one from element #2 to element #3.
+ * element #1 and #3 are allowed to be identical.
+ */
+struct changer_exchange {
+ int ce_srctype; /* type/unit of element #1 */
+ int ce_srcunit;
+ int ce_fdsttype; /* type/unit of element #2 */
+ int ce_fdstunit;
+ int ce_sdsttype; /* type/unit of element #3 */
+ int ce_sdstunit;
+ int ce_flags;
+};
+#define CE_INVERT1 1
+#define CE_INVERT2 2
+
+
+/*
+ * CHIOPOSITION
+ * move the transport element (robot arm) to a specific element.
+ */
+struct changer_position {
+ int cp_type;
+ int cp_unit;
+ int cp_flags;
+};
+#define CP_INVERT 1
+
+
+/*
+ * CHIOGSTATUS
+ * get element status for all elements of a specific type
+ */
+struct changer_element_status {
+ int ces_type;
+ unsigned char *ces_data;
+};
+#define CESTATUS_FULL 0x01 /* full */
+#define CESTATUS_IMPEXP 0x02 /* media was imported (inserted by sysop) */
+#define CESTATUS_EXCEPT 0x04 /* error condition */
+#define CESTATUS_ACCESS 0x08 /* access allowed */
+#define CESTATUS_EXENAB 0x10 /* element can export media */
+#define CESTATUS_INENAB 0x20 /* element can import media */
+
+
+/*
+ * CHIOGELEM
+ * get more detailed status information for a single element
+ */
+struct changer_get_element {
+ int cge_type; /* type/unit */
+ int cge_unit;
+ int cge_status; /* status */
+ int cge_errno; /* errno */
+ int cge_srctype; /* source element of the last move/exchange */
+ int cge_srcunit;
+ int cge_id; /* scsi id (for data transfer elements) */
+ int cge_lun; /* scsi lun (for data transfer elements) */
+ char cge_pvoltag[36]; /* primary volume tag */
+ char cge_avoltag[36]; /* alternate volume tag */
+ int cge_flags;
+};
+/* flags */
+#define CGE_ERRNO 0x01 /* errno available */
+#define CGE_INVERT 0x02 /* media inverted */
+#define CGE_SRC 0x04 /* media src available */
+#define CGE_IDLUN 0x08 /* ID+LUN available */
+#define CGE_PVOLTAG 0x10 /* primary volume tag available */
+#define CGE_AVOLTAG 0x20 /* alternate volume tag available */
+
+
+/*
+ * CHIOSVOLTAG
+ * set volume tag
+ */
+struct changer_set_voltag {
+ int csv_type; /* type/unit */
+ int csv_unit;
+ char csv_voltag[36]; /* volume tag */
+ int csv_flags;
+};
+#define CSV_PVOLTAG 0x01 /* primary volume tag */
+#define CSV_AVOLTAG 0x02 /* alternate volume tag */
+#define CSV_CLEARTAG 0x04 /* clear volume tag */
+
+/* ioctls */
+#define CHIOMOVE _IOW('c', 1,struct changer_move)
+#define CHIOEXCHANGE _IOW('c', 2,struct changer_exchange)
+#define CHIOPOSITION _IOW('c', 3,struct changer_position)
+#define CHIOGPICKER _IOR('c', 4,int) /* not impl. */
+#define CHIOSPICKER _IOW('c', 5,int) /* not impl. */
+#define CHIOGPARAMS _IOR('c', 6,struct changer_params)
+#define CHIOGSTATUS _IOW('c', 8,struct changer_element_status)
+#define CHIOGELEM _IOW('c',16,struct changer_get_element)
+#define CHIOINITELEM _IO('c',17)
+#define CHIOSVOLTAG _IOW('c',18,struct changer_set_voltag)
+#define CHIOGVPARAMS _IOR('c',19,struct changer_vendor_params)
+
+/* ---------------------------------------------------------------------- */
+
+/*
+ * Local variables:
+ * c-basic-offset: 8
+ * End:
+ */
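To make the ioctl interface above concrete, here is a minimal user-space sketch (not part of the patch): it assumes the changer is exposed as a hypothetical /dev/sch0 node, queries the element counts with CHIOGPARAMS, and moves one medium from storage slot 0 into drive 0 with CHIOMOVE.

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/chio.h>

int main(void)
{
	struct changer_params params;
	struct changer_move move;
	int fd = open("/dev/sch0", O_RDONLY);	/* hypothetical changer node */

	if (fd < 0 || ioctl(fd, CHIOGPARAMS, &params) < 0)
		return 1;
	printf("%d slots, %d drives\n", params.cp_nslots, params.cp_ndrives);

	/* move the medium in storage slot 0 into data transfer element 0 */
	move.cm_fromtype = CHET_ST;
	move.cm_fromunit = 0;
	move.cm_totype   = CHET_DT;
	move.cm_tounit   = 0;
	move.cm_flags    = 0;
	if (ioctl(fd, CHIOMOVE, &move) < 0)
		perror("CHIOMOVE");

	close(fd);
	return 0;
}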
diff --git a/include/linux/dm9000.h b/include/linux/dm9000.h
new file mode 100644
index 000000000000..0008e2ad0c9f
--- /dev/null
+++ b/include/linux/dm9000.h
@@ -0,0 +1,36 @@
+/* include/linux/dm9000.h
+ *
+ * Copyright (c) 2004 Simtec Electronics
+ * Ben Dooks <ben@simtec.co.uk>
+ *
+ * Header file for dm9000 platform data
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+*/
+
+#ifndef __DM9000_PLATFORM_DATA
+#define __DM9000_PLATFORM_DATA __FILE__
+
+/* IO control flags */
+
+#define DM9000_PLATF_8BITONLY (0x0001)
+#define DM9000_PLATF_16BITONLY (0x0002)
+#define DM9000_PLATF_32BITONLY (0x0004)
+
+/* platform data for the platform device structure's platform_data field */
+
+struct dm9000_plat_data {
+ unsigned int flags;
+
+ /* allow replacement IO routines */
+
+ void (*inblk)(void __iomem *reg, void *data, int len);
+ void (*outblk)(void __iomem *reg, void *data, int len);
+ void (*dumpblk)(void __iomem *reg, int len);
+};
+
+#endif /* __DM9000_PLATFORM_DATA */
+
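For context, a board file would attach the structure above through platform_data. The sketch below is only an assumed example (device name "dm9000", 16-bit bus, I/O and IRQ resources omitted), not code from this patch.

#include <linux/device.h>
#include <linux/dm9000.h>

static struct dm9000_plat_data dm9000_platdata = {
	.flags = DM9000_PLATF_16BITONLY,	/* assume a 16-bit wired bus */
};

static struct platform_device dm9000_device = {
	.name = "dm9000",
	.id   = -1,
	.dev  = {
		.platform_data = &dm9000_platdata,
	},
	/* I/O memory and IRQ resources omitted for brevity */
};

/* board init code would then call platform_device_register(&dm9000_device); */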
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index 806c305332c1..2d80cc761a15 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -14,7 +14,12 @@ enum dma_data_direction {
};
#define DMA_64BIT_MASK 0xffffffffffffffffULL
+#define DMA_40BIT_MASK 0x000000ffffffffffULL
+#define DMA_39BIT_MASK 0x0000007fffffffffULL
#define DMA_32BIT_MASK 0x00000000ffffffffULL
+#define DMA_31BIT_MASK 0x000000007fffffffULL
+#define DMA_30BIT_MASK 0x000000003fffffffULL
+#define DMA_29BIT_MASK 0x000000001fffffffULL
#include <asm/dma-mapping.h>
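The new intermediate masks let drivers describe hardware that can address more than 32 but fewer than 64 bits. A hedged sketch of the usual probe-time pattern follows; the device and its 40-bit limit are assumptions, not part of this patch.

#include <linux/pci.h>

static int example_set_dma_mask(struct pci_dev *pdev)
{
	/* hypothetical device that can generate 40-bit DMA addresses */
	if (pci_set_dma_mask(pdev, DMA_40BIT_MASK) == 0)
		return 0;

	/* fall back to the 32-bit mask that all PCI devices support */
	if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) == 0)
		return 0;

	dev_err(&pdev->dev, "no usable DMA configuration\n");
	return -EIO;
}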
diff --git a/include/linux/hdlc.h b/include/linux/hdlc.h
index 503194e62fe1..ed2927ef1ff7 100644
--- a/include/linux/hdlc.h
+++ b/include/linux/hdlc.h
@@ -1,7 +1,7 @@
/*
* Generic HDLC support routines for Linux
*
- * Copyright (C) 1999-2003 Krzysztof Halasa <khc@pm.waw.pl>
+ * Copyright (C) 1999-2005 Krzysztof Halasa <khc@pm.waw.pl>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License
@@ -41,6 +41,7 @@
#define LMI_NONE 1 /* No LMI, all PVCs are static */
#define LMI_ANSI 2 /* ANSI Annex D */
#define LMI_CCITT 3 /* ITU-T Annex A */
+#define LMI_CISCO 4 /* The "original" LMI, aka Gang of Four */
#define HDLC_MAX_MTU 1500 /* Ethernet 1500 bytes */
#define HDLC_MAX_MRU (HDLC_MAX_MTU + 10 + 14 + 4) /* for ETH+VLAN over FR */
@@ -89,6 +90,7 @@ typedef struct pvc_device_struct {
unsigned int deleted: 1;
unsigned int fecn: 1;
unsigned int becn: 1;
+ unsigned int bandwidth; /* Cisco LMI reporting only */
}state;
}pvc_device;
diff --git a/include/linux/if.h b/include/linux/if.h
index d73a9d62f208..ce627d9092ef 100644
--- a/include/linux/if.h
+++ b/include/linux/if.h
@@ -33,7 +33,7 @@
#define IFF_LOOPBACK 0x8 /* is a loopback net */
#define IFF_POINTOPOINT 0x10 /* interface has a p-p link */
#define IFF_NOTRAILERS 0x20 /* avoid use of trailers */
-#define IFF_RUNNING 0x40 /* resources allocated */
+#define IFF_RUNNING 0x40 /* interface running and carrier ok */
#define IFF_NOARP 0x80 /* no ARP protocol */
#define IFF_PROMISC 0x100 /* receive all packets */
#define IFF_ALLMULTI 0x200 /* receive all multicast packets*/
diff --git a/include/linux/ip.h b/include/linux/ip.h
index 8438c68591f9..31e7cedd9f84 100644
--- a/include/linux/ip.h
+++ b/include/linux/ip.h
@@ -81,6 +81,7 @@
#ifdef __KERNEL__
#include <linux/config.h>
#include <linux/types.h>
+#include <net/request_sock.h>
#include <net/sock.h>
#include <linux/igmp.h>
#include <net/flow.h>
@@ -107,6 +108,26 @@ struct ip_options {
#define optlength(opt) (sizeof(struct ip_options) + opt->optlen)
+struct inet_request_sock {
+ struct request_sock req;
+ u32 loc_addr;
+ u32 rmt_addr;
+ u16 rmt_port;
+ u16 snd_wscale : 4,
+ rcv_wscale : 4,
+ tstamp_ok : 1,
+ sack_ok : 1,
+ wscale_ok : 1,
+ ecn_ok : 1,
+ acked : 1;
+ struct ip_options *opt;
+};
+
+static inline struct inet_request_sock *inet_rsk(const struct request_sock *sk)
+{
+ return (struct inet_request_sock *)sk;
+}
+
struct ipv6_pinfo;
struct inet_sock {
diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h
index ab0d0efbf240..6fcd6a0ade24 100644
--- a/include/linux/ipv6.h
+++ b/include/linux/ipv6.h
@@ -193,6 +193,19 @@ struct inet6_skb_parm {
#define IP6CB(skb) ((struct inet6_skb_parm*)((skb)->cb))
+struct tcp6_request_sock {
+ struct tcp_request_sock req;
+ struct in6_addr loc_addr;
+ struct in6_addr rmt_addr;
+ struct sk_buff *pktopts;
+ int iif;
+};
+
+static inline struct tcp6_request_sock *tcp6_rsk(const struct request_sock *sk)
+{
+ return (struct tcp6_request_sock *)sk;
+}
+
/**
* struct ipv6_pinfo - ipv6 private area
*
diff --git a/include/linux/libata.h b/include/linux/libata.h
index b009f801e7c5..6cd9ba63563b 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -421,6 +421,7 @@ extern void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
extern unsigned int ata_dev_classify(struct ata_taskfile *tf);
extern void ata_dev_id_string(u16 *id, unsigned char *s,
unsigned int ofs, unsigned int len);
+extern void ata_dev_config(struct ata_port *ap, unsigned int i);
extern void ata_bmdma_setup (struct ata_queued_cmd *qc);
extern void ata_bmdma_start (struct ata_queued_cmd *qc);
extern void ata_bmdma_stop(struct ata_port *ap);
diff --git a/include/linux/major.h b/include/linux/major.h
index 4b62c42b842c..e36a46702d94 100644
--- a/include/linux/major.h
+++ b/include/linux/major.h
@@ -100,6 +100,7 @@
#define I2O_MAJOR 80 /* 80->87 */
#define SHMIQ_MAJOR 85 /* Linux/mips, SGI /dev/shmiq */
+#define SCSI_CHANGER_MAJOR 86
#define IDE6_MAJOR 88
#define IDE7_MAJOR 89
diff --git a/include/linux/netlink.h b/include/linux/netlink.h
index b2738ac8bc99..e38407a23d04 100644
--- a/include/linux/netlink.h
+++ b/include/linux/netlink.h
@@ -156,7 +156,7 @@ struct netlink_notify
};
static __inline__ struct nlmsghdr *
-__nlmsg_put(struct sk_buff *skb, u32 pid, u32 seq, int type, int len)
+__nlmsg_put(struct sk_buff *skb, u32 pid, u32 seq, int type, int len, int flags)
{
struct nlmsghdr *nlh;
int size = NLMSG_LENGTH(len);
@@ -164,15 +164,31 @@ __nlmsg_put(struct sk_buff *skb, u32 pid, u32 seq, int type, int len)
nlh = (struct nlmsghdr*)skb_put(skb, NLMSG_ALIGN(size));
nlh->nlmsg_type = type;
nlh->nlmsg_len = size;
- nlh->nlmsg_flags = 0;
+ nlh->nlmsg_flags = flags;
nlh->nlmsg_pid = pid;
nlh->nlmsg_seq = seq;
return nlh;
}
+#define NLMSG_NEW(skb, pid, seq, type, len, flags) \
+({ if (skb_tailroom(skb) < (int)NLMSG_SPACE(len)) \
+ goto nlmsg_failure; \
+ __nlmsg_put(skb, pid, seq, type, len, flags); })
+
#define NLMSG_PUT(skb, pid, seq, type, len) \
-({ if (skb_tailroom(skb) < (int)NLMSG_SPACE(len)) goto nlmsg_failure; \
- __nlmsg_put(skb, pid, seq, type, len); })
+ NLMSG_NEW(skb, pid, seq, type, len, 0)
+
+#define NLMSG_NEW_ANSWER(skb, cb, type, len, flags) \
+ NLMSG_NEW(skb, NETLINK_CB((cb)->skb).pid, \
+ (cb)->nlh->nlmsg_seq, type, len, flags)
+
+#define NLMSG_END(skb, nlh) \
+({ (nlh)->nlmsg_len = (skb)->tail - (unsigned char *) (nlh); \
+ (skb)->len; })
+
+#define NLMSG_CANCEL(skb, nlh) \
+({ skb_trim(skb, (unsigned char *) (nlh) - (skb)->data); \
+ -1; })
extern int netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
struct nlmsghdr *nlh,
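To show how NLMSG_NEW and NLMSG_END are meant to be used, here is a hedged kernel-side sketch; NLMSG_CANCEL would undo a partially built message if a later attribute put failed. The message type (RTM_NEWNEIGH), payload struct and function name are placeholders, not part of this patch.

#include <linux/string.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>

static int example_fill_msg(struct sk_buff *skb, u32 pid, u32 seq, int flags)
{
	struct nlmsghdr *nlh;
	struct ndmsg *ndm;

	/* reserve the header; jumps to nlmsg_failure when the skb is full */
	nlh = NLMSG_NEW(skb, pid, seq, RTM_NEWNEIGH, sizeof(*ndm), flags);
	ndm = NLMSG_DATA(nlh);
	memset(ndm, 0, sizeof(*ndm));
	ndm->ndm_family = AF_INET;
	/* ... rtattrs would be appended here with RTA_PUT() ... */

	return NLMSG_END(skb, nlh);	/* fix up nlmsg_len, return skb->len */

nlmsg_failure:
	return -1;			/* nothing was added, just report it */
}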
diff --git a/include/linux/rtnetlink.h b/include/linux/rtnetlink.h
index 91ac97c20777..e68dbf0bf579 100644
--- a/include/linux/rtnetlink.h
+++ b/include/linux/rtnetlink.h
@@ -89,6 +89,13 @@ enum {
RTM_GETANYCAST = 62,
#define RTM_GETANYCAST RTM_GETANYCAST
+ RTM_NEWNEIGHTBL = 64,
+#define RTM_NEWNEIGHTBL RTM_NEWNEIGHTBL
+ RTM_GETNEIGHTBL = 66,
+#define RTM_GETNEIGHTBL RTM_GETNEIGHTBL
+ RTM_SETNEIGHTBL,
+#define RTM_SETNEIGHTBL RTM_SETNEIGHTBL
+
__RTM_MAX,
#define RTM_MAX (((__RTM_MAX + 3) & ~3) - 1)
};
@@ -493,6 +500,106 @@ struct nda_cacheinfo
__u32 ndm_refcnt;
};
+
+/*****************************************************************
+ * Neighbour tables specific messages.
+ *
+ * To retrieve the neighbour tables send RTM_GETNEIGHTBL with the
+ * NLM_F_DUMP flag set. Every neighbour table configuration is
+ * spread over multiple messages to avoid running into message
+ * size limits on systems with many interfaces. The first message
+ * in the sequence transports all not device specific data such as
+ * statistics, configuration, and the default parameter set.
+ * This message is followed by 0..n messages carrying device
+ * specific parameter sets.
+ * Although the ordering should be sufficient, NDTA_NAME can be
+ * used to identify sequences. The initial message can be identified
+ * by checking for NDTA_CONFIG. The device specific messages do
+ * not contain this TLV but have NDTPA_IFINDEX set to the
+ * corresponding interface index.
+ *
+ * To change neighbour table attributes, send RTM_SETNEIGHTBL
+ * with NDTA_NAME set. Changeable attributes include NDTA_THRESH[1-3],
+ * NDTA_GC_INTERVAL, and all TLVs in NDTA_PARMS unless marked
+ * otherwise. Device specific parameter sets can be changed by
+ * setting NDTPA_IFINDEX to the interface index of the corresponding
+ * device.
+ ****/
+
+struct ndt_stats
+{
+ __u64 ndts_allocs;
+ __u64 ndts_destroys;
+ __u64 ndts_hash_grows;
+ __u64 ndts_res_failed;
+ __u64 ndts_lookups;
+ __u64 ndts_hits;
+ __u64 ndts_rcv_probes_mcast;
+ __u64 ndts_rcv_probes_ucast;
+ __u64 ndts_periodic_gc_runs;
+ __u64 ndts_forced_gc_runs;
+};
+
+enum {
+ NDTPA_UNSPEC,
+ NDTPA_IFINDEX, /* u32, unchangeable */
+ NDTPA_REFCNT, /* u32, read-only */
+ NDTPA_REACHABLE_TIME, /* u64, read-only, msecs */
+ NDTPA_BASE_REACHABLE_TIME, /* u64, msecs */
+ NDTPA_RETRANS_TIME, /* u64, msecs */
+ NDTPA_GC_STALETIME, /* u64, msecs */
+ NDTPA_DELAY_PROBE_TIME, /* u64, msecs */
+ NDTPA_QUEUE_LEN, /* u32 */
+ NDTPA_APP_PROBES, /* u32 */
+ NDTPA_UCAST_PROBES, /* u32 */
+ NDTPA_MCAST_PROBES, /* u32 */
+ NDTPA_ANYCAST_DELAY, /* u64, msecs */
+ NDTPA_PROXY_DELAY, /* u64, msecs */
+ NDTPA_PROXY_QLEN, /* u32 */
+ NDTPA_LOCKTIME, /* u64, msecs */
+ __NDTPA_MAX
+};
+#define NDTPA_MAX (__NDTPA_MAX - 1)
+
+struct ndtmsg
+{
+ __u8 ndtm_family;
+ __u8 ndtm_pad1;
+ __u16 ndtm_pad2;
+};
+
+struct ndt_config
+{
+ __u16 ndtc_key_len;
+ __u16 ndtc_entry_size;
+ __u32 ndtc_entries;
+ __u32 ndtc_last_flush; /* delta to now in msecs */
+ __u32 ndtc_last_rand; /* delta to now in msecs */
+ __u32 ndtc_hash_rnd;
+ __u32 ndtc_hash_mask;
+ __u32 ndtc_hash_chain_gc;
+ __u32 ndtc_proxy_qlen;
+};
+
+enum {
+ NDTA_UNSPEC,
+ NDTA_NAME, /* char *, unchangeable */
+ NDTA_THRESH1, /* u32 */
+ NDTA_THRESH2, /* u32 */
+ NDTA_THRESH3, /* u32 */
+ NDTA_CONFIG, /* struct ndt_config, read-only */
+ NDTA_PARMS, /* nested TLV NDTPA_* */
+ NDTA_STATS, /* struct ndt_stats, read-only */
+ NDTA_GC_INTERVAL, /* u64, msecs */
+ __NDTA_MAX
+};
+#define NDTA_MAX (__NDTA_MAX - 1)
+
+#define NDTA_RTA(r) ((struct rtattr*)(((char*)(r)) + \
+ NLMSG_ALIGN(sizeof(struct ndtmsg))))
+#define NDTA_PAYLOAD(n) NLMSG_PAYLOAD(n,sizeof(struct ndtmsg))
+
+
/****
* General form of address family dependent message.
****/
@@ -789,6 +896,75 @@ extern void __rta_fill(struct sk_buff *skb, int attrtype, int attrlen, const voi
({ if (unlikely(skb_tailroom(skb) < (int)(attrlen))) \
goto rtattr_failure; \
memcpy(skb_put(skb, RTA_ALIGN(attrlen)), data, attrlen); })
+
+#define RTA_PUT_U8(skb, attrtype, value) \
+({ u8 _tmp = (value); \
+ RTA_PUT(skb, attrtype, sizeof(u8), &_tmp); })
+
+#define RTA_PUT_U16(skb, attrtype, value) \
+({ u16 _tmp = (value); \
+ RTA_PUT(skb, attrtype, sizeof(u16), &_tmp); })
+
+#define RTA_PUT_U32(skb, attrtype, value) \
+({ u32 _tmp = (value); \
+ RTA_PUT(skb, attrtype, sizeof(u32), &_tmp); })
+
+#define RTA_PUT_U64(skb, attrtype, value) \
+({ u64 _tmp = (value); \
+ RTA_PUT(skb, attrtype, sizeof(u64), &_tmp); })
+
+#define RTA_PUT_SECS(skb, attrtype, value) \
+ RTA_PUT_U64(skb, attrtype, (value) / HZ)
+
+#define RTA_PUT_MSECS(skb, attrtype, value) \
+ RTA_PUT_U64(skb, attrtype, jiffies_to_msecs(value))
+
+#define RTA_PUT_STRING(skb, attrtype, value) \
+ RTA_PUT(skb, attrtype, strlen(value) + 1, value)
+
+#define RTA_PUT_FLAG(skb, attrtype) \
+ RTA_PUT(skb, attrtype, 0, NULL);
+
+#define RTA_NEST(skb, type) \
+({ struct rtattr *__start = (struct rtattr *) (skb)->tail; \
+ RTA_PUT(skb, type, 0, NULL); \
+ __start; })
+
+#define RTA_NEST_END(skb, start) \
+({ (start)->rta_len = ((skb)->tail - (unsigned char *) (start)); \
+ (skb)->len; })
+
+#define RTA_NEST_CANCEL(skb, start) \
+({ if (start) \
+ skb_trim(skb, (unsigned char *) (start) - (skb)->data); \
+ -1; })
+
+#define RTA_GET_U8(rta) \
+({ if (!rta || RTA_PAYLOAD(rta) < sizeof(u8)) \
+ goto rtattr_failure; \
+ *(u8 *) RTA_DATA(rta); })
+
+#define RTA_GET_U16(rta) \
+({ if (!rta || RTA_PAYLOAD(rta) < sizeof(u16)) \
+ goto rtattr_failure; \
+ *(u16 *) RTA_DATA(rta); })
+
+#define RTA_GET_U32(rta) \
+({ if (!rta || RTA_PAYLOAD(rta) < sizeof(u32)) \
+ goto rtattr_failure; \
+ *(u32 *) RTA_DATA(rta); })
+
+#define RTA_GET_U64(rta) \
+({ u64 _tmp; \
+ if (!rta || RTA_PAYLOAD(rta) < sizeof(u64)) \
+ goto rtattr_failure; \
+ memcpy(&_tmp, RTA_DATA(rta), sizeof(_tmp)); \
+ _tmp; })
+
+#define RTA_GET_FLAG(rta) (!!(rta))
+
+#define RTA_GET_SECS(rta) ((unsigned long) RTA_GET_U64(rta) * HZ)
+#define RTA_GET_MSECS(rta) (msecs_to_jiffies((unsigned long) RTA_GET_U64(rta)))
static inline struct rtattr *
__rta_reserve(struct sk_buff *skb, int attrtype, int attrlen)
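A hedged sketch of how the typed RTA_PUT_* and RTA_NEST helpers combine when filling the NDTA_PARMS block described above; the attribute values and function name are illustrative only.

#include <linux/rtnetlink.h>

static int example_fill_parms(struct sk_buff *skb)
{
	struct rtattr *nest = NULL;

	nest = RTA_NEST(skb, NDTA_PARMS);	/* open the nested TLV */
	RTA_PUT_U32(skb, NDTPA_QUEUE_LEN, 3);
	RTA_PUT_U32(skb, NDTPA_UCAST_PROBES, 3);
	RTA_PUT_MSECS(skb, NDTPA_BASE_REACHABLE_TIME, 30 * HZ);

	return RTA_NEST_END(skb, nest);		/* patch up rta_len */

rtattr_failure:
	return RTA_NEST_CANCEL(skb, nest);	/* no-op while nest is still NULL */
}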
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 7d66385ae750..76cf7e60216c 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -64,6 +64,7 @@ extern int kmem_cache_shrink(kmem_cache_t *);
extern void *kmem_cache_alloc(kmem_cache_t *, unsigned int __nocast);
extern void kmem_cache_free(kmem_cache_t *, void *);
extern unsigned int kmem_cache_size(kmem_cache_t *);
+extern const char *kmem_cache_name(kmem_cache_t *);
extern kmem_cache_t *kmem_find_general_cachep(size_t size, int gfpflags);
/* Size description struct for general caches. */
diff --git a/include/linux/tcp.h b/include/linux/tcp.h
index 14a55e3e3a50..97a7c9e03df5 100644
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -230,6 +230,17 @@ struct tcp_options_received {
__u16 mss_clamp; /* Maximal mss, negotiated at connection setup */
};
+struct tcp_request_sock {
+ struct inet_request_sock req;
+ __u32 rcv_isn;
+ __u32 snt_isn;
+};
+
+static inline struct tcp_request_sock *tcp_rsk(const struct request_sock *req)
+{
+ return (struct tcp_request_sock *)req;
+}
+
struct tcp_sock {
/* inet_sock has to be the first member of tcp_sock */
struct inet_sock inet;
@@ -368,22 +379,7 @@ struct tcp_sock {
__u32 total_retrans; /* Total retransmits for entire connection */
- /* The syn_wait_lock is necessary only to avoid proc interface having
- * to grab the main lock sock while browsing the listening hash
- * (otherwise it's deadlock prone).
- * This lock is acquired in read mode only from listening_get_next()
- * and it's acquired in write mode _only_ from code that is actively
- * changing the syn_wait_queue. All readers that are holding
- * the master sock lock don't need to grab this lock in read mode
- * too as the syn_wait_queue writes are always protected from
- * the main sock lock.
- */
- rwlock_t syn_wait_lock;
- struct tcp_listen_opt *listen_opt;
-
- /* FIFO of established children */
- struct open_request *accept_queue;
- struct open_request *accept_queue_tail;
+ struct request_sock_queue accept_queue; /* FIFO of established children */
unsigned int keepalive_time; /* time before keep alive takes place */
unsigned int keepalive_intvl; /* time interval between keep alive probes */
diff --git a/include/linux/wireless.h b/include/linux/wireless.h
index 2f51f2b6562e..ae485f9c916e 100644
--- a/include/linux/wireless.h
+++ b/include/linux/wireless.h
@@ -1,10 +1,10 @@
/*
* This file define a set of standard wireless extensions
*
- * Version : 17 21.6.04
+ * Version : 18 12.3.05
*
* Authors : Jean Tourrilhes - HPL - <jt@hpl.hp.com>
- * Copyright (c) 1997-2004 Jean Tourrilhes, All Rights Reserved.
+ * Copyright (c) 1997-2005 Jean Tourrilhes, All Rights Reserved.
*/
#ifndef _LINUX_WIRELESS_H
@@ -82,7 +82,7 @@
* (there is some stuff that will be added in the future...)
* I just plan to increment with each new version.
*/
-#define WIRELESS_EXT 17
+#define WIRELESS_EXT 18
/*
* Changes :
@@ -182,6 +182,21 @@
* - Document (struct iw_quality *)->updated, add new flags (INVALID)
* - Wireless Event capability in struct iw_range
* - Add support for relative TxPower (yick !)
+ *
+ * V17 to V18 (From Jouni Malinen <jkmaline@cc.hut.fi>)
+ * ----------
+ * - Add support for WPA/WPA2
+ * - Add extended encoding configuration (SIOCSIWENCODEEXT and
+ * SIOCGIWENCODEEXT)
+ * - Add SIOCSIWGENIE/SIOCGIWGENIE
+ * - Add SIOCSIWMLME
+ * - Add SIOCSIWPMKSA
+ * - Add struct iw_range bit field for supported encoding capabilities
+ * - Add optional scan request parameters for SIOCSIWSCAN
+ * - Add SIOCSIWAUTH/SIOCGIWAUTH for setting authentication and WPA
+ * related parameters (extensible up to 4096 parameter values)
+ * - Add wireless events: IWEVGENIE, IWEVMICHAELMICFAILURE,
+ * IWEVASSOCREQIE, IWEVASSOCRESPIE, IWEVPMKIDCAND
*/
/**************************** CONSTANTS ****************************/
@@ -256,6 +271,30 @@
#define SIOCSIWPOWER 0x8B2C /* set Power Management settings */
#define SIOCGIWPOWER 0x8B2D /* get Power Management settings */
+/* WPA : Generic IEEE 802.11 information element (e.g., for WPA/RSN/WMM).
+ * This ioctl uses struct iw_point and data buffer that includes IE id and len
+ * fields. More than one IE may be included in the request. Setting the generic
+ * IE to empty buffer (len=0) removes the generic IE from the driver. Drivers
+ * are allowed to generate their own WPA/RSN IEs, but in these cases, drivers
+ * are required to report the used IE as a wireless event, e.g., when
+ * associating with an AP. */
+#define SIOCSIWGENIE 0x8B30 /* set generic IE */
+#define SIOCGIWGENIE 0x8B31 /* get generic IE */
+
+/* WPA : IEEE 802.11 MLME requests */
+#define SIOCSIWMLME 0x8B16 /* request MLME operation; uses
+ * struct iw_mlme */
+/* WPA : Authentication mode parameters */
+#define SIOCSIWAUTH 0x8B32 /* set authentication mode params */
+#define SIOCGIWAUTH 0x8B33 /* get authentication mode params */
+
+/* WPA : Extended version of encoding configuration */
+#define SIOCSIWENCODEEXT 0x8B34 /* set encoding token & mode */
+#define SIOCGIWENCODEEXT 0x8B35 /* get encoding token & mode */
+
+/* WPA2 : PMKSA cache management */
+#define SIOCSIWPMKSA 0x8B36 /* PMKSA cache operation */
+
/* -------------------- DEV PRIVATE IOCTL LIST -------------------- */
/* These 32 ioctl are wireless device private, for 16 commands.
@@ -297,6 +336,34 @@
#define IWEVCUSTOM 0x8C02 /* Driver specific ascii string */
#define IWEVREGISTERED 0x8C03 /* Discovered a new node (AP mode) */
#define IWEVEXPIRED 0x8C04 /* Expired a node (AP mode) */
+#define IWEVGENIE 0x8C05 /* Generic IE (WPA, RSN, WMM, ..)
+ * (scan results); This includes id and
+ * length fields. One IWEVGENIE may
+ * contain more than one IE. Scan
+ * results may contain one or more
+ * IWEVGENIE events. */
+#define IWEVMICHAELMICFAILURE 0x8C06 /* Michael MIC failure
+ * (struct iw_michaelmicfailure)
+ */
+#define IWEVASSOCREQIE 0x8C07 /* IEs used in (Re)Association Request.
+ * The data includes id and length
+ * fields and may contain more than one
+ * IE. This event is required in
+ * Managed mode if the driver
+ * generates its own WPA/RSN IE. This
+ * should be sent just before
+ * IWEVREGISTERED event for the
+ * association. */
+#define IWEVASSOCRESPIE 0x8C08 /* IEs used in (Re)Association
+ * Response. The data includes id and
+ * length fields and may contain more
+ * than one IE. This may be sent
+ * between IWEVASSOCREQIE and
+ * IWEVREGISTERED events for the
+ * association. */
+#define IWEVPMKIDCAND 0x8C09 /* PMKID candidate for RSN
+ * pre-authentication
+ * (struct iw_pmkid_cand) */
#define IWEVFIRST 0x8C00
@@ -432,12 +499,94 @@
#define IW_SCAN_THIS_MODE 0x0020 /* Scan only this Mode */
#define IW_SCAN_ALL_RATE 0x0040 /* Scan all Bit-Rates */
#define IW_SCAN_THIS_RATE 0x0080 /* Scan only this Bit-Rate */
+/* struct iw_scan_req scan_type */
+#define IW_SCAN_TYPE_ACTIVE 0
+#define IW_SCAN_TYPE_PASSIVE 1
/* Maximum size of returned data */
#define IW_SCAN_MAX_DATA 4096 /* In bytes */
/* Max number of char in custom event - use multiple of them if needed */
#define IW_CUSTOM_MAX 256 /* In bytes */
+/* Generic information element */
+#define IW_GENERIC_IE_MAX 1024
+
+/* MLME requests (SIOCSIWMLME / struct iw_mlme) */
+#define IW_MLME_DEAUTH 0
+#define IW_MLME_DISASSOC 1
+
+/* SIOCSIWAUTH/SIOCGIWAUTH struct iw_param flags */
+#define IW_AUTH_INDEX 0x0FFF
+#define IW_AUTH_FLAGS 0xF000
+/* SIOCSIWAUTH/SIOCGIWAUTH parameters (0 .. 4095)
+ * (IW_AUTH_INDEX mask in struct iw_param flags; this is the index of the
+ * parameter that is being set/get to; value will be read/written to
+ * struct iw_param value field) */
+#define IW_AUTH_WPA_VERSION 0
+#define IW_AUTH_CIPHER_PAIRWISE 1
+#define IW_AUTH_CIPHER_GROUP 2
+#define IW_AUTH_KEY_MGMT 3
+#define IW_AUTH_TKIP_COUNTERMEASURES 4
+#define IW_AUTH_DROP_UNENCRYPTED 5
+#define IW_AUTH_80211_AUTH_ALG 6
+#define IW_AUTH_WPA_ENABLED 7
+#define IW_AUTH_RX_UNENCRYPTED_EAPOL 8
+#define IW_AUTH_ROAMING_CONTROL 9
+#define IW_AUTH_PRIVACY_INVOKED 10
+
+/* IW_AUTH_WPA_VERSION values (bit field) */
+#define IW_AUTH_WPA_VERSION_DISABLED 0x00000001
+#define IW_AUTH_WPA_VERSION_WPA 0x00000002
+#define IW_AUTH_WPA_VERSION_WPA2 0x00000004
+
+/* IW_AUTH_PAIRWISE_CIPHER and IW_AUTH_GROUP_CIPHER values (bit field) */
+#define IW_AUTH_CIPHER_NONE 0x00000001
+#define IW_AUTH_CIPHER_WEP40 0x00000002
+#define IW_AUTH_CIPHER_TKIP 0x00000004
+#define IW_AUTH_CIPHER_CCMP 0x00000008
+#define IW_AUTH_CIPHER_WEP104 0x00000010
+
+/* IW_AUTH_KEY_MGMT values (bit field) */
+#define IW_AUTH_KEY_MGMT_802_1X 1
+#define IW_AUTH_KEY_MGMT_PSK 2
+
+/* IW_AUTH_80211_AUTH_ALG values (bit field) */
+#define IW_AUTH_ALG_OPEN_SYSTEM 0x00000001
+#define IW_AUTH_ALG_SHARED_KEY 0x00000002
+#define IW_AUTH_ALG_LEAP 0x00000004
+
+/* IW_AUTH_ROAMING_CONTROL values */
+#define IW_AUTH_ROAMING_ENABLE 0 /* driver/firmware based roaming */
+#define IW_AUTH_ROAMING_DISABLE 1 /* user space program used for roaming
+ * control */
+
+/* SIOCSIWENCODEEXT definitions */
+#define IW_ENCODE_SEQ_MAX_SIZE 8
+/* struct iw_encode_ext ->alg */
+#define IW_ENCODE_ALG_NONE 0
+#define IW_ENCODE_ALG_WEP 1
+#define IW_ENCODE_ALG_TKIP 2
+#define IW_ENCODE_ALG_CCMP 3
+/* struct iw_encode_ext ->ext_flags */
+#define IW_ENCODE_EXT_TX_SEQ_VALID 0x00000001
+#define IW_ENCODE_EXT_RX_SEQ_VALID 0x00000002
+#define IW_ENCODE_EXT_GROUP_KEY 0x00000004
+#define IW_ENCODE_EXT_SET_TX_KEY 0x00000008
+
+/* IWEVMICHAELMICFAILURE : struct iw_michaelmicfailure ->flags */
+#define IW_MICFAILURE_KEY_ID 0x00000003 /* Key ID 0..3 */
+#define IW_MICFAILURE_GROUP 0x00000004
+#define IW_MICFAILURE_PAIRWISE 0x00000008
+#define IW_MICFAILURE_STAKEY 0x00000010
+#define IW_MICFAILURE_COUNT 0x00000060 /* 1 or 2 (0 = count not supported)
+ */
+
+/* Bit field values for enc_capa in struct iw_range */
+#define IW_ENC_CAPA_WPA 0x00000001
+#define IW_ENC_CAPA_WPA2 0x00000002
+#define IW_ENC_CAPA_CIPHER_TKIP 0x00000004
+#define IW_ENC_CAPA_CIPHER_CCMP 0x00000008
+
/* Event capability macros - in (struct iw_range *)->event_capa
* Because we have more than 32 possible events, we use an array of
* 32 bit bitmasks. Note : 32 bits = 0x20 = 2^5. */
@@ -546,6 +695,132 @@ struct iw_thrspy
struct iw_quality high; /* High threshold */
};
+/*
+ * Optional data for scan request
+ *
+ * Note: these optional parameters are controlling parameters for the
+ * scanning behavior, these do not apply to getting scan results
+ * (SIOCGIWSCAN). Drivers are expected to keep a local BSS table and
+ * provide a merged results with all BSSes even if the previous scan
+ * request limited scanning to a subset, e.g., by specifying an SSID.
+ * Especially, scan results are required to include an entry for the
+ * current BSS if the driver is in Managed mode and associated with an AP.
+ */
+struct iw_scan_req
+{
+ __u8 scan_type; /* IW_SCAN_TYPE_{ACTIVE,PASSIVE} */
+ __u8 essid_len;
+ __u8 num_channels; /* num entries in channel_list;
+ * 0 = scan all allowed channels */
+ __u8 flags; /* reserved as padding; use zero, this may
+ * be used in the future for adding flags
+ * to request different scan behavior */
+ struct sockaddr bssid; /* ff:ff:ff:ff:ff:ff for broadcast BSSID or
+ * individual address of a specific BSS */
+
+ /*
+ * Use this ESSID if IW_SCAN_THIS_ESSID flag is used instead of using
+ * the current ESSID. This allows scan requests for specific ESSID
+ * without having to change the current ESSID and potentially breaking
+ * the current association.
+ */
+ __u8 essid[IW_ESSID_MAX_SIZE];
+
+ /*
+ * Optional parameters for changing the default scanning behavior.
+ * These are based on the MLME-SCAN.request from IEEE Std 802.11.
+ * TU is 1.024 ms. If these are set to 0, driver is expected to use
+ * reasonable default values. min_channel_time defines the time that
+ * will be used to wait for the first reply on each channel. If no
+ * replies are received, next channel will be scanned after this. If
+ * replies are received, total time waited on the channel is defined by
+ * max_channel_time.
+ */
+ __u32 min_channel_time; /* in TU */
+ __u32 max_channel_time; /* in TU */
+
+ struct iw_freq channel_list[IW_MAX_FREQUENCIES];
+};
+
+/* ------------------------- WPA SUPPORT ------------------------- */
+
+/*
+ * Extended data structure for get/set encoding (this is used with
+ * SIOCSIWENCODEEXT/SIOCGIWENCODEEXT. struct iw_point and IW_ENCODE_*
+ * flags are used in the same way as with SIOCSIWENCODE/SIOCGIWENCODE and
+ * only the data contents changes (key data -> this structure, including
+ * key data).
+ *
+ * If the new key is the first group key, it will be set as the default
+ * TX key. Otherwise, default TX key index is only changed if
+ * IW_ENCODE_EXT_SET_TX_KEY flag is set.
+ *
+ * Key will be changed with SIOCSIWENCODEEXT in all cases except for
+ * special "change TX key index" operation which is indicated by setting
+ * key_len = 0 and ext_flags |= IW_ENCODE_EXT_SET_TX_KEY.
+ *
+ * tx_seq/rx_seq are only used when respective
+ * IW_ENCODE_EXT_{TX,RX}_SEQ_VALID flag is set in ext_flags. Normal
+ * TKIP/CCMP operation is to set RX seq with SIOCSIWENCODEEXT and start
+ * TX seq from zero whenever key is changed. SIOCGIWENCODEEXT is normally
+ * used only by an Authenticator (AP or an IBSS station) to get the
+ * current TX sequence number. Using TX_SEQ_VALID for SIOCSIWENCODEEXT and
+ * RX_SEQ_VALID for SIOCGIWENCODEEXT are optional, but can be useful for
+ * debugging/testing.
+ */
+struct iw_encode_ext
+{
+ __u32 ext_flags; /* IW_ENCODE_EXT_* */
+ __u8 tx_seq[IW_ENCODE_SEQ_MAX_SIZE]; /* LSB first */
+ __u8 rx_seq[IW_ENCODE_SEQ_MAX_SIZE]; /* LSB first */
+ struct sockaddr addr; /* ff:ff:ff:ff:ff:ff for broadcast/multicast
+ * (group) keys or unicast address for
+ * individual keys */
+ __u16 alg; /* IW_ENCODE_ALG_* */
+ __u16 key_len;
+ __u8 key[0];
+};
+
+/* SIOCSIWMLME data */
+struct iw_mlme
+{
+ __u16 cmd; /* IW_MLME_* */
+ __u16 reason_code;
+ struct sockaddr addr;
+};
+
+/* SIOCSIWPMKSA data */
+#define IW_PMKSA_ADD 1
+#define IW_PMKSA_REMOVE 2
+#define IW_PMKSA_FLUSH 3
+
+#define IW_PMKID_LEN 16
+
+struct iw_pmksa
+{
+ __u32 cmd; /* IW_PMKSA_* */
+ struct sockaddr bssid;
+ __u8 pmkid[IW_PMKID_LEN];
+};
+
+/* IWEVMICHAELMICFAILURE data */
+struct iw_michaelmicfailure
+{
+ __u32 flags;
+ struct sockaddr src_addr;
+ __u8 tsc[IW_ENCODE_SEQ_MAX_SIZE]; /* LSB first */
+};
+
+/* IWEVPMKIDCAND data */
+#define IW_PMKID_CAND_PREAUTH 0x00000001 /* RSN pre-authentication enabled */
+struct iw_pmkid_cand
+{
+ __u32 flags; /* IW_PMKID_CAND_* */
+ __u32 index; /* the smaller the index, the higher the
+ * priority */
+ struct sockaddr bssid;
+};
+
/* ------------------------ WIRELESS STATS ------------------------ */
/*
* Wireless statistics (used for /proc/net/wireless)
@@ -725,6 +1000,8 @@ struct iw_range
struct iw_freq freq[IW_MAX_FREQUENCIES]; /* list */
/* Note : this frequency list doesn't need to fit channel numbers,
 * because each entry contains its channel index */
+
+ __u32 enc_capa; /* IW_ENC_CAPA_* bit field */
};
/*
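As an illustration of the new SIOCSIWAUTH request from user space, here is a hedged sketch along the lines of what a WPA supplicant would issue; the interface name and socket setup are assumptions, not part of the header.

#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <linux/wireless.h>

/* Ask the driver on "wlan0" to enable WPA (version 1); sketch only. */
static int example_enable_wpa(int sock)
{
	struct iwreq wrq;

	memset(&wrq, 0, sizeof(wrq));
	strncpy(wrq.ifr_name, "wlan0", IFNAMSIZ);
	wrq.u.param.flags = IW_AUTH_WPA_VERSION & IW_AUTH_INDEX;
	wrq.u.param.value = IW_AUTH_WPA_VERSION_WPA;

	/* sock is any AF_INET datagram socket used to reach the driver */
	return ioctl(sock, SIOCSIWAUTH, &wrq);
}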
diff --git a/include/linux/xfrm.h b/include/linux/xfrm.h
index fd2ef742a9fd..d68391a9b9f3 100644
--- a/include/linux/xfrm.h
+++ b/include/linux/xfrm.h
@@ -174,6 +174,8 @@ enum xfrm_attr_type_t {
XFRMA_ALG_COMP, /* struct xfrm_algo */
XFRMA_ENCAP, /* struct xfrm_algo + struct xfrm_encap_tmpl */
XFRMA_TMPL, /* 1 or more struct xfrm_user_tmpl */
+ XFRMA_SA,
+ XFRMA_POLICY,
__XFRMA_MAX
#define XFRMA_MAX (__XFRMA_MAX - 1)
@@ -257,5 +259,7 @@ struct xfrm_usersa_flush {
#define XFRMGRP_ACQUIRE 1
#define XFRMGRP_EXPIRE 2
+#define XFRMGRP_SA 4
+#define XFRMGRP_POLICY 8
#endif /* _LINUX_XFRM_H */
diff --git a/include/net/neighbour.h b/include/net/neighbour.h
index 4f33bbc21e7f..89809891e5ab 100644
--- a/include/net/neighbour.h
+++ b/include/net/neighbour.h
@@ -65,11 +65,10 @@ struct neighbour;
struct neigh_parms
{
+ struct net_device *dev;
struct neigh_parms *next;
int (*neigh_setup)(struct neighbour *);
struct neigh_table *tbl;
- int entries;
- void *priv;
void *sysctl_table;
@@ -192,7 +191,6 @@ struct neigh_table
atomic_t entries;
rwlock_t lock;
unsigned long last_rand;
- struct neigh_parms *parms_list;
kmem_cache_t *kmem_cachep;
struct neigh_statistics *stats;
struct neighbour **hash_buckets;
@@ -252,6 +250,9 @@ extern int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg);
extern int neigh_delete(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg);
extern void neigh_app_ns(struct neighbour *n);
+extern int neightbl_dump_info(struct sk_buff *skb, struct netlink_callback *cb);
+extern int neightbl_set(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg);
+
extern void neigh_for_each(struct neigh_table *tbl, void (*cb)(struct neighbour *, void *), void *cookie);
extern void __neigh_for_each_release(struct neigh_table *tbl, int (*cb)(struct neighbour *));
extern void pneigh_for_each(struct neigh_table *tbl, void (*cb)(struct pneigh_entry *));
diff --git a/include/net/request_sock.h b/include/net/request_sock.h
new file mode 100644
index 000000000000..72fd6f5e86b1
--- /dev/null
+++ b/include/net/request_sock.h
@@ -0,0 +1,255 @@
+/*
+ * NET Generic infrastructure for Network protocols.
+ *
+ * Definitions for request_sock
+ *
+ * Authors: Arnaldo Carvalho de Melo <acme@conectiva.com.br>
+ *
+ * From code originally in include/net/tcp.h
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#ifndef _REQUEST_SOCK_H
+#define _REQUEST_SOCK_H
+
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+
+#include <net/sock.h>
+
+struct request_sock;
+struct sk_buff;
+struct dst_entry;
+struct proto;
+
+struct request_sock_ops {
+ int family;
+ kmem_cache_t *slab;
+ int obj_size;
+ int (*rtx_syn_ack)(struct sock *sk,
+ struct request_sock *req,
+ struct dst_entry *dst);
+ void (*send_ack)(struct sk_buff *skb,
+ struct request_sock *req);
+ void (*send_reset)(struct sk_buff *skb);
+ void (*destructor)(struct request_sock *req);
+};
+
+/* struct request_sock - mini sock to represent a connection request
+ */
+struct request_sock {
+ struct request_sock *dl_next; /* Must be first member! */
+ u16 mss;
+ u8 retrans;
+ u8 __pad;
+ /* The following two fields can be easily recomputed I think -AK */
+ u32 window_clamp; /* window clamp at creation time */
+ u32 rcv_wnd; /* rcv_wnd offered first time */
+ u32 ts_recent;
+ unsigned long expires;
+ struct request_sock_ops *rsk_ops;
+ struct sock *sk;
+};
+
+static inline struct request_sock *reqsk_alloc(struct request_sock_ops *ops)
+{
+ struct request_sock *req = kmem_cache_alloc(ops->slab, SLAB_ATOMIC);
+
+ if (req != NULL)
+ req->rsk_ops = ops;
+
+ return req;
+}
+
+static inline void __reqsk_free(struct request_sock *req)
+{
+ kmem_cache_free(req->rsk_ops->slab, req);
+}
+
+static inline void reqsk_free(struct request_sock *req)
+{
+ req->rsk_ops->destructor(req);
+ __reqsk_free(req);
+}
+
+extern int sysctl_max_syn_backlog;
+
+/** struct listen_sock - listen state
+ *
+ * @max_qlen_log - log_2 of maximal queued SYNs/REQUESTs
+ */
+struct listen_sock {
+ u8 max_qlen_log;
+ /* 3 bytes hole, try to use */
+ int qlen;
+ int qlen_young;
+ int clock_hand;
+ u32 hash_rnd;
+ struct request_sock *syn_table[0];
+};
+
+/** struct request_sock_queue - queue of request_socks
+ *
+ * @rskq_accept_head - FIFO head of established children
+ * @rskq_accept_tail - FIFO tail of established children
+ * @syn_wait_lock - serializer
+ *
+ * %syn_wait_lock is necessary only to avoid proc interface having to grab the main
+ * sock lock while browsing the listening hash (otherwise it's deadlock prone).
+ *
+ * This lock is acquired in read mode only from listening_get_next() seq_file
+ * op and it's acquired in write mode _only_ from code that is actively
+ * changing rskq_accept_head. All readers that are holding the master sock lock
+ * don't need to grab this lock in read mode too as rskq_accept_head writes
+ * are always protected from the main sock lock.
+ */
+struct request_sock_queue {
+ struct request_sock *rskq_accept_head;
+ struct request_sock *rskq_accept_tail;
+ rwlock_t syn_wait_lock;
+ struct listen_sock *listen_opt;
+};
+
+extern int reqsk_queue_alloc(struct request_sock_queue *queue,
+ const int nr_table_entries);
+
+static inline struct listen_sock *reqsk_queue_yank_listen_sk(struct request_sock_queue *queue)
+{
+ struct listen_sock *lopt;
+
+ write_lock_bh(&queue->syn_wait_lock);
+ lopt = queue->listen_opt;
+ queue->listen_opt = NULL;
+ write_unlock_bh(&queue->syn_wait_lock);
+
+ return lopt;
+}
+
+static inline void reqsk_queue_destroy(struct request_sock_queue *queue)
+{
+ kfree(reqsk_queue_yank_listen_sk(queue));
+}
+
+static inline struct request_sock *
+ reqsk_queue_yank_acceptq(struct request_sock_queue *queue)
+{
+ struct request_sock *req = queue->rskq_accept_head;
+
+ queue->rskq_accept_head = NULL;
+ return req;
+}
+
+static inline int reqsk_queue_empty(struct request_sock_queue *queue)
+{
+ return queue->rskq_accept_head == NULL;
+}
+
+static inline void reqsk_queue_unlink(struct request_sock_queue *queue,
+ struct request_sock *req,
+ struct request_sock **prev_req)
+{
+ write_lock(&queue->syn_wait_lock);
+ *prev_req = req->dl_next;
+ write_unlock(&queue->syn_wait_lock);
+}
+
+static inline void reqsk_queue_add(struct request_sock_queue *queue,
+ struct request_sock *req,
+ struct sock *parent,
+ struct sock *child)
+{
+ req->sk = child;
+ sk_acceptq_added(parent);
+
+ if (queue->rskq_accept_head == NULL)
+ queue->rskq_accept_head = req;
+ else
+ queue->rskq_accept_tail->dl_next = req;
+
+ queue->rskq_accept_tail = req;
+ req->dl_next = NULL;
+}
+
+static inline struct request_sock *reqsk_queue_remove(struct request_sock_queue *queue)
+{
+ struct request_sock *req = queue->rskq_accept_head;
+
+ BUG_TRAP(req != NULL);
+
+ queue->rskq_accept_head = req->dl_next;
+ if (queue->rskq_accept_head == NULL)
+ queue->rskq_accept_tail = NULL;
+
+ return req;
+}
+
+static inline struct sock *reqsk_queue_get_child(struct request_sock_queue *queue,
+ struct sock *parent)
+{
+ struct request_sock *req = reqsk_queue_remove(queue);
+ struct sock *child = req->sk;
+
+ BUG_TRAP(child != NULL);
+
+ sk_acceptq_removed(parent);
+ __reqsk_free(req);
+ return child;
+}
+
+static inline int reqsk_queue_removed(struct request_sock_queue *queue,
+ struct request_sock *req)
+{
+ struct listen_sock *lopt = queue->listen_opt;
+
+ if (req->retrans == 0)
+ --lopt->qlen_young;
+
+ return --lopt->qlen;
+}
+
+static inline int reqsk_queue_added(struct request_sock_queue *queue)
+{
+ struct listen_sock *lopt = queue->listen_opt;
+ const int prev_qlen = lopt->qlen;
+
+ lopt->qlen_young++;
+ lopt->qlen++;
+ return prev_qlen;
+}
+
+static inline int reqsk_queue_len(struct request_sock_queue *queue)
+{
+ return queue->listen_opt != NULL ? queue->listen_opt->qlen : 0;
+}
+
+static inline int reqsk_queue_len_young(struct request_sock_queue *queue)
+{
+ return queue->listen_opt->qlen_young;
+}
+
+static inline int reqsk_queue_is_full(struct request_sock_queue *queue)
+{
+ return queue->listen_opt->qlen >> queue->listen_opt->max_qlen_log;
+}
+
+static inline void reqsk_queue_hash_req(struct request_sock_queue *queue,
+ u32 hash, struct request_sock *req,
+ unsigned timeout)
+{
+ struct listen_sock *lopt = queue->listen_opt;
+
+ req->expires = jiffies + timeout;
+ req->retrans = 0;
+ req->sk = NULL;
+ req->dl_next = lopt->syn_table[hash];
+
+ write_lock(&queue->syn_wait_lock);
+ lopt->syn_table[hash] = req;
+ write_unlock(&queue->syn_wait_lock);
+}
+
+#endif /* _REQUEST_SOCK_H */
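To tie the helpers together: a listener hashes embryonic requests with reqsk_queue_hash_req(), moves established children onto the FIFO with reqsk_queue_add(), and accept() drains them roughly as in this hedged sketch (locking and blocking elided, names illustrative).

#include <linux/tcp.h>
#include <net/request_sock.h>

static struct sock *example_accept_one(struct sock *listener)
{
	struct request_sock_queue *queue = &tcp_sk(listener)->accept_queue;

	if (reqsk_queue_empty(queue))
		return NULL;		/* a real accept() would sleep here */

	/* unlink the head request, free it, hand back the new child sock */
	return reqsk_queue_get_child(queue, listener);
}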
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index c57504b3b518..7b97405e2dbf 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -172,4 +172,126 @@ tcf_destroy(struct tcf_proto *tp)
kfree(tp);
}
+static inline int __qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch,
+ struct sk_buff_head *list)
+{
+ __skb_queue_tail(list, skb);
+ sch->qstats.backlog += skb->len;
+ sch->bstats.bytes += skb->len;
+ sch->bstats.packets++;
+
+ return NET_XMIT_SUCCESS;
+}
+
+static inline int qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch)
+{
+ return __qdisc_enqueue_tail(skb, sch, &sch->q);
+}
+
+static inline struct sk_buff *__qdisc_dequeue_head(struct Qdisc *sch,
+ struct sk_buff_head *list)
+{
+ struct sk_buff *skb = __skb_dequeue(list);
+
+ if (likely(skb != NULL))
+ sch->qstats.backlog -= skb->len;
+
+ return skb;
+}
+
+static inline struct sk_buff *qdisc_dequeue_head(struct Qdisc *sch)
+{
+ return __qdisc_dequeue_head(sch, &sch->q);
+}
+
+static inline struct sk_buff *__qdisc_dequeue_tail(struct Qdisc *sch,
+ struct sk_buff_head *list)
+{
+ struct sk_buff *skb = __skb_dequeue_tail(list);
+
+ if (likely(skb != NULL))
+ sch->qstats.backlog -= skb->len;
+
+ return skb;
+}
+
+static inline struct sk_buff *qdisc_dequeue_tail(struct Qdisc *sch)
+{
+ return __qdisc_dequeue_tail(sch, &sch->q);
+}
+
+static inline int __qdisc_requeue(struct sk_buff *skb, struct Qdisc *sch,
+ struct sk_buff_head *list)
+{
+ __skb_queue_head(list, skb);
+ sch->qstats.backlog += skb->len;
+ sch->qstats.requeues++;
+
+ return NET_XMIT_SUCCESS;
+}
+
+static inline int qdisc_requeue(struct sk_buff *skb, struct Qdisc *sch)
+{
+ return __qdisc_requeue(skb, sch, &sch->q);
+}
+
+static inline void __qdisc_reset_queue(struct Qdisc *sch,
+ struct sk_buff_head *list)
+{
+ /*
+ * We do not know the backlog in bytes of this list, it
+ * is up to the caller to correct it
+ */
+ skb_queue_purge(list);
+}
+
+static inline void qdisc_reset_queue(struct Qdisc *sch)
+{
+ __qdisc_reset_queue(sch, &sch->q);
+ sch->qstats.backlog = 0;
+}
+
+static inline unsigned int __qdisc_queue_drop(struct Qdisc *sch,
+ struct sk_buff_head *list)
+{
+ struct sk_buff *skb = __qdisc_dequeue_tail(sch, list);
+
+ if (likely(skb != NULL)) {
+ unsigned int len = skb->len;
+ kfree_skb(skb);
+ return len;
+ }
+
+ return 0;
+}
+
+static inline unsigned int qdisc_queue_drop(struct Qdisc *sch)
+{
+ return __qdisc_queue_drop(sch, &sch->q);
+}
+
+static inline int qdisc_drop(struct sk_buff *skb, struct Qdisc *sch)
+{
+ kfree_skb(skb);
+ sch->qstats.drops++;
+
+ return NET_XMIT_DROP;
+}
+
+static inline int qdisc_reshape_fail(struct sk_buff *skb, struct Qdisc *sch)
+{
+ sch->qstats.drops++;
+
+#ifdef CONFIG_NET_CLS_POLICE
+ if (sch->reshape_fail == NULL || sch->reshape_fail(skb, sch))
+ goto drop;
+
+ return NET_XMIT_SUCCESS;
+
+drop:
+#endif
+ kfree_skb(skb);
+ return NET_XMIT_DROP;
+}
+
#endif
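The helpers above factor out the bookkeeping (backlog, bstats, drop counters) that every queueing discipline otherwise repeats. A hedged sketch of a trivial FIFO built on them; the packet limit and function names are assumptions, not a complete Qdisc_ops.

#include <net/sch_generic.h>

static int example_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	if (skb_queue_len(&sch->q) < 128)		/* arbitrary packet limit */
		return qdisc_enqueue_tail(skb, sch);	/* updates backlog + bstats */

	return qdisc_drop(skb, sch);			/* frees skb, counts the drop */
}

static struct sk_buff *example_dequeue(struct Qdisc *sch)
{
	return qdisc_dequeue_head(sch);			/* pops head, fixes backlog */
}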
diff --git a/include/net/sock.h b/include/net/sock.h
index a9ef3a6a13f3..e593af5b1ecc 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -484,6 +484,8 @@ extern void sk_stream_kill_queues(struct sock *sk);
extern int sk_wait_data(struct sock *sk, long *timeo);
+struct request_sock_ops;
+
/* Networking protocol blocks we attach to sockets.
* socket layer -> transport layer interface
* transport -> network interface is defined by struct inet_proto
@@ -547,6 +549,8 @@ struct proto {
kmem_cache_t *slab;
unsigned int obj_size;
+ struct request_sock_ops *rsk_prot;
+
struct module *owner;
char name[32];
diff --git a/include/net/tcp.h b/include/net/tcp.h
index e71f8ba3e101..f730935b824a 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -31,6 +31,7 @@
#include <linux/cache.h>
#include <linux/percpu.h>
#include <net/checksum.h>
+#include <net/request_sock.h>
#include <net/sock.h>
#include <net/snmp.h>
#include <net/ip.h>
@@ -563,7 +564,6 @@ static __inline__ int tcp_sk_listen_hashfn(struct sock *sk)
#define TCP_NAGLE_PUSH 4 /* Cork is overriden for already queued data */
/* sysctl variables for tcp */
-extern int sysctl_max_syn_backlog;
extern int sysctl_tcp_timestamps;
extern int sysctl_tcp_window_scaling;
extern int sysctl_tcp_sack;
@@ -613,74 +613,6 @@ extern atomic_t tcp_memory_allocated;
extern atomic_t tcp_sockets_allocated;
extern int tcp_memory_pressure;
-struct open_request;
-
-struct or_calltable {
- int family;
- int (*rtx_syn_ack) (struct sock *sk, struct open_request *req, struct dst_entry*);
- void (*send_ack) (struct sk_buff *skb, struct open_request *req);
- void (*destructor) (struct open_request *req);
- void (*send_reset) (struct sk_buff *skb);
-};
-
-struct tcp_v4_open_req {
- __u32 loc_addr;
- __u32 rmt_addr;
- struct ip_options *opt;
-};
-
-#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
-struct tcp_v6_open_req {
- struct in6_addr loc_addr;
- struct in6_addr rmt_addr;
- struct sk_buff *pktopts;
- int iif;
-};
-#endif
-
-/* this structure is too big */
-struct open_request {
- struct open_request *dl_next; /* Must be first member! */
- __u32 rcv_isn;
- __u32 snt_isn;
- __u16 rmt_port;
- __u16 mss;
- __u8 retrans;
- __u8 __pad;
- __u16 snd_wscale : 4,
- rcv_wscale : 4,
- tstamp_ok : 1,
- sack_ok : 1,
- wscale_ok : 1,
- ecn_ok : 1,
- acked : 1;
- /* The following two fields can be easily recomputed I think -AK */
- __u32 window_clamp; /* window clamp at creation time */
- __u32 rcv_wnd; /* rcv_wnd offered first time */
- __u32 ts_recent;
- unsigned long expires;
- struct or_calltable *class;
- struct sock *sk;
- union {
- struct tcp_v4_open_req v4_req;
-#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
- struct tcp_v6_open_req v6_req;
-#endif
- } af;
-};
-
-/* SLAB cache for open requests. */
-extern kmem_cache_t *tcp_openreq_cachep;
-
-#define tcp_openreq_alloc() kmem_cache_alloc(tcp_openreq_cachep, SLAB_ATOMIC)
-#define tcp_openreq_fastfree(req) kmem_cache_free(tcp_openreq_cachep, req)
-
-static inline void tcp_openreq_free(struct open_request *req)
-{
- req->class->destructor(req);
- tcp_openreq_fastfree(req);
-}
-
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
#define TCP_INET_FAMILY(fam) ((fam) == AF_INET)
#else
@@ -708,7 +640,7 @@ struct tcp_func {
struct sock * (*syn_recv_sock) (struct sock *sk,
struct sk_buff *skb,
- struct open_request *req,
+ struct request_sock *req,
struct dst_entry *dst);
int (*remember_stamp) (struct sock *sk);
@@ -852,8 +784,8 @@ extern enum tcp_tw_status tcp_timewait_state_process(struct tcp_tw_bucket *tw,
unsigned len);
extern struct sock * tcp_check_req(struct sock *sk,struct sk_buff *skb,
- struct open_request *req,
- struct open_request **prev);
+ struct request_sock *req,
+ struct request_sock **prev);
extern int tcp_child_process(struct sock *parent,
struct sock *child,
struct sk_buff *skb);
@@ -903,12 +835,12 @@ extern int tcp_v4_conn_request(struct sock *sk,
struct sk_buff *skb);
extern struct sock * tcp_create_openreq_child(struct sock *sk,
- struct open_request *req,
+ struct request_sock *req,
struct sk_buff *skb);
extern struct sock * tcp_v4_syn_recv_sock(struct sock *sk,
struct sk_buff *skb,
- struct open_request *req,
+ struct request_sock *req,
struct dst_entry *dst);
extern int tcp_v4_do_rcv(struct sock *sk,
@@ -922,7 +854,7 @@ extern int tcp_connect(struct sock *sk);
extern struct sk_buff * tcp_make_synack(struct sock *sk,
struct dst_entry *dst,
- struct open_request *req);
+ struct request_sock *req);
extern int tcp_disconnect(struct sock *sk, int flags);
@@ -1750,99 +1682,71 @@ static inline int tcp_full_space(const struct sock *sk)
return tcp_win_from_space(sk->sk_rcvbuf);
}
-static inline void tcp_acceptq_queue(struct sock *sk, struct open_request *req,
+static inline void tcp_acceptq_queue(struct sock *sk, struct request_sock *req,
struct sock *child)
{
- struct tcp_sock *tp = tcp_sk(sk);
-
- req->sk = child;
- sk_acceptq_added(sk);
-
- if (!tp->accept_queue_tail) {
- tp->accept_queue = req;
- } else {
- tp->accept_queue_tail->dl_next = req;
- }
- tp->accept_queue_tail = req;
- req->dl_next = NULL;
+ reqsk_queue_add(&tcp_sk(sk)->accept_queue, req, sk, child);
}
-struct tcp_listen_opt
-{
- u8 max_qlen_log; /* log_2 of maximal queued SYNs */
- int qlen;
- int qlen_young;
- int clock_hand;
- u32 hash_rnd;
- struct open_request *syn_table[TCP_SYNQ_HSIZE];
-};
-
static inline void
-tcp_synq_removed(struct sock *sk, struct open_request *req)
+tcp_synq_removed(struct sock *sk, struct request_sock *req)
{
- struct tcp_listen_opt *lopt = tcp_sk(sk)->listen_opt;
-
- if (--lopt->qlen == 0)
+ if (reqsk_queue_removed(&tcp_sk(sk)->accept_queue, req) == 0)
tcp_delete_keepalive_timer(sk);
- if (req->retrans == 0)
- lopt->qlen_young--;
}
static inline void tcp_synq_added(struct sock *sk)
{
- struct tcp_listen_opt *lopt = tcp_sk(sk)->listen_opt;
-
- if (lopt->qlen++ == 0)
+ if (reqsk_queue_added(&tcp_sk(sk)->accept_queue) == 0)
tcp_reset_keepalive_timer(sk, TCP_TIMEOUT_INIT);
- lopt->qlen_young++;
}
static inline int tcp_synq_len(struct sock *sk)
{
- return tcp_sk(sk)->listen_opt->qlen;
+ return reqsk_queue_len(&tcp_sk(sk)->accept_queue);
}
static inline int tcp_synq_young(struct sock *sk)
{
- return tcp_sk(sk)->listen_opt->qlen_young;
+ return reqsk_queue_len_young(&tcp_sk(sk)->accept_queue);
}
static inline int tcp_synq_is_full(struct sock *sk)
{
- return tcp_synq_len(sk) >> tcp_sk(sk)->listen_opt->max_qlen_log;
+ return reqsk_queue_is_full(&tcp_sk(sk)->accept_queue);
}
-static inline void tcp_synq_unlink(struct tcp_sock *tp, struct open_request *req,
- struct open_request **prev)
+static inline void tcp_synq_unlink(struct tcp_sock *tp, struct request_sock *req,
+ struct request_sock **prev)
{
- write_lock(&tp->syn_wait_lock);
- *prev = req->dl_next;
- write_unlock(&tp->syn_wait_lock);
+ reqsk_queue_unlink(&tp->accept_queue, req, prev);
}
-static inline void tcp_synq_drop(struct sock *sk, struct open_request *req,
- struct open_request **prev)
+static inline void tcp_synq_drop(struct sock *sk, struct request_sock *req,
+ struct request_sock **prev)
{
tcp_synq_unlink(tcp_sk(sk), req, prev);
tcp_synq_removed(sk, req);
- tcp_openreq_free(req);
+ reqsk_free(req);
}
-static __inline__ void tcp_openreq_init(struct open_request *req,
+static __inline__ void tcp_openreq_init(struct request_sock *req,
struct tcp_options_received *rx_opt,
struct sk_buff *skb)
{
+ struct inet_request_sock *ireq = inet_rsk(req);
+
req->rcv_wnd = 0; /* So that tcp_send_synack() knows! */
- req->rcv_isn = TCP_SKB_CB(skb)->seq;
+ tcp_rsk(req)->rcv_isn = TCP_SKB_CB(skb)->seq;
req->mss = rx_opt->mss_clamp;
req->ts_recent = rx_opt->saw_tstamp ? rx_opt->rcv_tsval : 0;
- req->tstamp_ok = rx_opt->tstamp_ok;
- req->sack_ok = rx_opt->sack_ok;
- req->snd_wscale = rx_opt->snd_wscale;
- req->wscale_ok = rx_opt->wscale_ok;
- req->acked = 0;
- req->ecn_ok = 0;
- req->rmt_port = skb->h.th->source;
+ ireq->tstamp_ok = rx_opt->tstamp_ok;
+ ireq->sack_ok = rx_opt->sack_ok;
+ ireq->snd_wscale = rx_opt->snd_wscale;
+ ireq->wscale_ok = rx_opt->wscale_ok;
+ ireq->acked = 0;
+ ireq->ecn_ok = 0;
+ ireq->rmt_port = skb->h.th->source;
}
extern void tcp_enter_memory_pressure(void);
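
Taken together, the tcp.h hunks above replace the open-coded open_request accept/SYN queues with the request_sock_queue API from the new <net/request_sock.h>, and move the per-family request fields behind cast accessors. A minimal sketch of what a call site looks like after the conversion, using only helpers visible in this diff; the wrapper function and the printk are illustrative, not part of the patch:

#include <net/tcp.h>             /* tcp_synq_drop(), tcp_rsk(), inet_rsk() */
#include <net/request_sock.h>    /* struct request_sock                    */

/* Sketch only: dropping a pending connection request and reading its
 * fields through the new accessors instead of the old open_request
 * members. */
static void example_drop_request(struct sock *sk, struct request_sock *req,
				 struct request_sock **prev)
{
	struct inet_request_sock *ireq = inet_rsk(req);

	printk(KERN_DEBUG "dropping req: isn=%u port=%u ecn=%d\n",
	       tcp_rsk(req)->rcv_isn,        /* was req->rcv_isn  */
	       ntohs(ireq->rmt_port),        /* was req->rmt_port */
	       ireq->ecn_ok);                /* was req->ecn_ok   */

	/* Unlinks from the listener's SYN table, updates the queue
	 * counters (possibly stopping the SYNACK timer) and frees the
	 * request via reqsk_free() instead of tcp_openreq_free(). */
	tcp_synq_drop(sk, req, prev);
}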
diff --git a/include/net/tcp_ecn.h b/include/net/tcp_ecn.h
index dc1456389a97..64980ee8c92a 100644
--- a/include/net/tcp_ecn.h
+++ b/include/net/tcp_ecn.h
@@ -2,6 +2,7 @@
#define _NET_TCP_ECN_H_ 1
#include <net/inet_ecn.h>
+#include <net/request_sock.h>
#define TCP_HP_BITS (~(TCP_RESERVED_BITS|TCP_FLAG_PSH))
@@ -38,9 +39,9 @@ static inline void TCP_ECN_send_syn(struct sock *sk, struct tcp_sock *tp,
}
static __inline__ void
-TCP_ECN_make_synack(struct open_request *req, struct tcphdr *th)
+TCP_ECN_make_synack(struct request_sock *req, struct tcphdr *th)
{
- if (req->ecn_ok)
+ if (inet_rsk(req)->ecn_ok)
th->ece = 1;
}
@@ -111,16 +112,16 @@ static inline int TCP_ECN_rcv_ecn_echo(struct tcp_sock *tp, struct tcphdr *th)
}
static inline void TCP_ECN_openreq_child(struct tcp_sock *tp,
- struct open_request *req)
+ struct request_sock *req)
{
- tp->ecn_flags = req->ecn_ok ? TCP_ECN_OK : 0;
+ tp->ecn_flags = inet_rsk(req)->ecn_ok ? TCP_ECN_OK : 0;
}
static __inline__ void
-TCP_ECN_create_request(struct open_request *req, struct tcphdr *th)
+TCP_ECN_create_request(struct request_sock *req, struct tcphdr *th)
{
if (sysctl_tcp_ecn && th->ece && th->cwr)
- req->ecn_ok = 1;
+ inet_rsk(req)->ecn_ok = 1;
}
#endif
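
The ECN helpers now carry the negotiated flag in inet_rsk(req)->ecn_ok rather than directly on the request. A hedged sketch of the round trip through the three converted helpers; the wrapper and its arguments are illustrative, the real call sites live in the TCP input/output paths, not in this header:

#include <net/tcp_ecn.h>

/* Sketch only: ecn_ok is set while parsing the incoming SYN, echoed as
 * ECE in the SYNACK, and finally copied into the child's ecn_flags. */
static void example_ecn_handshake(struct request_sock *req,
				  struct tcphdr *syn_th,
				  struct tcphdr *synack_th,
				  struct tcp_sock *child_tp)
{
	TCP_ECN_create_request(req, syn_th);   /* sets inet_rsk(req)->ecn_ok          */
	TCP_ECN_make_synack(req, synack_th);   /* th->ece = 1 when ecn_ok             */
	TCP_ECN_openreq_child(child_tp, req);  /* ecn_flags = ecn_ok ? TCP_ECN_OK : 0 */
}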
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
index d675836ba6c3..0e65e02b7a1d 100644
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -158,6 +158,20 @@ enum {
XFRM_STATE_DEAD
};
+/* callback structure passed from either netlink or pfkey */
+struct km_event
+{
+ union {
+ u32 hard;
+ u32 proto;
+ u32 byid;
+ } data;
+
+ u32 seq;
+ u32 pid;
+ u32 event;
+};
+
struct xfrm_type;
struct xfrm_dst;
struct xfrm_policy_afinfo {
@@ -179,6 +193,8 @@ struct xfrm_policy_afinfo {
extern int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo);
extern int xfrm_policy_unregister_afinfo(struct xfrm_policy_afinfo *afinfo);
+extern void km_policy_notify(struct xfrm_policy *xp, int dir, struct km_event *c);
+extern void km_state_notify(struct xfrm_state *x, struct km_event *c);
#define XFRM_ACQ_EXPIRES 30
@@ -290,11 +306,11 @@ struct xfrm_mgr
{
struct list_head list;
char *id;
- int (*notify)(struct xfrm_state *x, int event);
+ int (*notify)(struct xfrm_state *x, struct km_event *c);
int (*acquire)(struct xfrm_state *x, struct xfrm_tmpl *, struct xfrm_policy *xp, int dir);
struct xfrm_policy *(*compile_policy)(u16 family, int opt, u8 *data, int len, int *dir);
int (*new_mapping)(struct xfrm_state *x, xfrm_address_t *ipaddr, u16 sport);
- int (*notify_policy)(struct xfrm_policy *x, int dir, int event);
+ int (*notify_policy)(struct xfrm_policy *x, int dir, struct km_event *c);
};
extern int xfrm_register_km(struct xfrm_mgr *km);
@@ -656,7 +672,7 @@ static inline int xfrm_sk_clone_policy(struct sock *sk)
return 0;
}
-extern void xfrm_policy_delete(struct xfrm_policy *pol, int dir);
+extern int xfrm_policy_delete(struct xfrm_policy *pol, int dir);
static inline void xfrm_sk_free_policy(struct sock *sk)
{
@@ -817,7 +833,7 @@ extern int xfrm_state_add(struct xfrm_state *x);
extern int xfrm_state_update(struct xfrm_state *x);
extern struct xfrm_state *xfrm_state_lookup(xfrm_address_t *daddr, u32 spi, u8 proto, unsigned short family);
extern struct xfrm_state *xfrm_find_acq_byseq(u32 seq);
-extern void xfrm_state_delete(struct xfrm_state *x);
+extern int xfrm_state_delete(struct xfrm_state *x);
extern void xfrm_state_flush(u8 proto);
extern int xfrm_replay_check(struct xfrm_state *x, u32 seq);
extern void xfrm_replay_advance(struct xfrm_state *x, u32 seq);
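
The xfrm.h hunk replaces the plain integer event passed to key-manager callbacks with the new struct km_event, so notifications can carry the originating pid/seq and per-event data. A hedged sketch of how a caller might fill one in; the event code value and the helper name are placeholders, since the real numbering and call sites live outside this header:

#include <net/xfrm.h>

/* Sketch only: broadcasting a state expiry to every registered key
 * manager through the new km_event argument. */
static void example_notify_expire(struct xfrm_state *x, int hard)
{
	struct km_event c;

	c.data.hard = hard;	/* hard vs. soft expiry                     */
	c.pid = 0;		/* kernel-originated, no requesting process */
	c.seq = 0;
	c.event = 0;		/* placeholder; real callers pass a message code */

	/* fans out to every xfrm_mgr's ->notify(x, &c) */
	km_state_notify(x, &c);
}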
diff --git a/include/scsi/scsi.h b/include/scsi/scsi.h
index 659ecf48fb4a..1fb233741513 100644
--- a/include/scsi/scsi.h
+++ b/include/scsi/scsi.h
@@ -41,6 +41,7 @@ extern const char *const scsi_device_types[MAX_SCSI_DEVICE_CODE];
#define FORMAT_UNIT 0x04
#define READ_BLOCK_LIMITS 0x05
#define REASSIGN_BLOCKS 0x07
+#define INITIALIZE_ELEMENT_STATUS 0x07
#define READ_6 0x08
#define WRITE_6 0x0a
#define SEEK_6 0x0b
@@ -65,6 +66,7 @@ extern const char *const scsi_device_types[MAX_SCSI_DEVICE_CODE];
#define READ_10 0x28
#define WRITE_10 0x2a
#define SEEK_10 0x2b
+#define POSITION_TO_ELEMENT 0x2b
#define WRITE_VERIFY 0x2e
#define VERIFY 0x2f
#define SEARCH_HIGH 0x30
@@ -97,6 +99,7 @@ extern const char *const scsi_device_types[MAX_SCSI_DEVICE_CODE];
#define PERSISTENT_RESERVE_OUT 0x5f
#define REPORT_LUNS 0xa0
#define MOVE_MEDIUM 0xa5
+#define EXCHANGE_MEDIUM 0xa6
#define READ_12 0xa8
#define WRITE_12 0xaa
#define WRITE_VERIFY_12 0xae
@@ -210,6 +213,7 @@ static inline int scsi_status_is_good(int status)
#define TYPE_COMM 0x09 /* Communications device */
#define TYPE_ENCLOSURE 0x0d /* Enclosure Services Device */
#define TYPE_RAID 0x0c
+#define TYPE_RBC 0x0e
#define TYPE_NO_LUN 0x7f
/*
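
The new opcodes above reuse existing values (0x07 already means REASSIGN BLOCKS and 0x2b SEEK(10)); that is fine because SCSI opcodes are interpreted per device type, and these spellings exist for the medium-changer support added elsewhere in this merge. A small, purely illustrative sketch of that overloading; the decode helper is hypothetical:

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>

/* Sketch only: the same CDB opcode byte decodes differently depending
 * on the peripheral device type reported by the target. */
static const char *example_decode_0x2b(const struct scsi_device *sdev)
{
	return sdev->type == TYPE_MEDIUM_CHANGER ? "POSITION TO ELEMENT"
						 : "SEEK(10)";
}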
diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
index c018020d9160..63c91dd85ca1 100644
--- a/include/scsi/scsi_device.h
+++ b/include/scsi/scsi_device.h
@@ -154,7 +154,9 @@ struct scsi_target {
unsigned int id; /* target id ... replace
* scsi_device.id eventually */
unsigned long create:1; /* signal that it needs to be added */
- unsigned long starget_data[0];
+ void *hostdata; /* available to low-level driver */
+ unsigned long starget_data[0]; /* for the transport */
+ /* starget_data must be the last element!!!! */
} __attribute__((aligned(sizeof(unsigned long))));
#define to_scsi_target(d) container_of(d, struct scsi_target, dev)
diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h
index 1cee1e100943..db9914adeac9 100644
--- a/include/scsi/scsi_host.h
+++ b/include/scsi/scsi_host.h
@@ -10,6 +10,7 @@ struct block_device;
struct module;
struct scsi_cmnd;
struct scsi_device;
+struct scsi_target;
struct Scsi_Host;
struct scsi_host_cmd_pool;
struct scsi_transport_template;
@@ -228,6 +229,30 @@ struct scsi_host_template {
void (* slave_destroy)(struct scsi_device *);
/*
+ * Before the mid layer attempts to scan for a new device attached
+ * to a target where no target currently exists, it will call this
+ * entry in your driver. Should your driver need to allocate any
+ * structs or perform any other init items in order to send commands
+ * to a currently unused target, then this is where you can perform
+ * those allocations.
+ *
+ * Return values: 0 on success, non-0 on failure
+ *
+ * Status: OPTIONAL
+ */
+ int (* target_alloc)(struct scsi_target *);
+
+ /*
+ * Immediately prior to deallocating the target structure, and
+ * after all activity to attached scsi devices has ceased, the
+ * midlayer calls this point so that the driver may deallocate
+ * and terminate any references to the target.
+ *
+ * Status: OPTIONAL
+ */
+ void (* target_destroy)(struct scsi_target *);
+
+ /*
* fill in this function to allow the queue depth of this host
* to be changeable (on a per device basis). returns either
* the current queue depth setting (may be different from what
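
The two new template hooks give a low-level driver a place to set up and tear down per-target state; the scsi_device.h hunk above adds starget->hostdata for exactly that, leaving starget_data[] to the transport class. A hedged sketch of a driver wiring them up; my_target_info and the example_* names are hypothetical, not part of this patch:

#include <linux/slab.h>
#include <linux/string.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>

struct my_target_info {			/* hypothetical per-target driver state */
	int ncmds_outstanding;
};

static int example_target_alloc(struct scsi_target *starget)
{
	struct my_target_info *ti = kmalloc(sizeof(*ti), GFP_KERNEL);

	if (!ti)
		return -ENOMEM;		/* non-zero return aborts the scan */
	memset(ti, 0, sizeof(*ti));
	starget->hostdata = ti;		/* new field for the LLDD          */
	return 0;
}

static void example_target_destroy(struct scsi_target *starget)
{
	kfree(starget->hostdata);	/* all I/O has already ceased */
	starget->hostdata = NULL;
}

/*
 * Wired into the host template next to slave_alloc/slave_destroy:
 *	.target_alloc	= example_target_alloc,
 *	.target_destroy	= example_target_destroy,
 */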
diff --git a/include/scsi/scsi_transport.h b/include/scsi/scsi_transport.h
index 2dcee7a84752..a4f1837a33b1 100644
--- a/include/scsi/scsi_transport.h
+++ b/include/scsi/scsi_transport.h
@@ -21,6 +21,7 @@
#define SCSI_TRANSPORT_H
#include <linux/transport_class.h>
+#include <scsi/scsi_host.h>
struct scsi_transport_template {
/* the attribute containers */
@@ -32,8 +33,11 @@ struct scsi_transport_template {
* space of this size will be left at the end of the
* scsi_* structure */
int device_size;
+ int device_private_offset;
int target_size;
+ int target_private_offset;
int host_size;
+ /* no private offset for the host; there's an alternative mechanism */
/*
* True if the transport wants to use a host-based work-queue
@@ -45,4 +49,38 @@ struct scsi_transport_template {
dev_to_shost((tc)->dev)
+/* Private area maintenance. The driver requested allocations come
+ * directly after the transport class allocations (if any). The idea
+ * is that you *must* call these only once. The code assumes that the
+ * initial values are the ones the transport specific code requires */
+static inline void
+scsi_transport_reserve_target(struct scsi_transport_template * t, int space)
+{
+ BUG_ON(t->target_private_offset != 0);
+ t->target_private_offset = ALIGN(t->target_size, sizeof(void *));
+ t->target_size = t->target_private_offset + space;
+}
+static inline void
+scsi_transport_reserve_device(struct scsi_transport_template * t, int space)
+{
+ BUG_ON(t->device_private_offset != 0);
+ t->device_private_offset = ALIGN(t->device_size, sizeof(void *));
+ t->device_size = t->device_private_offset + space;
+}
+static inline void *
+scsi_transport_target_data(struct scsi_target *starget)
+{
+ struct Scsi_Host *shost = dev_to_shost(&starget->dev);
+ return (u8 *)starget->starget_data
+ + shost->transportt->target_private_offset;
+
+}
+static inline void *
+scsi_transport_device_data(struct scsi_device *sdev)
+{
+ struct Scsi_Host *shost = sdev->host;
+ return (u8 *)sdev->sdev_data
+ + shost->transportt->device_private_offset;
+}
+
#endif /* SCSI_TRANSPORT_H */
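
The new reserve/data helpers let a transport class carve out extra, pointer-aligned space behind the areas it already declares in the template, exactly once, before any targets or devices exist. A hedged sketch of the intended usage; my_tgt_private and the example_* functions are illustrative only:

#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport.h>

struct my_tgt_private {			/* hypothetical per-target data */
	unsigned int link_speed;
};

static void example_setup_template(struct scsi_transport_template *t)
{
	/* Must be called exactly once, while target_private_offset is
	 * still at its initial value; the private area is appended after
	 * the transport class's own target_size and pointer-aligned. */
	scsi_transport_reserve_target(t, sizeof(struct my_tgt_private));
}

static void example_use_target(struct scsi_target *starget)
{
	struct my_tgt_private *p = scsi_transport_target_data(starget);

	p->link_speed = 100;	/* stored inside starget->starget_data[] */
}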